/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2023 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdalign.h>
#include <stdbool.h>

#include <dev_driver.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>
#include <rte_vect.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_tf_common.h"
#include "ulp_flow_db.h"
#include "rte_pmd_bnxt.h"

#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom NetXtreme driver " DRV_MODULE_NAME;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
	{
	  RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58812) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58814) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_DEVARG_FLOW_XSTAT		"flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS	"max-num-kflows"
#define BNXT_DEVARG_REPRESENTOR		"representor"
#define BNXT_DEVARG_REP_BASED_PF	"rep-based-pf"
#define BNXT_DEVARG_REP_IS_PF		"rep-is-pf"
#define BNXT_DEVARG_REP_Q_R2F		"rep-q-r2f"
#define BNXT_DEVARG_REP_Q_F2R		"rep-q-f2r"
#define BNXT_DEVARG_REP_FC_R2F		"rep-fc-r2f"
#define BNXT_DEVARG_REP_FC_F2R		"rep-fc-f2r"
#define BNXT_DEVARG_APP_ID		"app-id"
#define BNXT_DEVARG_IEEE_1588		"ieee-1588"

static const char *const bnxt_dev_args[] = {
	BNXT_DEVARG_REPRESENTOR,
	BNXT_DEVARG_FLOW_XSTAT,
	BNXT_DEVARG_MAX_NUM_KFLOWS,
	BNXT_DEVARG_REP_BASED_PF,
	BNXT_DEVARG_REP_IS_PF,
	BNXT_DEVARG_REP_Q_R2F,
	BNXT_DEVARG_REP_Q_F2R,
	BNXT_DEVARG_REP_FC_R2F,
	BNXT_DEVARG_REP_FC_F2R,
	BNXT_DEVARG_APP_ID,
	BNXT_DEVARG_IEEE_1588,
	NULL
};

/*
 * app-id = a non-negative 8-bit number
 */
#define BNXT_DEVARG_APP_ID_INVALID(val)		((val) > 255)

/*
 * ieee-1588 = a non-negative 8-bit number
 */
#define BNXT_DEVARG_IEEE_1588_INVALID(val)	((val) > 255)

/*
 * flow_xstat == false to disable the feature
 * flow_xstat == true to enable the feature
 */
#define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)	((flow_xstat) > 1)

/*
 * rep_is_pf == false to indicate VF representor
 * rep_is_pf == true to indicate PF representor
 */
#define BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)	((rep_is_pf) > 1)

/*
 * rep_based_pf == Physical index of the PF
 */
#define BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)	((rep_based_pf) > 15)
/*
 * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction
 */
#define BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)	((rep_q_r2f) > 3)

/*
 * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction
 */
#define BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)	((rep_q_f2r) > 3)

/*
 * rep_fc_r2f == Flow control for the representor to endpoint direction
 */
#define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)	((rep_fc_r2f) > 1)

/*
 * rep_fc_f2r == Flow control for the endpoint to representor direction
 */
#define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)	((rep_fc_f2r) > 1)

int bnxt_cfa_code_dynfield_offset = -1;

/*
 * max_num_kflows must be >= 32
 * and must be a power-of-2 supported value
 * return: 1 -> invalid
 *	   0 -> valid
 */
static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows)
{
	if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows))
		return 1;
	return 0;
}

static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
static int
bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev); 183 static void bnxt_cancel_fw_health_check(struct bnxt *bp); 184 static int bnxt_restore_vlan_filters(struct bnxt *bp); 185 static void bnxt_dev_recover(void *arg); 186 static void bnxt_free_error_recovery_info(struct bnxt *bp); 187 static void bnxt_free_rep_info(struct bnxt *bp); 188 static int bnxt_check_fw_ready(struct bnxt *bp); 189 190 int is_bnxt_in_error(struct bnxt *bp) 191 { 192 if (bp->flags & BNXT_FLAG_FATAL_ERROR) 193 return -EIO; 194 if (bp->flags & BNXT_FLAG_FW_RESET) 195 return -EBUSY; 196 197 return 0; 198 } 199 200 /***********************/ 201 202 /* 203 * High level utility functions 204 */ 205 206 uint16_t bnxt_rss_ctxts(const struct bnxt *bp) 207 { 208 unsigned int num_rss_rings = RTE_MIN(bp->rx_nr_rings, 209 BNXT_RSS_TBL_SIZE_P5); 210 211 if (!BNXT_CHIP_P5(bp)) 212 return 1; 213 214 return RTE_ALIGN_MUL_CEIL(num_rss_rings, 215 BNXT_RSS_ENTRIES_PER_CTX_P5) / 216 BNXT_RSS_ENTRIES_PER_CTX_P5; 217 } 218 219 uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp) 220 { 221 if (!BNXT_CHIP_P5(bp)) 222 return HW_HASH_INDEX_SIZE; 223 224 return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_P5; 225 } 226 227 static void bnxt_free_parent_info(struct bnxt *bp) 228 { 229 rte_free(bp->parent); 230 bp->parent = NULL; 231 } 232 233 static void bnxt_free_pf_info(struct bnxt *bp) 234 { 235 rte_free(bp->pf); 236 bp->pf = NULL; 237 } 238 239 static void bnxt_free_link_info(struct bnxt *bp) 240 { 241 rte_free(bp->link_info); 242 bp->link_info = NULL; 243 } 244 245 static void bnxt_free_leds_info(struct bnxt *bp) 246 { 247 if (BNXT_VF(bp)) 248 return; 249 250 rte_free(bp->leds); 251 bp->leds = NULL; 252 } 253 254 static void bnxt_free_flow_stats_info(struct bnxt *bp) 255 { 256 rte_free(bp->flow_stat); 257 bp->flow_stat = NULL; 258 } 259 260 static void bnxt_free_cos_queues(struct bnxt *bp) 261 { 262 rte_free(bp->rx_cos_queue); 263 bp->rx_cos_queue = NULL; 264 rte_free(bp->tx_cos_queue); 265 bp->tx_cos_queue = NULL; 266 } 267 268 static void bnxt_free_mem(struct bnxt *bp, bool reconfig) 269 { 270 bnxt_free_filter_mem(bp); 271 bnxt_free_vnic_attributes(bp); 272 bnxt_free_vnic_mem(bp); 273 274 /* tx/rx rings are configured as part of *_queue_setup callbacks. 275 * If the number of rings change across fw update, 276 * we don't have much choice except to warn the user. 
277 */ 278 if (!reconfig) { 279 bnxt_free_stats(bp); 280 bnxt_free_tx_rings(bp); 281 bnxt_free_rx_rings(bp); 282 } 283 bnxt_free_async_cp_ring(bp); 284 bnxt_free_rxtx_nq_ring(bp); 285 286 rte_free(bp->grp_info); 287 bp->grp_info = NULL; 288 } 289 290 static int bnxt_alloc_parent_info(struct bnxt *bp) 291 { 292 bp->parent = rte_zmalloc("bnxt_parent_info", 293 sizeof(struct bnxt_parent_info), 0); 294 if (bp->parent == NULL) 295 return -ENOMEM; 296 297 return 0; 298 } 299 300 static int bnxt_alloc_pf_info(struct bnxt *bp) 301 { 302 bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0); 303 if (bp->pf == NULL) 304 return -ENOMEM; 305 306 return 0; 307 } 308 309 static int bnxt_alloc_link_info(struct bnxt *bp) 310 { 311 bp->link_info = 312 rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0); 313 if (bp->link_info == NULL) 314 return -ENOMEM; 315 316 return 0; 317 } 318 319 static int bnxt_alloc_leds_info(struct bnxt *bp) 320 { 321 if (BNXT_VF(bp)) 322 return 0; 323 324 bp->leds = rte_zmalloc("bnxt_leds", 325 BNXT_MAX_LED * sizeof(struct bnxt_led_info), 326 0); 327 if (bp->leds == NULL) 328 return -ENOMEM; 329 330 return 0; 331 } 332 333 static int bnxt_alloc_cos_queues(struct bnxt *bp) 334 { 335 bp->rx_cos_queue = 336 rte_zmalloc("bnxt_rx_cosq", 337 BNXT_COS_QUEUE_COUNT * 338 sizeof(struct bnxt_cos_queue_info), 339 0); 340 if (bp->rx_cos_queue == NULL) 341 return -ENOMEM; 342 343 bp->tx_cos_queue = 344 rte_zmalloc("bnxt_tx_cosq", 345 BNXT_COS_QUEUE_COUNT * 346 sizeof(struct bnxt_cos_queue_info), 347 0); 348 if (bp->tx_cos_queue == NULL) 349 return -ENOMEM; 350 351 return 0; 352 } 353 354 static int bnxt_alloc_flow_stats_info(struct bnxt *bp) 355 { 356 bp->flow_stat = rte_zmalloc("bnxt_flow_xstat", 357 sizeof(struct bnxt_flow_stat_info), 0); 358 if (bp->flow_stat == NULL) 359 return -ENOMEM; 360 361 return 0; 362 } 363 364 static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) 365 { 366 int rc; 367 368 rc = bnxt_alloc_ring_grps(bp); 369 if (rc) 370 goto alloc_mem_err; 371 372 rc = bnxt_alloc_async_ring_struct(bp); 373 if (rc) 374 goto alloc_mem_err; 375 376 rc = bnxt_alloc_vnic_mem(bp); 377 if (rc) 378 goto alloc_mem_err; 379 380 rc = bnxt_alloc_vnic_attributes(bp, reconfig); 381 if (rc) 382 goto alloc_mem_err; 383 384 rc = bnxt_alloc_filter_mem(bp); 385 if (rc) 386 goto alloc_mem_err; 387 388 rc = bnxt_alloc_async_cp_ring(bp); 389 if (rc) 390 goto alloc_mem_err; 391 392 rc = bnxt_alloc_rxtx_nq_ring(bp); 393 if (rc) 394 goto alloc_mem_err; 395 396 if (BNXT_FLOW_XSTATS_EN(bp)) { 397 rc = bnxt_alloc_flow_stats_info(bp); 398 if (rc) 399 goto alloc_mem_err; 400 } 401 402 return 0; 403 404 alloc_mem_err: 405 bnxt_free_mem(bp, reconfig); 406 return rc; 407 } 408 409 static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id) 410 { 411 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 412 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 413 uint64_t rx_offloads = dev_conf->rxmode.offloads; 414 struct bnxt_rx_queue *rxq; 415 unsigned int j; 416 int rc; 417 418 rc = bnxt_vnic_grp_alloc(bp, vnic); 419 if (rc) 420 goto err_out; 421 422 PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 423 vnic_id, vnic, vnic->fw_grp_ids); 424 425 /* populate the fw group table */ 426 bnxt_vnic_ring_grp_populate(bp, vnic); 427 bnxt_vnic_rules_init(vnic); 428 429 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 430 if (rc) 431 goto err_out; 432 433 /* Alloc RSS context only if RSS mode is enabled */ 434 if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) { 435 int j, nr_ctxs 
= bnxt_rss_ctxts(bp); 436 437 /* RSS table size in P5 is 512. 438 * Cap max Rx rings to same value 439 */ 440 if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_P5) { 441 PMD_DRV_LOG(ERR, "RxQ cnt %d > reta_size %d\n", 442 bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_P5); 443 goto err_out; 444 } 445 446 rc = 0; 447 for (j = 0; j < nr_ctxs; j++) { 448 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j); 449 if (rc) 450 break; 451 } 452 if (rc) { 453 PMD_DRV_LOG(ERR, 454 "HWRM vnic %d ctx %d alloc failure rc: %x\n", 455 vnic_id, j, rc); 456 goto err_out; 457 } 458 vnic->num_lb_ctxts = nr_ctxs; 459 } 460 461 /* 462 * Firmware sets pf pair in default vnic cfg. If the VLAN strip 463 * setting is not available at this time, it will not be 464 * configured correctly in the CFA. 465 */ 466 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 467 vnic->vlan_strip = true; 468 else 469 vnic->vlan_strip = false; 470 471 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 472 if (rc) 473 goto err_out; 474 475 rc = bnxt_set_hwrm_vnic_filters(bp, vnic); 476 if (rc) 477 goto err_out; 478 479 for (j = 0; j < bp->rx_num_qs_per_vnic; j++) { 480 rxq = bp->eth_dev->data->rx_queues[j]; 481 482 PMD_DRV_LOG(DEBUG, 483 "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n", 484 j, rxq->vnic, rxq->vnic->fw_grp_ids); 485 486 if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start) 487 vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; 488 } 489 490 PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt); 491 492 rc = bnxt_vnic_rss_configure(bp, vnic); 493 if (rc) 494 goto err_out; 495 496 bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 497 498 rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 499 (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ? 500 true : false); 501 if (rc) 502 goto err_out; 503 504 return 0; 505 err_out: 506 PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n", 507 vnic_id, rc); 508 return rc; 509 } 510 511 static int bnxt_register_fc_ctx_mem(struct bnxt *bp) 512 { 513 int rc = 0; 514 515 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma, 516 &bp->flow_stat->rx_fc_in_tbl.ctx_id); 517 if (rc) 518 return rc; 519 520 PMD_DRV_LOG(DEBUG, 521 "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p" 522 " rx_fc_in_tbl.ctx_id = %d\n", 523 bp->flow_stat->rx_fc_in_tbl.va, 524 (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma), 525 bp->flow_stat->rx_fc_in_tbl.ctx_id); 526 527 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma, 528 &bp->flow_stat->rx_fc_out_tbl.ctx_id); 529 if (rc) 530 return rc; 531 532 PMD_DRV_LOG(DEBUG, 533 "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p" 534 " rx_fc_out_tbl.ctx_id = %d\n", 535 bp->flow_stat->rx_fc_out_tbl.va, 536 (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma), 537 bp->flow_stat->rx_fc_out_tbl.ctx_id); 538 539 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma, 540 &bp->flow_stat->tx_fc_in_tbl.ctx_id); 541 if (rc) 542 return rc; 543 544 PMD_DRV_LOG(DEBUG, 545 "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p" 546 " tx_fc_in_tbl.ctx_id = %d\n", 547 bp->flow_stat->tx_fc_in_tbl.va, 548 (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma), 549 bp->flow_stat->tx_fc_in_tbl.ctx_id); 550 551 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma, 552 &bp->flow_stat->tx_fc_out_tbl.ctx_id); 553 if (rc) 554 return rc; 555 556 PMD_DRV_LOG(DEBUG, 557 "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p" 558 " tx_fc_out_tbl.ctx_id = %d\n", 559 bp->flow_stat->tx_fc_out_tbl.va, 560 (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma), 561 bp->flow_stat->tx_fc_out_tbl.ctx_id); 562 563 memset(bp->flow_stat->rx_fc_out_tbl.va, 564 0, 565 
bp->flow_stat->rx_fc_out_tbl.size); 566 rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 567 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 568 bp->flow_stat->rx_fc_out_tbl.ctx_id, 569 bp->flow_stat->max_fc, 570 true); 571 if (rc) 572 return rc; 573 574 memset(bp->flow_stat->tx_fc_out_tbl.va, 575 0, 576 bp->flow_stat->tx_fc_out_tbl.size); 577 rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 578 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 579 bp->flow_stat->tx_fc_out_tbl.ctx_id, 580 bp->flow_stat->max_fc, 581 true); 582 583 return rc; 584 } 585 586 static int bnxt_alloc_ctx_mem_buf(struct bnxt *bp, char *type, size_t size, 587 struct bnxt_ctx_mem_buf_info *ctx) 588 { 589 if (!ctx) 590 return -EINVAL; 591 592 ctx->va = rte_zmalloc_socket(type, size, 0, 593 bp->eth_dev->device->numa_node); 594 if (ctx->va == NULL) 595 return -ENOMEM; 596 rte_mem_lock_page(ctx->va); 597 ctx->size = size; 598 ctx->dma = rte_mem_virt2iova(ctx->va); 599 if (ctx->dma == RTE_BAD_IOVA) 600 return -ENOMEM; 601 602 return 0; 603 } 604 605 static int bnxt_init_fc_ctx_mem(struct bnxt *bp) 606 { 607 struct rte_pci_device *pdev = bp->pdev; 608 char type[RTE_MEMZONE_NAMESIZE]; 609 uint16_t max_fc; 610 int rc = 0; 611 612 max_fc = bp->flow_stat->max_fc; 613 614 sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 615 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 616 /* 4 bytes for each counter-id */ 617 rc = bnxt_alloc_ctx_mem_buf(bp, type, 618 max_fc * 4, 619 &bp->flow_stat->rx_fc_in_tbl); 620 if (rc) 621 return rc; 622 623 sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 624 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 625 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 626 rc = bnxt_alloc_ctx_mem_buf(bp, type, 627 max_fc * 16, 628 &bp->flow_stat->rx_fc_out_tbl); 629 if (rc) 630 return rc; 631 632 sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 633 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 634 /* 4 bytes for each counter-id */ 635 rc = bnxt_alloc_ctx_mem_buf(bp, type, 636 max_fc * 4, 637 &bp->flow_stat->tx_fc_in_tbl); 638 if (rc) 639 return rc; 640 641 sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 642 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 643 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 644 rc = bnxt_alloc_ctx_mem_buf(bp, type, 645 max_fc * 16, 646 &bp->flow_stat->tx_fc_out_tbl); 647 if (rc) 648 return rc; 649 650 rc = bnxt_register_fc_ctx_mem(bp); 651 652 return rc; 653 } 654 655 static int bnxt_init_ctx_mem(struct bnxt *bp) 656 { 657 int rc = 0; 658 659 if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) || 660 !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) || 661 !BNXT_FLOW_XSTATS_EN(bp)) 662 return 0; 663 664 rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc); 665 if (rc) 666 return rc; 667 668 rc = bnxt_init_fc_ctx_mem(bp); 669 670 return rc; 671 } 672 673 static inline bool bnxt_force_link_config(struct bnxt *bp) 674 { 675 uint16_t subsystem_device_id = bp->pdev->id.subsystem_device_id; 676 677 switch (subsystem_device_id) { 678 case BROADCOM_DEV_957508_N2100: 679 case BROADCOM_DEV_957414_N225: 680 return true; 681 default: 682 return false; 683 } 684 } 685 686 static int bnxt_update_phy_setting(struct bnxt *bp) 687 { 688 struct rte_eth_link new; 689 int rc; 690 691 rc = bnxt_get_hwrm_link_config(bp, &new); 692 if (rc) { 693 PMD_DRV_LOG(ERR, "Failed to get link settings\n"); 694 return rc; 695 } 696 697 /* 698 * Device is not obliged link down in certain scenarios, even 699 * 
when forced. When FW does not allow any user other than BMC 700 * to shutdown the port, bnxt_get_hwrm_link_config() call always 701 * returns link up. Force phy update always in that case. 702 */ 703 if (!new.link_status || bnxt_force_link_config(bp)) { 704 rc = bnxt_set_hwrm_link_config(bp, true); 705 if (rc) { 706 PMD_DRV_LOG(ERR, "Failed to update PHY settings\n"); 707 return rc; 708 } 709 } 710 711 return rc; 712 } 713 714 static void bnxt_free_prev_ring_stats(struct bnxt *bp) 715 { 716 rte_free(bp->prev_rx_ring_stats); 717 rte_free(bp->prev_tx_ring_stats); 718 719 bp->prev_rx_ring_stats = NULL; 720 bp->prev_tx_ring_stats = NULL; 721 } 722 723 static int bnxt_alloc_prev_ring_stats(struct bnxt *bp) 724 { 725 bp->prev_rx_ring_stats = rte_zmalloc("bnxt_prev_rx_ring_stats", 726 sizeof(struct bnxt_ring_stats) * 727 bp->rx_cp_nr_rings, 728 0); 729 if (bp->prev_rx_ring_stats == NULL) 730 return -ENOMEM; 731 732 bp->prev_tx_ring_stats = rte_zmalloc("bnxt_prev_tx_ring_stats", 733 sizeof(struct bnxt_ring_stats) * 734 bp->tx_cp_nr_rings, 735 0); 736 if (bp->tx_cp_nr_rings > 0 && bp->prev_tx_ring_stats == NULL) 737 goto error; 738 739 return 0; 740 741 error: 742 bnxt_free_prev_ring_stats(bp); 743 return -ENOMEM; 744 } 745 746 static int bnxt_start_nic(struct bnxt *bp) 747 { 748 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); 749 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 750 uint32_t intr_vector = 0; 751 uint32_t queue_id, base = BNXT_MISC_VEC_ID; 752 uint32_t vec = BNXT_MISC_VEC_ID; 753 unsigned int i, j; 754 int rc; 755 756 if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) 757 bp->flags |= BNXT_FLAG_JUMBO; 758 else 759 bp->flags &= ~BNXT_FLAG_JUMBO; 760 761 /* P5 does not support ring groups. 762 * But we will use the array to save RSS context IDs. 
763 */ 764 if (BNXT_CHIP_P5(bp)) 765 bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5; 766 767 rc = bnxt_vnic_queue_db_init(bp); 768 if (rc) { 769 PMD_DRV_LOG(ERR, "could not allocate vnic db\n"); 770 goto err_out; 771 } 772 773 rc = bnxt_alloc_hwrm_rings(bp); 774 if (rc) { 775 PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc); 776 goto err_out; 777 } 778 779 rc = bnxt_alloc_all_hwrm_ring_grps(bp); 780 if (rc) { 781 PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc); 782 goto err_out; 783 } 784 785 if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)) 786 goto skip_cosq_cfg; 787 788 for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) { 789 if (bp->rx_cos_queue[i].id != 0xff) { 790 struct bnxt_vnic_info *vnic = &bp->vnic_info[j++]; 791 792 if (!vnic) { 793 PMD_DRV_LOG(ERR, 794 "Num pools more than FW profile\n"); 795 rc = -EINVAL; 796 goto err_out; 797 } 798 vnic->cos_queue_id = bp->rx_cos_queue[i].id; 799 bp->rx_cosq_cnt++; 800 } 801 } 802 803 skip_cosq_cfg: 804 rc = bnxt_mq_rx_configure(bp); 805 if (rc) { 806 PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc); 807 goto err_out; 808 } 809 810 for (j = 0; j < bp->rx_nr_rings; j++) { 811 struct bnxt_rx_queue *rxq = bp->rx_queues[j]; 812 813 if (!rxq->rx_deferred_start) { 814 bp->eth_dev->data->rx_queue_state[j] = 815 RTE_ETH_QUEUE_STATE_STARTED; 816 rxq->rx_started = true; 817 } 818 } 819 820 /* setup the default vnic details*/ 821 bnxt_vnic_queue_db_update_dlft_vnic(bp); 822 823 /* VNIC configuration */ 824 for (i = 0; i < bp->nr_vnics; i++) { 825 rc = bnxt_setup_one_vnic(bp, i); 826 if (rc) 827 goto err_out; 828 } 829 830 for (j = 0; j < bp->tx_nr_rings; j++) { 831 struct bnxt_tx_queue *txq = bp->tx_queues[j]; 832 833 if (!txq->tx_deferred_start) { 834 bp->eth_dev->data->tx_queue_state[j] = 835 RTE_ETH_QUEUE_STATE_STARTED; 836 txq->tx_started = true; 837 } 838 } 839 840 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); 841 if (rc) { 842 PMD_DRV_LOG(ERR, 843 "HWRM cfa l2 rx mask failure rc: %x\n", rc); 844 goto err_out; 845 } 846 847 /* check and configure queue intr-vector mapping */ 848 if ((rte_intr_cap_multiple(intr_handle) || 849 !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && 850 bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { 851 intr_vector = bp->eth_dev->data->nb_rx_queues; 852 PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector); 853 if (intr_vector > bp->rx_cp_nr_rings) { 854 PMD_DRV_LOG(ERR, "At most %d intr queues supported", 855 bp->rx_cp_nr_rings); 856 return -ENOTSUP; 857 } 858 rc = rte_intr_efd_enable(intr_handle, intr_vector); 859 if (rc) 860 return rc; 861 } 862 863 if (rte_intr_dp_is_en(intr_handle)) { 864 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 865 bp->eth_dev->data->nb_rx_queues)) { 866 PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" 867 " intr_vec", bp->eth_dev->data->nb_rx_queues); 868 rc = -ENOMEM; 869 goto err_out; 870 } 871 PMD_DRV_LOG(DEBUG, "intr_handle->nb_efd = %d " 872 "intr_handle->max_intr = %d\n", 873 rte_intr_nb_efd_get(intr_handle), 874 rte_intr_max_intr_get(intr_handle)); 875 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; 876 queue_id++) { 877 rte_intr_vec_list_index_set(intr_handle, 878 queue_id, vec + BNXT_RX_VEC_START); 879 if (vec < base + rte_intr_nb_efd_get(intr_handle) 880 - 1) 881 vec++; 882 } 883 } 884 885 /* enable uio/vfio intr/eventfd mapping */ 886 rc = rte_intr_enable(intr_handle); 887 #ifndef RTE_EXEC_ENV_FREEBSD 888 /* In FreeBSD OS, nic_uio driver does not support interrupts */ 889 if (rc) 890 goto err_out; 891 #endif 892 893 
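	/*
	 * Added descriptive note: at this point the Rx queue interrupt
	 * vectors are mapped and uio/vfio interrupts are enabled. The
	 * remaining start-up steps below refresh the PHY configuration and
	 * allocate the mark table used to translate CFA codes from Rx
	 * completions into mbuf mark values.
	 */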
rc = bnxt_update_phy_setting(bp); 894 if (rc) 895 goto err_out; 896 897 bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0); 898 if (!bp->mark_table) 899 PMD_DRV_LOG(ERR, "Allocation of mark table failed\n"); 900 901 return 0; 902 903 err_out: 904 /* Some of the error status returned by FW may not be from errno.h */ 905 if (rc > 0) 906 rc = -EIO; 907 908 return rc; 909 } 910 911 static int bnxt_shutdown_nic(struct bnxt *bp) 912 { 913 bnxt_free_all_hwrm_resources(bp); 914 bnxt_free_all_filters(bp); 915 bnxt_free_all_vnics(bp); 916 bnxt_vnic_queue_db_deinit(bp); 917 return 0; 918 } 919 920 /* 921 * Device configuration and status function 922 */ 923 924 uint32_t bnxt_get_speed_capabilities(struct bnxt *bp) 925 { 926 uint32_t pam4_link_speed = 0; 927 uint32_t link_speed = 0; 928 uint32_t speed_capa = 0; 929 930 if (bp->link_info == NULL) 931 return 0; 932 933 link_speed = bp->link_info->support_speeds; 934 935 /* If PAM4 is configured, use PAM4 supported speed */ 936 if (bp->link_info->support_pam4_speeds > 0) 937 pam4_link_speed = bp->link_info->support_pam4_speeds; 938 939 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB) 940 speed_capa |= RTE_ETH_LINK_SPEED_100M; 941 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD) 942 speed_capa |= RTE_ETH_LINK_SPEED_100M_HD; 943 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) 944 speed_capa |= RTE_ETH_LINK_SPEED_1G; 945 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB) 946 speed_capa |= RTE_ETH_LINK_SPEED_2_5G; 947 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB) 948 speed_capa |= RTE_ETH_LINK_SPEED_10G; 949 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB) 950 speed_capa |= RTE_ETH_LINK_SPEED_20G; 951 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB) 952 speed_capa |= RTE_ETH_LINK_SPEED_25G; 953 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB) 954 speed_capa |= RTE_ETH_LINK_SPEED_40G; 955 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) 956 speed_capa |= RTE_ETH_LINK_SPEED_50G; 957 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) 958 speed_capa |= RTE_ETH_LINK_SPEED_100G; 959 if (pam4_link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) 960 speed_capa |= RTE_ETH_LINK_SPEED_50G; 961 if (pam4_link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) 962 speed_capa |= RTE_ETH_LINK_SPEED_100G; 963 if (pam4_link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G) 964 speed_capa |= RTE_ETH_LINK_SPEED_200G; 965 966 if (bp->link_info->auto_mode == 967 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE) 968 speed_capa |= RTE_ETH_LINK_SPEED_FIXED; 969 970 return speed_capa; 971 } 972 973 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, 974 struct rte_eth_dev_info *dev_info) 975 { 976 struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device); 977 struct bnxt *bp = eth_dev->data->dev_private; 978 uint16_t max_vnics, i, j, vpool, vrxq; 979 unsigned int max_rx_rings; 980 int rc; 981 982 rc = is_bnxt_in_error(bp); 983 if (rc) 984 return rc; 985 986 /* MAC Specifics */ 987 dev_info->max_mac_addrs = RTE_MIN(bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR); 988 dev_info->max_hash_mac_addrs = 0; 989 990 /* PF/VF specifics */ 991 if (BNXT_PF(bp)) 992 dev_info->max_vfs = pdev->max_vfs; 993 994 max_rx_rings = bnxt_max_rings(bp); 995 /* For the sake of symmetry, max_rx_queues = max_tx_queues */ 996 dev_info->max_rx_queues = max_rx_rings; 997 dev_info->max_tx_queues = max_rx_rings; 
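	/*
	 * Illustrative sketch (added, not part of the driver): applications
	 * read these limits through the standard ethdev API before sizing
	 * their queue configuration, e.g.
	 *
	 *   struct rte_eth_dev_info info;
	 *   rte_eth_dev_info_get(port_id, &info);
	 *   nb_rxq = RTE_MIN(nb_rxq, info.max_rx_queues);
	 *
	 * where port_id and nb_rxq are hypothetical application variables.
	 */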
	dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
	dev_info->hash_key_size = HW_HASH_KEY_SIZE;
	max_vnics = bp->max_vnics;

	/* MTU specifics */
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = BNXT_MAX_MTU;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

	dev_info->rx_offload_capa = bnxt_get_rx_port_offloads(bp);
	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	dev_info->tx_offload_capa = bnxt_get_tx_port_offloads(bp) |
				    dev_info->tx_queue_offload_capa;
	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = BNXT_DEFAULT_RX_DROP_EN,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
	};

	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
	dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;

	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
		dev_info->switch_info.name = eth_dev->device->name;
		dev_info->switch_info.domain_id = bp->switch_domain_id;
		dev_info->switch_info.port_id =
			BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF :
				      BNXT_SWITCH_PORT_ID_TRUSTED_VF;
	}

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 * need further investigation.
1057 */ 1058 1059 /* VMDq resources */ 1060 vpool = 64; /* RTE_ETH_64_POOLS */ 1061 vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */ 1062 for (i = 0; i < 4; vpool >>= 1, i++) { 1063 if (max_vnics > vpool) { 1064 for (j = 0; j < 5; vrxq >>= 1, j++) { 1065 if (dev_info->max_rx_queues > vrxq) { 1066 if (vpool > vrxq) 1067 vpool = vrxq; 1068 goto found; 1069 } 1070 } 1071 /* Not enough resources to support VMDq */ 1072 break; 1073 } 1074 } 1075 /* Not enough resources to support VMDq */ 1076 vpool = 0; 1077 vrxq = 0; 1078 found: 1079 dev_info->max_vmdq_pools = vpool; 1080 dev_info->vmdq_queue_num = vrxq; 1081 1082 dev_info->vmdq_pool_base = 0; 1083 dev_info->vmdq_queue_base = 0; 1084 1085 dev_info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE; 1086 1087 return 0; 1088 } 1089 1090 /* Configure the device based on the configuration provided */ 1091 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) 1092 { 1093 struct bnxt *bp = eth_dev->data->dev_private; 1094 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1095 struct rte_eth_rss_conf *rss_conf = ð_dev->data->dev_conf.rx_adv_conf.rss_conf; 1096 int rc; 1097 1098 bp->rx_queues = (void *)eth_dev->data->rx_queues; 1099 bp->tx_queues = (void *)eth_dev->data->tx_queues; 1100 bp->tx_nr_rings = eth_dev->data->nb_tx_queues; 1101 bp->rx_nr_rings = eth_dev->data->nb_rx_queues; 1102 1103 rc = is_bnxt_in_error(bp); 1104 if (rc) 1105 return rc; 1106 1107 if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) { 1108 rc = bnxt_hwrm_check_vf_rings(bp); 1109 if (rc) { 1110 PMD_DRV_LOG(ERR, "HWRM insufficient resources\n"); 1111 return -ENOSPC; 1112 } 1113 1114 /* If a resource has already been allocated - in this case 1115 * it is the async completion ring, free it. Reallocate it after 1116 * resource reservation. This will ensure the resource counts 1117 * are calculated correctly. 
1118 */ 1119 1120 pthread_mutex_lock(&bp->def_cp_lock); 1121 1122 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1123 bnxt_disable_int(bp); 1124 bnxt_free_cp_ring(bp, bp->async_cp_ring); 1125 } 1126 1127 rc = bnxt_hwrm_func_reserve_vf_resc(bp, false); 1128 if (rc) { 1129 PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc); 1130 pthread_mutex_unlock(&bp->def_cp_lock); 1131 return -ENOSPC; 1132 } 1133 1134 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1135 rc = bnxt_alloc_async_cp_ring(bp); 1136 if (rc) { 1137 pthread_mutex_unlock(&bp->def_cp_lock); 1138 return rc; 1139 } 1140 bnxt_enable_int(bp); 1141 } 1142 1143 pthread_mutex_unlock(&bp->def_cp_lock); 1144 } 1145 1146 /* Inherit new configurations */ 1147 if (eth_dev->data->nb_rx_queues > bp->max_rx_rings || 1148 eth_dev->data->nb_tx_queues > bp->max_tx_rings || 1149 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues 1150 + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings || 1151 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues > 1152 bp->max_stat_ctx) 1153 goto resource_error; 1154 1155 if (BNXT_HAS_RING_GRPS(bp) && 1156 (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps) 1157 goto resource_error; 1158 1159 if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && 1160 bp->max_vnics < eth_dev->data->nb_rx_queues) 1161 goto resource_error; 1162 1163 bp->rx_cp_nr_rings = bp->rx_nr_rings; 1164 bp->tx_cp_nr_rings = bp->tx_nr_rings; 1165 1166 if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 1167 rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 1168 eth_dev->data->dev_conf.rxmode.offloads = rx_offloads; 1169 1170 /* application provides the hash key to program */ 1171 if (rss_conf->rss_key != NULL) { 1172 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) 1173 PMD_DRV_LOG(WARNING, "port %u RSS key len must be %d bytes long", 1174 eth_dev->data->port_id, HW_HASH_KEY_SIZE); 1175 else 1176 memcpy(bp->rss_conf.rss_key, rss_conf->rss_key, HW_HASH_KEY_SIZE); 1177 } 1178 bp->rss_conf.rss_key_len = HW_HASH_KEY_SIZE; 1179 bp->rss_conf.rss_hf = rss_conf->rss_hf; 1180 1181 bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu); 1182 1183 return 0; 1184 1185 resource_error: 1186 PMD_DRV_LOG(ERR, 1187 "Insufficient resources to support requested config\n"); 1188 PMD_DRV_LOG(ERR, 1189 "Num Queues Requested: Tx %d, Rx %d\n", 1190 eth_dev->data->nb_tx_queues, 1191 eth_dev->data->nb_rx_queues); 1192 PMD_DRV_LOG(ERR, 1193 "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n", 1194 bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings, 1195 bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics); 1196 return -ENOSPC; 1197 } 1198 1199 void bnxt_print_link_info(struct rte_eth_dev *eth_dev) 1200 { 1201 struct rte_eth_link *link = ð_dev->data->dev_link; 1202 1203 if (link->link_status) 1204 PMD_DRV_LOG(DEBUG, "Port %d Link Up - speed %u Mbps - %s\n", 1205 eth_dev->data->port_id, 1206 (uint32_t)link->link_speed, 1207 (link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 1208 ("full-duplex") : ("half-duplex\n")); 1209 else 1210 PMD_DRV_LOG(INFO, "Port %d Link Down\n", 1211 eth_dev->data->port_id); 1212 } 1213 1214 /* 1215 * Determine whether the current configuration requires support for scattered 1216 * receive; return 1 if scattered receive is required and 0 if not. 
1217 */ 1218 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev) 1219 { 1220 uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU; 1221 uint16_t buf_size; 1222 int i; 1223 1224 if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) 1225 return 1; 1226 1227 if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) 1228 return 1; 1229 1230 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 1231 struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i]; 1232 1233 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - 1234 RTE_PKTMBUF_HEADROOM); 1235 if (eth_dev->data->mtu + overhead > buf_size) 1236 return 1; 1237 } 1238 return 0; 1239 } 1240 1241 static eth_rx_burst_t 1242 bnxt_receive_function(struct rte_eth_dev *eth_dev) 1243 { 1244 struct bnxt *bp = eth_dev->data->dev_private; 1245 1246 /* Disable vector mode RX for Stingray2 for now */ 1247 if (BNXT_CHIP_SR2(bp)) { 1248 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1249 return bnxt_recv_pkts; 1250 } 1251 1252 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 1253 /* Vector mode receive cannot be enabled if scattered rx is in use. */ 1254 if (eth_dev->data->scattered_rx) 1255 goto use_scalar_rx; 1256 1257 /* 1258 * Vector mode receive cannot be enabled if Truflow is enabled or if 1259 * asynchronous completions and receive completions can be placed in 1260 * the same completion ring. 1261 */ 1262 if (BNXT_TRUFLOW_EN(bp) || !BNXT_NUM_ASYNC_CPR(bp)) 1263 goto use_scalar_rx; 1264 1265 /* 1266 * Vector mode receive cannot be enabled if any receive offloads outside 1267 * a limited subset have been enabled. 1268 */ 1269 if (eth_dev->data->dev_conf.rxmode.offloads & 1270 ~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP | 1271 RTE_ETH_RX_OFFLOAD_KEEP_CRC | 1272 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 1273 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 1274 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | 1275 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1276 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | 1277 RTE_ETH_RX_OFFLOAD_RSS_HASH | 1278 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) 1279 goto use_scalar_rx; 1280 1281 if (bp->ieee_1588) 1282 goto use_scalar_rx; 1283 1284 #if defined(RTE_ARCH_X86) 1285 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && 1286 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { 1287 PMD_DRV_LOG(INFO, 1288 "Using AVX2 vector mode receive for port %d\n", 1289 eth_dev->data->port_id); 1290 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1291 return bnxt_recv_pkts_vec_avx2; 1292 } 1293 #endif 1294 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1295 PMD_DRV_LOG(INFO, 1296 "Using SSE vector mode receive for port %d\n", 1297 eth_dev->data->port_id); 1298 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1299 return bnxt_recv_pkts_vec; 1300 } 1301 1302 use_scalar_rx: 1303 PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n", 1304 eth_dev->data->port_id); 1305 PMD_DRV_LOG(INFO, 1306 "Port %d scatter: %d rx offload: %" PRIX64 "\n", 1307 eth_dev->data->port_id, 1308 eth_dev->data->scattered_rx, 1309 eth_dev->data->dev_conf.rxmode.offloads); 1310 #endif 1311 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1312 return bnxt_recv_pkts; 1313 } 1314 1315 static eth_tx_burst_t 1316 bnxt_transmit_function(struct rte_eth_dev *eth_dev) 1317 { 1318 struct bnxt *bp = eth_dev->data->dev_private; 1319 1320 /* Disable vector mode TX for Stingray2 for now */ 1321 if (BNXT_CHIP_SR2(bp)) 1322 return bnxt_xmit_pkts; 1323 1324 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 1325 uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads; 1326 1327 /* 1328 * 
Vector mode transmit can be enabled only if not using scatter rx 1329 * or tx offloads. 1330 */ 1331 if (eth_dev->data->scattered_rx || 1332 (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) || 1333 BNXT_TRUFLOW_EN(bp) || bp->ieee_1588) 1334 goto use_scalar_tx; 1335 1336 #if defined(RTE_ARCH_X86) 1337 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && 1338 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { 1339 PMD_DRV_LOG(INFO, 1340 "Using AVX2 vector mode transmit for port %d\n", 1341 eth_dev->data->port_id); 1342 return bnxt_xmit_pkts_vec_avx2; 1343 } 1344 #endif 1345 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1346 PMD_DRV_LOG(INFO, 1347 "Using SSE vector mode transmit for port %d\n", 1348 eth_dev->data->port_id); 1349 return bnxt_xmit_pkts_vec; 1350 } 1351 1352 use_scalar_tx: 1353 PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n", 1354 eth_dev->data->port_id); 1355 PMD_DRV_LOG(INFO, 1356 "Port %d scatter: %d tx offload: %" PRIX64 "\n", 1357 eth_dev->data->port_id, 1358 eth_dev->data->scattered_rx, 1359 offloads); 1360 #endif 1361 return bnxt_xmit_pkts; 1362 } 1363 1364 static int bnxt_handle_if_change_status(struct bnxt *bp) 1365 { 1366 int rc; 1367 1368 /* Since fw has undergone a reset and lost all contexts, 1369 * set fatal flag to not issue hwrm during cleanup 1370 */ 1371 bp->flags |= BNXT_FLAG_FATAL_ERROR; 1372 bnxt_uninit_resources(bp, true); 1373 1374 /* clear fatal flag so that re-init happens */ 1375 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 1376 1377 rc = bnxt_check_fw_ready(bp); 1378 if (rc) 1379 return rc; 1380 1381 rc = bnxt_init_resources(bp, true); 1382 1383 bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE; 1384 1385 return rc; 1386 } 1387 1388 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) 1389 { 1390 struct bnxt *bp = eth_dev->data->dev_private; 1391 int rc = 0; 1392 1393 if (!BNXT_SINGLE_PF(bp)) 1394 return -ENOTSUP; 1395 1396 if (!bp->link_info->link_up) 1397 rc = bnxt_set_hwrm_link_config(bp, true); 1398 if (!rc) 1399 eth_dev->data->dev_link.link_status = 1; 1400 1401 bnxt_print_link_info(eth_dev); 1402 return rc; 1403 } 1404 1405 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) 1406 { 1407 struct bnxt *bp = eth_dev->data->dev_private; 1408 1409 if (!BNXT_SINGLE_PF(bp)) 1410 return -ENOTSUP; 1411 1412 eth_dev->data->dev_link.link_status = 0; 1413 bnxt_set_hwrm_link_config(bp, false); 1414 bp->link_info->link_up = 0; 1415 1416 return 0; 1417 } 1418 1419 static void bnxt_free_switch_domain(struct bnxt *bp) 1420 { 1421 int rc = 0; 1422 1423 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) 1424 return; 1425 1426 rc = rte_eth_switch_domain_free(bp->switch_domain_id); 1427 if (rc) 1428 PMD_DRV_LOG(ERR, "free switch domain:%d fail: %d\n", 1429 bp->switch_domain_id, rc); 1430 } 1431 1432 static void bnxt_ptp_get_current_time(void *arg) 1433 { 1434 struct bnxt *bp = arg; 1435 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 1436 int rc; 1437 1438 rc = is_bnxt_in_error(bp); 1439 if (rc) 1440 return; 1441 1442 if (!ptp) 1443 return; 1444 1445 rte_spinlock_lock(&ptp->ptp_lock); 1446 ptp->old_time = ptp->current_time; 1447 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 1448 &ptp->current_time); 1449 rte_spinlock_unlock(&ptp->ptp_lock); 1450 rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp); 1451 if (rc != 0) { 1452 PMD_DRV_LOG(ERR, "Failed to re-schedule PTP alarm\n"); 1453 bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1454 } 1455 } 1456 1457 static int bnxt_schedule_ptp_alarm(struct 
bnxt *bp) 1458 { 1459 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 1460 int rc; 1461 1462 if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) 1463 return 0; 1464 1465 rte_spinlock_lock(&ptp->ptp_lock); 1466 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 1467 &ptp->current_time); 1468 ptp->old_time = ptp->current_time; 1469 rte_spinlock_unlock(&ptp->ptp_lock); 1470 1471 1472 rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp); 1473 return rc; 1474 } 1475 1476 static void bnxt_cancel_ptp_alarm(struct bnxt *bp) 1477 { 1478 if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) { 1479 rte_eal_alarm_cancel(bnxt_ptp_get_current_time, (void *)bp); 1480 bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1481 } 1482 } 1483 1484 static void bnxt_ptp_stop(struct bnxt *bp) 1485 { 1486 bnxt_cancel_ptp_alarm(bp); 1487 bp->flags2 &= ~BNXT_FLAGS2_PTP_TIMESYNC_ENABLED; 1488 } 1489 1490 static int bnxt_ptp_start(struct bnxt *bp) 1491 { 1492 int rc; 1493 1494 rc = bnxt_schedule_ptp_alarm(bp); 1495 if (rc != 0) { 1496 PMD_DRV_LOG(ERR, "Failed to schedule PTP alarm\n"); 1497 } else { 1498 bp->flags2 |= BNXT_FLAGS2_PTP_TIMESYNC_ENABLED; 1499 bp->flags2 |= BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1500 } 1501 1502 return rc; 1503 } 1504 1505 static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) 1506 { 1507 struct bnxt *bp = eth_dev->data->dev_private; 1508 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1509 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1510 struct rte_eth_link link; 1511 uint16_t i; 1512 int ret; 1513 1514 eth_dev->data->dev_started = 0; 1515 1516 /* Prevent crashes when queues are still in use */ 1517 bnxt_stop_rxtx(eth_dev); 1518 1519 bnxt_disable_int(bp); 1520 1521 /* disable uio/vfio intr/eventfd mapping */ 1522 rte_intr_disable(intr_handle); 1523 1524 /* Stop the child representors for this device */ 1525 ret = bnxt_rep_stop_all(bp); 1526 if (ret != 0) 1527 return ret; 1528 1529 /* delete the bnxt ULP port details */ 1530 bnxt_ulp_port_deinit(bp); 1531 1532 bnxt_cancel_fw_health_check(bp); 1533 1534 if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp)) 1535 bnxt_cancel_ptp_alarm(bp); 1536 1537 /* Do not bring link down during reset recovery */ 1538 if (!is_bnxt_in_error(bp)) { 1539 bnxt_dev_set_link_down_op(eth_dev); 1540 /* Wait for link to be reset */ 1541 if (BNXT_SINGLE_PF(bp)) 1542 rte_delay_ms(500); 1543 /* clear the recorded link status */ 1544 memset(&link, 0, sizeof(link)); 1545 rte_eth_linkstatus_set(eth_dev, &link); 1546 } 1547 1548 /* Clean queue intr-vector mapping */ 1549 rte_intr_efd_disable(intr_handle); 1550 rte_intr_vec_list_free(intr_handle); 1551 1552 bnxt_hwrm_port_clr_stats(bp); 1553 bnxt_free_tx_mbufs(bp); 1554 bnxt_free_rx_mbufs(bp); 1555 /* Process any remaining notifications in default completion queue */ 1556 bnxt_int_handler(eth_dev); 1557 bnxt_shutdown_nic(bp); 1558 bnxt_hwrm_if_change(bp, false); 1559 1560 bnxt_free_prev_ring_stats(bp); 1561 rte_free(bp->mark_table); 1562 bp->mark_table = NULL; 1563 1564 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1565 bp->rx_cosq_cnt = 0; 1566 /* All filters are deleted on a port stop. 
*/ 1567 if (BNXT_FLOW_XSTATS_EN(bp)) 1568 bp->flow_stat->flow_count = 0; 1569 1570 eth_dev->data->scattered_rx = 0; 1571 1572 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 1573 eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 1574 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 1575 eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 1576 1577 return 0; 1578 } 1579 1580 /* Unload the driver, release resources */ 1581 int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) 1582 { 1583 struct bnxt *bp = eth_dev->data->dev_private; 1584 1585 pthread_mutex_lock(&bp->err_recovery_lock); 1586 if (bp->flags & BNXT_FLAG_FW_RESET) { 1587 PMD_DRV_LOG(ERR, 1588 "Adapter recovering from error..Please retry\n"); 1589 pthread_mutex_unlock(&bp->err_recovery_lock); 1590 return -EAGAIN; 1591 } 1592 pthread_mutex_unlock(&bp->err_recovery_lock); 1593 1594 return bnxt_dev_stop(eth_dev); 1595 } 1596 1597 int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) 1598 { 1599 struct bnxt *bp = eth_dev->data->dev_private; 1600 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1601 int vlan_mask = 0; 1602 int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT; 1603 1604 if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) 1605 PMD_DRV_LOG(ERR, 1606 "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", 1607 bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); 1608 1609 do { 1610 rc = bnxt_hwrm_if_change(bp, true); 1611 if (rc == 0 || rc != -EAGAIN) 1612 break; 1613 1614 rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL); 1615 } while (retry_cnt--); 1616 1617 if (rc) 1618 return rc; 1619 1620 if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) { 1621 rc = bnxt_handle_if_change_status(bp); 1622 if (rc) 1623 return rc; 1624 } 1625 1626 bnxt_enable_int(bp); 1627 1628 eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev); 1629 1630 rc = bnxt_start_nic(bp); 1631 if (rc) 1632 goto error; 1633 1634 rc = bnxt_alloc_prev_ring_stats(bp); 1635 if (rc) 1636 goto error; 1637 1638 eth_dev->data->dev_started = 1; 1639 1640 bnxt_link_update_op(eth_dev, 0); 1641 1642 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 1643 vlan_mask |= RTE_ETH_VLAN_FILTER_MASK; 1644 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 1645 vlan_mask |= RTE_ETH_VLAN_STRIP_MASK; 1646 rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); 1647 if (rc) 1648 goto error; 1649 1650 /* Initialize bnxt ULP port details */ 1651 rc = bnxt_ulp_port_init(bp); 1652 if (rc) 1653 goto error; 1654 1655 eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev); 1656 eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev); 1657 1658 bnxt_schedule_fw_health_check(bp); 1659 1660 if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp)) 1661 bnxt_schedule_ptp_alarm(bp); 1662 1663 return 0; 1664 1665 error: 1666 bnxt_dev_stop(eth_dev); 1667 return rc; 1668 } 1669 1670 static void 1671 bnxt_uninit_locks(struct bnxt *bp) 1672 { 1673 pthread_mutex_destroy(&bp->flow_lock); 1674 pthread_mutex_destroy(&bp->def_cp_lock); 1675 pthread_mutex_destroy(&bp->health_check_lock); 1676 pthread_mutex_destroy(&bp->err_recovery_lock); 1677 if (bp->rep_info) { 1678 pthread_mutex_destroy(&bp->rep_info->vfr_lock); 1679 pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); 1680 } 1681 } 1682 1683 static void bnxt_drv_uninit(struct bnxt *bp) 1684 { 1685 bnxt_free_leds_info(bp); 1686 bnxt_free_cos_queues(bp); 1687 bnxt_free_link_info(bp); 1688 bnxt_free_parent_info(bp); 1689 bnxt_uninit_locks(bp); 1690 bnxt_free_rep_info(bp); 1691 1692 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); 1693 
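	/*
	 * Added descriptive note: the memzones cached in the driver private
	 * structure are released and the stale pointers cleared so they are
	 * not reused after teardown.
	 */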
bp->tx_mem_zone = NULL; 1694 rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); 1695 bp->rx_mem_zone = NULL; 1696 1697 bnxt_free_vf_info(bp); 1698 bnxt_free_pf_info(bp); 1699 1700 rte_free(bp->grp_info); 1701 bp->grp_info = NULL; 1702 } 1703 1704 static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) 1705 { 1706 struct bnxt *bp = eth_dev->data->dev_private; 1707 int ret = 0; 1708 1709 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1710 return 0; 1711 1712 pthread_mutex_lock(&bp->err_recovery_lock); 1713 if (bp->flags & BNXT_FLAG_FW_RESET) { 1714 PMD_DRV_LOG(ERR, 1715 "Adapter recovering from error...Please retry\n"); 1716 pthread_mutex_unlock(&bp->err_recovery_lock); 1717 return -EAGAIN; 1718 } 1719 pthread_mutex_unlock(&bp->err_recovery_lock); 1720 1721 /* cancel the recovery handler before remove dev */ 1722 rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); 1723 rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); 1724 bnxt_cancel_fc_thread(bp); 1725 rte_eal_alarm_cancel(bnxt_handle_vf_cfg_change, (void *)bp); 1726 1727 if (eth_dev->data->dev_started) 1728 ret = bnxt_dev_stop(eth_dev); 1729 1730 bnxt_uninit_resources(bp, false); 1731 1732 bnxt_drv_uninit(bp); 1733 1734 return ret; 1735 } 1736 1737 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, 1738 uint32_t index) 1739 { 1740 struct bnxt *bp = eth_dev->data->dev_private; 1741 uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; 1742 struct bnxt_vnic_info *vnic; 1743 struct bnxt_filter_info *filter, *temp_filter; 1744 uint32_t i; 1745 1746 if (is_bnxt_in_error(bp)) 1747 return; 1748 1749 /* 1750 * Loop through all VNICs from the specified filter flow pools to 1751 * remove the corresponding MAC addr filter 1752 */ 1753 for (i = 0; i < bp->nr_vnics; i++) { 1754 if (!(pool_mask & (1ULL << i))) 1755 continue; 1756 1757 vnic = &bp->vnic_info[i]; 1758 filter = STAILQ_FIRST(&vnic->filter); 1759 while (filter) { 1760 temp_filter = STAILQ_NEXT(filter, next); 1761 if (filter->mac_index == index) { 1762 STAILQ_REMOVE(&vnic->filter, filter, 1763 bnxt_filter_info, next); 1764 bnxt_hwrm_clear_l2_filter(bp, filter); 1765 bnxt_free_filter(bp, filter); 1766 } 1767 filter = temp_filter; 1768 } 1769 } 1770 } 1771 1772 static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic, 1773 struct rte_ether_addr *mac_addr, uint32_t index, 1774 uint32_t pool) 1775 { 1776 struct bnxt_filter_info *filter; 1777 int rc = 0; 1778 1779 /* Attach requested MAC address to the new l2_filter */ 1780 STAILQ_FOREACH(filter, &vnic->filter, next) { 1781 if (filter->mac_index == index) { 1782 PMD_DRV_LOG(DEBUG, 1783 "MAC addr already existed for pool %d\n", 1784 pool); 1785 return 0; 1786 } 1787 } 1788 1789 filter = bnxt_alloc_filter(bp); 1790 if (!filter) { 1791 PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); 1792 return -ENODEV; 1793 } 1794 1795 /* bnxt_alloc_filter copies default MAC to filter->l2_addr. 
 * So, if the MAC being programmed now is a different one, copy that
 * address to filter->l2_addr.
 */
	if (mac_addr)
		memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;

	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
	if (!rc) {
		filter->mac_index = index;
		if (filter->mac_index == 0)
			STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
		else
			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	} else {
		bnxt_free_filter(bp, filter);
	}

	return rc;
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool);

	return rc;
}

int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_link new;
	int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT :
		  BNXT_MIN_LINK_WAIT_CNT;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	memset(&new, 0, sizeof(new));

	if (bp->link_info == NULL)
		goto out;

	/* Only the single-function PF can bring the PHY down.
	 * In certain scenarios the device is not obliged to bring the link
	 * down even when forced. When the port is stopped, report link down
	 * in those cases.
1868 */ 1869 if (!eth_dev->data->dev_started && 1870 (!BNXT_SINGLE_PF(bp) || bnxt_force_link_config(bp))) 1871 goto out; 1872 1873 do { 1874 /* Retrieve link info from hardware */ 1875 rc = bnxt_get_hwrm_link_config(bp, &new); 1876 if (rc) { 1877 new.link_speed = RTE_ETH_LINK_SPEED_100M; 1878 new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1879 PMD_DRV_LOG(ERR, 1880 "Failed to retrieve link rc = 0x%x!\n", rc); 1881 goto out; 1882 } 1883 1884 if (!wait_to_complete || new.link_status) 1885 break; 1886 1887 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); 1888 } while (cnt--); 1889 1890 out: 1891 /* Timed out or success */ 1892 if (new.link_status != eth_dev->data->dev_link.link_status || 1893 new.link_speed != eth_dev->data->dev_link.link_speed) { 1894 rte_eth_linkstatus_set(eth_dev, &new); 1895 bnxt_print_link_info(eth_dev); 1896 } 1897 1898 return rc; 1899 } 1900 1901 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) 1902 { 1903 struct bnxt *bp = eth_dev->data->dev_private; 1904 struct bnxt_vnic_info *vnic; 1905 uint32_t old_flags; 1906 int rc; 1907 1908 rc = is_bnxt_in_error(bp); 1909 if (rc) 1910 return rc; 1911 1912 /* Filter settings will get applied when port is started */ 1913 if (!eth_dev->data->dev_started) 1914 return 0; 1915 1916 if (bp->vnic_info == NULL) 1917 return 0; 1918 1919 vnic = bnxt_get_default_vnic(bp); 1920 1921 old_flags = vnic->flags; 1922 vnic->flags |= BNXT_VNIC_INFO_PROMISC; 1923 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1924 if (rc != 0) 1925 vnic->flags = old_flags; 1926 1927 return rc; 1928 } 1929 1930 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) 1931 { 1932 struct bnxt *bp = eth_dev->data->dev_private; 1933 struct bnxt_vnic_info *vnic; 1934 uint32_t old_flags; 1935 int rc; 1936 1937 rc = is_bnxt_in_error(bp); 1938 if (rc) 1939 return rc; 1940 1941 /* Filter settings will get applied when port is started */ 1942 if (!eth_dev->data->dev_started) 1943 return 0; 1944 1945 if (bp->vnic_info == NULL) 1946 return 0; 1947 1948 vnic = bnxt_get_default_vnic(bp); 1949 1950 old_flags = vnic->flags; 1951 vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; 1952 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1953 if (rc != 0) 1954 vnic->flags = old_flags; 1955 1956 return rc; 1957 } 1958 1959 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) 1960 { 1961 struct bnxt *bp = eth_dev->data->dev_private; 1962 struct bnxt_vnic_info *vnic; 1963 uint32_t old_flags; 1964 int rc; 1965 1966 rc = is_bnxt_in_error(bp); 1967 if (rc) 1968 return rc; 1969 1970 /* Filter settings will get applied when port is started */ 1971 if (!eth_dev->data->dev_started) 1972 return 0; 1973 1974 if (bp->vnic_info == NULL) 1975 return 0; 1976 1977 vnic = bnxt_get_default_vnic(bp); 1978 1979 old_flags = vnic->flags; 1980 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 1981 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1982 if (rc != 0) 1983 vnic->flags = old_flags; 1984 1985 return rc; 1986 } 1987 1988 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) 1989 { 1990 struct bnxt *bp = eth_dev->data->dev_private; 1991 struct bnxt_vnic_info *vnic; 1992 uint32_t old_flags; 1993 int rc; 1994 1995 rc = is_bnxt_in_error(bp); 1996 if (rc) 1997 return rc; 1998 1999 /* Filter settings will get applied when port is started */ 2000 if (!eth_dev->data->dev_started) 2001 return 0; 2002 2003 if (bp->vnic_info == NULL) 2004 return 0; 2005 2006 vnic = bnxt_get_default_vnic(bp); 2007 2008 old_flags = vnic->flags; 2009 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 
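	/*
	 * Added descriptive note: push the updated Rx mask to firmware; on
	 * failure the previous VNIC flags are restored below, mirroring the
	 * promiscuous/allmulticast enable handlers above.
	 */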
2010 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2011 if (rc != 0) 2012 vnic->flags = old_flags; 2013 2014 return rc; 2015 } 2016 2017 /* Return bnxt_rx_queue pointer corresponding to a given rxq. */ 2018 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) 2019 { 2020 if (qid >= bp->rx_nr_rings) 2021 return NULL; 2022 2023 return bp->eth_dev->data->rx_queues[qid]; 2024 } 2025 2026 /* Return rxq corresponding to a given rss table ring/group ID. */ 2027 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) 2028 { 2029 struct bnxt_rx_queue *rxq; 2030 unsigned int i; 2031 2032 if (!BNXT_HAS_RING_GRPS(bp)) { 2033 for (i = 0; i < bp->rx_nr_rings; i++) { 2034 rxq = bp->eth_dev->data->rx_queues[i]; 2035 if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr) 2036 return rxq->index; 2037 } 2038 } else { 2039 for (i = 0; i < bp->rx_nr_rings; i++) { 2040 if (bp->grp_info[i].fw_grp_id == fwr) 2041 return i; 2042 } 2043 } 2044 2045 return INVALID_HW_RING_ID; 2046 } 2047 2048 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, 2049 struct rte_eth_rss_reta_entry64 *reta_conf, 2050 uint16_t reta_size) 2051 { 2052 struct bnxt *bp = eth_dev->data->dev_private; 2053 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2054 struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp); 2055 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2056 uint16_t idx, sft; 2057 int i, rc; 2058 2059 rc = is_bnxt_in_error(bp); 2060 if (rc) 2061 return rc; 2062 2063 if (!vnic->rss_table) 2064 return -EINVAL; 2065 2066 if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) 2067 return -EINVAL; 2068 2069 if (reta_size != tbl_size) { 2070 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2071 "(%d) must equal the size supported by the hardware " 2072 "(%d)\n", reta_size, tbl_size); 2073 return -EINVAL; 2074 } 2075 2076 if (bnxt_vnic_reta_config_update(bp, vnic, reta_conf, reta_size)) { 2077 PMD_DRV_LOG(ERR, "Error in setting the reta config\n"); 2078 return -EINVAL; 2079 } 2080 for (i = 0; i < reta_size; i++) { 2081 struct bnxt_rx_queue *rxq; 2082 2083 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2084 sft = i % RTE_ETH_RETA_GROUP_SIZE; 2085 2086 if (!(reta_conf[idx].mask & (1ULL << sft))) 2087 continue; 2088 2089 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); if (!rxq) { PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n"); return -EINVAL; } 2090 if (BNXT_CHIP_P5(bp)) { 2091 vnic->rss_table[i * 2] = 2092 rxq->rx_ring->rx_ring_struct->fw_ring_id; 2093 vnic->rss_table[i * 2 + 1] = 2094 rxq->cp_ring->cp_ring_struct->fw_ring_id; 2095 } else { 2096 vnic->rss_table[i] = 2097 vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; 2098 } 2099 } 2100 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2101 return rc; 2102 } 2103 2104 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, 2105 struct rte_eth_rss_reta_entry64 *reta_conf, 2106 uint16_t reta_size) 2107 { 2108 struct bnxt *bp = eth_dev->data->dev_private; 2109 struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp); 2110 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2111 uint16_t idx, sft, i; 2112 int rc; 2113 2114 rc = is_bnxt_in_error(bp); 2115 if (rc) 2116 return rc; 2117 2118 if (!vnic) 2119 return -EINVAL; 2120 if (!vnic->rss_table) 2121 return -EINVAL; 2122 2123 if (reta_size != tbl_size) { 2124 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2125 "(%d) must equal the size supported by the hardware " 2126 "(%d)\n", reta_size, tbl_size); 2127 return -EINVAL; 2128 } 2129 2130 for (idx = 0, i = 0; i < reta_size; i++) { 2131 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2132 sft = i % RTE_ETH_RETA_GROUP_SIZE; 2133
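/* Skip entries not selected in the caller-supplied mask; selected entries are translated from firmware ring/group IDs back to queue ids. */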
2134 if (reta_conf[idx].mask & (1ULL << sft)) { 2135 uint16_t qid; 2136 2137 if (BNXT_CHIP_P5(bp)) 2138 qid = bnxt_rss_to_qid(bp, 2139 vnic->rss_table[i * 2]); 2140 else 2141 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); 2142 2143 if (qid == INVALID_HW_RING_ID) { 2144 PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n"); 2145 return -EINVAL; 2146 } 2147 reta_conf[idx].reta[sft] = qid; 2148 } 2149 } 2150 2151 return 0; 2152 } 2153 2154 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, 2155 struct rte_eth_rss_conf *rss_conf) 2156 { 2157 struct bnxt *bp = eth_dev->data->dev_private; 2158 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2159 struct bnxt_vnic_info *vnic; 2160 int rc; 2161 2162 rc = is_bnxt_in_error(bp); 2163 if (rc) 2164 return rc; 2165 2166 /* 2167 * If RSS enablement were different than dev_configure, 2168 * then return -EINVAL 2169 */ 2170 if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 2171 if (!rss_conf->rss_hf) 2172 PMD_DRV_LOG(ERR, "Hash type NONE\n"); 2173 } else { 2174 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) 2175 return -EINVAL; 2176 } 2177 2178 /* Update the default RSS VNIC(s) */ 2179 vnic = bnxt_get_default_vnic(bp); 2180 vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf); 2181 vnic->hash_mode = 2182 bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf, 2183 RTE_ETH_RSS_LEVEL(rss_conf->rss_hf)); 2184 2185 /* Cache the hash function */ 2186 bp->rss_conf.rss_hf = rss_conf->rss_hf; 2187 2188 /* 2189 * If hashkey is not specified, use the previously configured 2190 * hashkey 2191 */ 2192 if (!rss_conf->rss_key) 2193 goto rss_config; 2194 2195 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) { 2196 PMD_DRV_LOG(ERR, 2197 "Invalid hashkey length, should be %d bytes\n", 2198 HW_HASH_KEY_SIZE); 2199 return -EINVAL; 2200 } 2201 memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); 2202 2203 /* Cache the hash key */ 2204 memcpy(bp->rss_conf.rss_key, rss_conf->rss_key, HW_HASH_KEY_SIZE); 2205 2206 rss_config: 2207 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2208 return rc; 2209 } 2210 2211 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, 2212 struct rte_eth_rss_conf *rss_conf) 2213 { 2214 struct bnxt *bp = eth_dev->data->dev_private; 2215 struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp); 2216 int len, rc; 2217 uint32_t hash_types; 2218 2219 rc = is_bnxt_in_error(bp); 2220 if (rc) 2221 return rc; 2222 2223 /* RSS configuration is the same for all VNICs */ 2224 if (vnic && vnic->rss_hash_key) { 2225 if (rss_conf->rss_key) { 2226 len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ? 
2227 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 2228 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 2229 } 2230 2231 hash_types = vnic->hash_type; 2232 rss_conf->rss_hf = 0; 2233 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { 2234 rss_conf->rss_hf |= RTE_ETH_RSS_IPV4; 2235 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 2236 } 2237 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { 2238 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP; 2239 hash_types &= 2240 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 2241 } 2242 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { 2243 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP; 2244 hash_types &= 2245 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 2246 } 2247 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { 2248 rss_conf->rss_hf |= RTE_ETH_RSS_IPV6; 2249 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 2250 } 2251 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { 2252 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP; 2253 hash_types &= 2254 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 2255 } 2256 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { 2257 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP; 2258 hash_types &= 2259 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 2260 } 2261 2262 rss_conf->rss_hf |= 2263 bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode); 2264 2265 if (hash_types) { 2266 PMD_DRV_LOG(ERR, 2267 "Unknown RSS config from firmware (%08x), RSS disabled", 2268 vnic->hash_type); 2269 return -ENOTSUP; 2270 } 2271 } else { 2272 rss_conf->rss_hf = 0; 2273 } 2274 return 0; 2275 } 2276 2277 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 2278 struct rte_eth_fc_conf *fc_conf) 2279 { 2280 struct bnxt *bp = dev->data->dev_private; 2281 struct rte_eth_link link_info; 2282 int rc; 2283 2284 rc = is_bnxt_in_error(bp); 2285 if (rc) 2286 return rc; 2287 2288 rc = bnxt_get_hwrm_link_config(bp, &link_info); 2289 if (rc) 2290 return rc; 2291 2292 memset(fc_conf, 0, sizeof(*fc_conf)); 2293 if (bp->link_info->auto_pause) 2294 fc_conf->autoneg = 1; 2295 switch (bp->link_info->pause) { 2296 case 0: 2297 fc_conf->mode = RTE_ETH_FC_NONE; 2298 break; 2299 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 2300 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 2301 break; 2302 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 2303 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 2304 break; 2305 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 2306 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 2307 fc_conf->mode = RTE_ETH_FC_FULL; 2308 break; 2309 } 2310 return 0; 2311 } 2312 2313 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 2314 struct rte_eth_fc_conf *fc_conf) 2315 { 2316 struct bnxt *bp = dev->data->dev_private; 2317 int rc; 2318 2319 rc = is_bnxt_in_error(bp); 2320 if (rc) 2321 return rc; 2322 2323 if (!BNXT_SINGLE_PF(bp)) { 2324 PMD_DRV_LOG(ERR, 2325 "Flow Control Settings cannot be modified on VF or on shared PF\n"); 2326 return -ENOTSUP; 2327 } 2328 2329 switch (fc_conf->mode) { 2330 case RTE_ETH_FC_NONE: 2331 bp->link_info->auto_pause = 0; 2332 bp->link_info->force_pause = 0; 2333 break; 2334 case RTE_ETH_FC_RX_PAUSE: 2335 if (fc_conf->autoneg) { 2336 bp->link_info->auto_pause = 2337 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2338 bp->link_info->force_pause = 0; 2339 } else { 2340 bp->link_info->auto_pause = 0; 2341 bp->link_info->force_pause = 2342 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2343 } 2344 break; 2345 case RTE_ETH_FC_TX_PAUSE: 2346 if (fc_conf->autoneg) { 2347 bp->link_info->auto_pause = 2348 
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 2349 bp->link_info->force_pause = 0; 2350 } else { 2351 bp->link_info->auto_pause = 0; 2352 bp->link_info->force_pause = 2353 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 2354 } 2355 break; 2356 case RTE_ETH_FC_FULL: 2357 if (fc_conf->autoneg) { 2358 bp->link_info->auto_pause = 2359 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 2360 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2361 bp->link_info->force_pause = 0; 2362 } else { 2363 bp->link_info->auto_pause = 0; 2364 bp->link_info->force_pause = 2365 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 2366 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2367 } 2368 break; 2369 } 2370 return bnxt_set_hwrm_link_config(bp, true); 2371 } 2372 2373 /* Add UDP tunneling port */ 2374 int 2375 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 2376 struct rte_eth_udp_tunnel *udp_tunnel) 2377 { 2378 struct bnxt *bp = eth_dev->data->dev_private; 2379 uint16_t tunnel_type = 0; 2380 int rc = 0; 2381 2382 rc = is_bnxt_in_error(bp); 2383 if (rc) 2384 return rc; 2385 2386 switch (udp_tunnel->prot_type) { 2387 case RTE_ETH_TUNNEL_TYPE_VXLAN: 2388 if (bp->vxlan_port_cnt) { 2389 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2390 udp_tunnel->udp_port); 2391 if (bp->vxlan_port != udp_tunnel->udp_port) { 2392 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2393 return -ENOSPC; 2394 } 2395 bp->vxlan_port_cnt++; 2396 return 0; 2397 } 2398 tunnel_type = 2399 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 2400 break; 2401 case RTE_ETH_TUNNEL_TYPE_GENEVE: 2402 if (bp->geneve_port_cnt) { 2403 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2404 udp_tunnel->udp_port); 2405 if (bp->geneve_port != udp_tunnel->udp_port) { 2406 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2407 return -ENOSPC; 2408 } 2409 bp->geneve_port_cnt++; 2410 return 0; 2411 } 2412 tunnel_type = 2413 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 2414 break; 2415 case RTE_ETH_TUNNEL_TYPE_ECPRI: 2416 if (bp->ecpri_port_cnt) { 2417 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2418 udp_tunnel->udp_port); 2419 if (bp->ecpri_port != udp_tunnel->udp_port) { 2420 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2421 return -ENOSPC; 2422 } 2423 bp->ecpri_port_cnt++; 2424 return 0; 2425 } 2426 tunnel_type = 2427 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI; 2428 break; 2429 default: 2430 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2431 return -ENOTSUP; 2432 } 2433 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 2434 tunnel_type); 2435 2436 if (rc != 0) 2437 return rc; 2438 2439 if (tunnel_type == 2440 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN) 2441 bp->vxlan_port_cnt++; 2442 2443 if (tunnel_type == 2444 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE) 2445 bp->geneve_port_cnt++; 2446 2447 if (tunnel_type == 2448 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI) 2449 bp->ecpri_port_cnt++; 2450 2451 return rc; 2452 } 2453 2454 int 2455 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 2456 struct rte_eth_udp_tunnel *udp_tunnel) 2457 { 2458 struct bnxt *bp = eth_dev->data->dev_private; 2459 uint16_t tunnel_type = 0; 2460 uint16_t port = 0; 2461 int rc = 0; 2462 2463 rc = is_bnxt_in_error(bp); 2464 if (rc) 2465 return rc; 2466 2467 switch (udp_tunnel->prot_type) { 2468 case RTE_ETH_TUNNEL_TYPE_VXLAN: 2469 if (!bp->vxlan_port_cnt) { 2470 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2471 return -EINVAL; 2472 } 2473 if (bp->vxlan_port != udp_tunnel->udp_port) { 2474 PMD_DRV_LOG(ERR, "Req Port: %d. 
Configured port: %d\n", 2475 udp_tunnel->udp_port, bp->vxlan_port); 2476 return -EINVAL; 2477 } 2478 if (--bp->vxlan_port_cnt) 2479 return 0; 2480 2481 tunnel_type = 2482 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 2483 port = bp->vxlan_fw_dst_port_id; 2484 break; 2485 case RTE_ETH_TUNNEL_TYPE_GENEVE: 2486 if (!bp->geneve_port_cnt) { 2487 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2488 return -EINVAL; 2489 } 2490 if (bp->geneve_port != udp_tunnel->udp_port) { 2491 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2492 udp_tunnel->udp_port, bp->geneve_port); 2493 return -EINVAL; 2494 } 2495 if (--bp->geneve_port_cnt) 2496 return 0; 2497 2498 tunnel_type = 2499 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 2500 port = bp->geneve_fw_dst_port_id; 2501 break; 2502 case RTE_ETH_TUNNEL_TYPE_ECPRI: 2503 if (!bp->ecpri_port_cnt) { 2504 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2505 return -EINVAL; 2506 } 2507 if (bp->ecpri_port != udp_tunnel->udp_port) { 2508 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2509 udp_tunnel->udp_port, bp->ecpri_port); 2510 return -EINVAL; 2511 } 2512 if (--bp->ecpri_port_cnt) 2513 return 0; 2514 2515 tunnel_type = 2516 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ECPRI; 2517 port = bp->ecpri_fw_dst_port_id; 2518 break; 2519 default: 2520 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2521 return -ENOTSUP; 2522 } 2523 2524 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 2525 return rc; 2526 } 2527 2528 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2529 { 2530 struct bnxt_filter_info *filter; 2531 struct bnxt_vnic_info *vnic; 2532 int rc = 0; 2533 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2534 2535 vnic = bnxt_get_default_vnic(bp); 2536 filter = STAILQ_FIRST(&vnic->filter); 2537 while (filter) { 2538 /* Search for this matching MAC+VLAN filter */ 2539 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) { 2540 /* Delete the filter */ 2541 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2542 if (rc) 2543 return rc; 2544 STAILQ_REMOVE(&vnic->filter, filter, 2545 bnxt_filter_info, next); 2546 bnxt_free_filter(bp, filter); 2547 PMD_DRV_LOG(INFO, 2548 "Deleted vlan filter for %d\n", 2549 vlan_id); 2550 return 0; 2551 } 2552 filter = STAILQ_NEXT(filter, next); 2553 } 2554 return -ENOENT; 2555 } 2556 2557 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2558 { 2559 struct bnxt_filter_info *filter; 2560 struct bnxt_vnic_info *vnic; 2561 int rc = 0; 2562 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 2563 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 2564 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2565 2566 /* Implementation notes on the use of VNIC in this command: 2567 * 2568 * By default, these filters belong to default vnic for the function. 2569 * Once these filters are set up, only destination VNIC can be modified. 2570 * If the destination VNIC is not specified in this command, 2571 * then the HWRM shall only create an l2 context id. 2572 */ 2573 2574 vnic = bnxt_get_default_vnic(bp); 2575 filter = STAILQ_FIRST(&vnic->filter); 2576 /* Check if the VLAN has already been added */ 2577 while (filter) { 2578 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) 2579 return -EEXIST; 2580 2581 filter = STAILQ_NEXT(filter, next); 2582 } 2583 2584 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 2585 * command to create MAC+VLAN filter with the right flags, enables set. 
2586 */ 2587 filter = bnxt_alloc_filter(bp); 2588 if (!filter) { 2589 PMD_DRV_LOG(ERR, 2590 "MAC/VLAN filter alloc failed\n"); 2591 return -ENOMEM; 2592 } 2593 /* MAC + VLAN ID filter */ 2594 /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only 2595 * untagged packets are received 2596 * 2597 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged 2598 * packets and only the programmed vlan's packets are received 2599 */ 2600 filter->l2_ivlan = vlan_id; 2601 filter->l2_ivlan_mask = 0x0FFF; 2602 filter->enables |= en; 2603 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 2604 2605 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 2606 if (rc) { 2607 /* Free the newly allocated filter as we were 2608 * not able to create the filter in hardware. 2609 */ 2610 bnxt_free_filter(bp, filter); 2611 return rc; 2612 } 2613 2614 filter->mac_index = 0; 2615 /* Add this new filter to the list */ 2616 if (vlan_id == 0) 2617 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 2618 else 2619 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2620 2621 PMD_DRV_LOG(INFO, 2622 "Added Vlan filter for %d\n", vlan_id); 2623 return rc; 2624 } 2625 2626 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 2627 uint16_t vlan_id, int on) 2628 { 2629 struct bnxt *bp = eth_dev->data->dev_private; 2630 int rc; 2631 2632 rc = is_bnxt_in_error(bp); 2633 if (rc) 2634 return rc; 2635 2636 if (!eth_dev->data->dev_started) { 2637 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); 2638 return -EINVAL; 2639 } 2640 2641 /* These operations apply to ALL existing MAC/VLAN filters */ 2642 if (on) 2643 return bnxt_add_vlan_filter(bp, vlan_id); 2644 else 2645 return bnxt_del_vlan_filter(bp, vlan_id); 2646 } 2647 2648 static int bnxt_del_dflt_mac_filter(struct bnxt *bp, 2649 struct bnxt_vnic_info *vnic) 2650 { 2651 struct bnxt_filter_info *filter; 2652 int rc; 2653 2654 filter = STAILQ_FIRST(&vnic->filter); 2655 while (filter) { 2656 if (filter->mac_index == 0 && 2657 !memcmp(filter->l2_addr, bp->mac_addr, 2658 RTE_ETHER_ADDR_LEN)) { 2659 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2660 if (!rc) { 2661 STAILQ_REMOVE(&vnic->filter, filter, 2662 bnxt_filter_info, next); 2663 bnxt_free_filter(bp, filter); 2664 } 2665 return rc; 2666 } 2667 filter = STAILQ_NEXT(filter, next); 2668 } 2669 return 0; 2670 } 2671 2672 static int 2673 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) 2674 { 2675 struct bnxt_vnic_info *vnic; 2676 unsigned int i; 2677 int rc; 2678 2679 vnic = bnxt_get_default_vnic(bp); 2680 if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 2681 /* Remove any VLAN filters programmed */ 2682 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2683 bnxt_del_vlan_filter(bp, i); 2684 2685 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2686 if (rc) 2687 return rc; 2688 } else { 2689 /* Default filter will allow packets that match the 2690 * dest mac. 
So, it has to be deleted, otherwise, we 2691 * will endup receiving vlan packets for which the 2692 * filter is not programmed, when hw-vlan-filter 2693 * configuration is ON 2694 */ 2695 bnxt_del_dflt_mac_filter(bp, vnic); 2696 /* This filter will allow only untagged packets */ 2697 bnxt_add_vlan_filter(bp, 0); 2698 } 2699 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", 2700 !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)); 2701 2702 return 0; 2703 } 2704 2705 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) 2706 { 2707 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 2708 unsigned int i; 2709 int rc; 2710 2711 /* Destroy vnic filters and vnic */ 2712 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2713 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2714 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2715 bnxt_del_vlan_filter(bp, i); 2716 } 2717 bnxt_del_dflt_mac_filter(bp, vnic); 2718 2719 rc = bnxt_hwrm_vnic_ctx_free(bp, vnic); 2720 if (rc) 2721 return rc; 2722 2723 rc = bnxt_hwrm_vnic_free(bp, vnic); 2724 if (rc) 2725 return rc; 2726 2727 rte_free(vnic->fw_grp_ids); 2728 vnic->fw_grp_ids = NULL; 2729 2730 vnic->rx_queue_cnt = 0; 2731 2732 return 0; 2733 } 2734 2735 static int 2736 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads) 2737 { 2738 struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp); 2739 int rc; 2740 2741 /* Destroy, recreate and reconfigure the default vnic */ 2742 rc = bnxt_free_one_vnic(bp, bp->vnic_queue_db.dflt_vnic_id); 2743 if (rc) 2744 return rc; 2745 2746 /* setup the default vnic details*/ 2747 bnxt_vnic_queue_db_update_dlft_vnic(bp); 2748 2749 rc = bnxt_setup_one_vnic(bp, bp->vnic_queue_db.dflt_vnic_id); 2750 if (rc) 2751 return rc; 2752 2753 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2754 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2755 rc = bnxt_add_vlan_filter(bp, 0); 2756 if (rc) 2757 return rc; 2758 rc = bnxt_restore_vlan_filters(bp); 2759 if (rc) 2760 return rc; 2761 } else { 2762 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2763 if (rc) 2764 return rc; 2765 } 2766 2767 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2768 if (rc) 2769 return rc; 2770 2771 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", 2772 !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)); 2773 2774 return rc; 2775 } 2776 2777 static int 2778 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 2779 { 2780 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 2781 struct bnxt *bp = dev->data->dev_private; 2782 int rc; 2783 2784 rc = is_bnxt_in_error(bp); 2785 if (rc) 2786 return rc; 2787 2788 /* Filter settings will get applied when port is started */ 2789 if (!dev->data->dev_started) 2790 return 0; 2791 2792 if (mask & RTE_ETH_VLAN_FILTER_MASK) { 2793 /* Enable or disable VLAN filtering */ 2794 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads); 2795 if (rc) 2796 return rc; 2797 } 2798 2799 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 2800 /* Enable or disable VLAN stripping */ 2801 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads); 2802 if (rc) 2803 return rc; 2804 } 2805 2806 if (mask & RTE_ETH_VLAN_EXTEND_MASK) { 2807 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 2808 PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); 2809 else 2810 PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); 2811 } 2812 2813 return 0; 2814 } 2815 2816 static int 2817 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, 2818 uint16_t tpid) 2819 { 2820 struct bnxt *bp = dev->data->dev_private; 2821 int qinq = dev->data->dev_conf.rxmode.offloads & 2822 
RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 2823 2824 if (vlan_type != RTE_ETH_VLAN_TYPE_INNER && 2825 vlan_type != RTE_ETH_VLAN_TYPE_OUTER) { 2826 PMD_DRV_LOG(ERR, 2827 "Unsupported vlan type."); 2828 return -EINVAL; 2829 } 2830 if (!qinq) { 2831 PMD_DRV_LOG(ERR, 2832 "QinQ not enabled. Needs to be ON as we can " 2833 "accelerate only outer vlan\n"); 2834 return -EINVAL; 2835 } 2836 2837 if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) { 2838 switch (tpid) { 2839 case RTE_ETHER_TYPE_QINQ: 2840 bp->outer_tpid_bd = 2841 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; 2842 break; 2843 case RTE_ETHER_TYPE_VLAN: 2844 bp->outer_tpid_bd = 2845 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; 2846 break; 2847 case RTE_ETHER_TYPE_QINQ1: 2848 bp->outer_tpid_bd = 2849 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; 2850 break; 2851 case RTE_ETHER_TYPE_QINQ2: 2852 bp->outer_tpid_bd = 2853 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; 2854 break; 2855 case RTE_ETHER_TYPE_QINQ3: 2856 bp->outer_tpid_bd = 2857 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; 2858 break; 2859 default: 2860 PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); 2861 return -EINVAL; 2862 } 2863 bp->outer_tpid_bd |= tpid; 2864 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); 2865 } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) { 2866 PMD_DRV_LOG(ERR, 2867 "Can accelerate only outer vlan in QinQ\n"); 2868 return -EINVAL; 2869 } 2870 2871 return 0; 2872 } 2873 2874 static int 2875 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 2876 struct rte_ether_addr *addr) 2877 { 2878 struct bnxt *bp = dev->data->dev_private; 2879 /* Default Filter is tied to VNIC 0 */ 2880 struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp); 2881 int rc; 2882 2883 rc = is_bnxt_in_error(bp); 2884 if (rc) 2885 return rc; 2886 2887 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 2888 return -EPERM; 2889 2890 if (rte_is_zero_ether_addr(addr)) 2891 return -EINVAL; 2892 2893 /* Filter settings will get applied when port is started */ 2894 if (!dev->data->dev_started) 2895 return 0; 2896 2897 /* Check if the requested MAC is already added */ 2898 if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0) 2899 return 0; 2900 2901 /* Destroy filter and re-create it */ 2902 bnxt_del_dflt_mac_filter(bp, vnic); 2903 2904 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 2905 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2906 /* This filter will allow only untagged packets */ 2907 rc = bnxt_add_vlan_filter(bp, 0); 2908 } else { 2909 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0); 2910 } 2911 2912 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 2913 return rc; 2914 } 2915 2916 static int 2917 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 2918 struct rte_ether_addr *mc_addr_set, 2919 uint32_t nb_mc_addr) 2920 { 2921 struct bnxt *bp = eth_dev->data->dev_private; 2922 struct bnxt_vnic_info *vnic; 2923 uint32_t i = 0; 2924 int rc; 2925 2926 rc = is_bnxt_in_error(bp); 2927 if (rc) 2928 return rc; 2929 2930 vnic = bnxt_get_default_vnic(bp); 2931 2932 bp->nb_mc_addr = nb_mc_addr; 2933 2934 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 2935 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 2936 goto allmulti; 2937 } 2938 2939 /* TODO Check for Duplicate mcast addresses */ 2940 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 2941 for (i = 0; i < nb_mc_addr; i++) 2942 rte_ether_addr_copy(&mc_addr_set[i], &bp->mcast_addr_list[i]); 2943 2944 if (bp->nb_mc_addr) 2945 vnic->flags |= BNXT_VNIC_INFO_MCAST; 2946 else 2947 vnic->flags &= ~BNXT_VNIC_INFO_MCAST; 2948 2949 allmulti: 2950 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 
0, NULL); 2951 } 2952 2953 static int 2954 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 2955 { 2956 struct bnxt *bp = dev->data->dev_private; 2957 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 2958 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 2959 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 2960 uint8_t fw_rsvd = bp->fw_ver & 0xff; 2961 int ret; 2962 2963 ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", 2964 fw_major, fw_minor, fw_updt, fw_rsvd); 2965 if (ret < 0) 2966 return -EINVAL; 2967 2968 ret += 1; /* add the size of '\0' */ 2969 if (fw_size < (size_t)ret) 2970 return ret; 2971 else 2972 return 0; 2973 } 2974 2975 static void 2976 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2977 struct rte_eth_rxq_info *qinfo) 2978 { 2979 struct bnxt *bp = dev->data->dev_private; 2980 struct bnxt_rx_queue *rxq; 2981 2982 if (is_bnxt_in_error(bp)) 2983 return; 2984 2985 rxq = dev->data->rx_queues[queue_id]; 2986 2987 qinfo->mp = rxq->mb_pool; 2988 qinfo->scattered_rx = dev->data->scattered_rx; 2989 qinfo->nb_desc = rxq->nb_rx_desc; 2990 2991 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 2992 qinfo->conf.rx_drop_en = rxq->drop_en; 2993 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 2994 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 2995 } 2996 2997 static void 2998 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2999 struct rte_eth_txq_info *qinfo) 3000 { 3001 struct bnxt *bp = dev->data->dev_private; 3002 struct bnxt_tx_queue *txq; 3003 3004 if (is_bnxt_in_error(bp)) 3005 return; 3006 3007 txq = dev->data->tx_queues[queue_id]; 3008 3009 qinfo->nb_desc = txq->nb_tx_desc; 3010 3011 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 3012 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 3013 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 3014 3015 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 3016 qinfo->conf.tx_rs_thresh = 0; 3017 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 3018 qinfo->conf.offloads = txq->offloads; 3019 } 3020 3021 static const struct { 3022 eth_rx_burst_t pkt_burst; 3023 const char *info; 3024 } bnxt_rx_burst_info[] = { 3025 {bnxt_recv_pkts, "Scalar"}, 3026 #if defined(RTE_ARCH_X86) 3027 {bnxt_recv_pkts_vec, "Vector SSE"}, 3028 {bnxt_recv_pkts_vec_avx2, "Vector AVX2"}, 3029 #endif 3030 #if defined(RTE_ARCH_ARM64) 3031 {bnxt_recv_pkts_vec, "Vector Neon"}, 3032 #endif 3033 }; 3034 3035 static int 3036 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 3037 struct rte_eth_burst_mode *mode) 3038 { 3039 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 3040 size_t i; 3041 3042 for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) { 3043 if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) { 3044 snprintf(mode->info, sizeof(mode->info), "%s", 3045 bnxt_rx_burst_info[i].info); 3046 return 0; 3047 } 3048 } 3049 3050 return -EINVAL; 3051 } 3052 3053 static const struct { 3054 eth_tx_burst_t pkt_burst; 3055 const char *info; 3056 } bnxt_tx_burst_info[] = { 3057 {bnxt_xmit_pkts, "Scalar"}, 3058 #if defined(RTE_ARCH_X86) 3059 {bnxt_xmit_pkts_vec, "Vector SSE"}, 3060 {bnxt_xmit_pkts_vec_avx2, "Vector AVX2"}, 3061 #endif 3062 #if defined(RTE_ARCH_ARM64) 3063 {bnxt_xmit_pkts_vec, "Vector Neon"}, 3064 #endif 3065 }; 3066 3067 static int 3068 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 3069 struct rte_eth_burst_mode *mode) 3070 { 3071 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 3072 size_t i; 3073 3074 for (i = 0; i < 
RTE_DIM(bnxt_tx_burst_info); i++) { 3075 if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) { 3076 snprintf(mode->info, sizeof(mode->info), "%s", 3077 bnxt_tx_burst_info[i].info); 3078 return 0; 3079 } 3080 } 3081 3082 return -EINVAL; 3083 } 3084 3085 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 3086 { 3087 struct bnxt *bp = eth_dev->data->dev_private; 3088 uint32_t rc = 0; 3089 3090 rc = is_bnxt_in_error(bp); 3091 if (rc) 3092 return rc; 3093 3094 /* Return if port is active */ 3095 if (eth_dev->data->dev_started) { 3096 PMD_DRV_LOG(ERR, "Stop port before changing MTU\n"); 3097 return -EBUSY; 3098 } 3099 3100 /* Exit if receive queues are not configured yet */ 3101 if (!eth_dev->data->nb_rx_queues) 3102 return -ENOTSUP; 3103 3104 /* Is there a change in mtu setting? */ 3105 if (eth_dev->data->mtu == new_mtu) 3106 return rc; 3107 3108 if (new_mtu > RTE_ETHER_MTU) 3109 bp->flags |= BNXT_FLAG_JUMBO; 3110 else 3111 bp->flags &= ~BNXT_FLAG_JUMBO; 3112 3113 rc = bnxt_vnic_mru_config(bp, new_mtu); 3114 if (rc) { 3115 PMD_DRV_LOG(ERR, "failed to update mtu in vnic context\n"); 3116 return rc; 3117 } 3118 3119 if (bnxt_hwrm_config_host_mtu(bp)) 3120 PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n"); 3121 3122 PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); 3123 3124 return rc; 3125 } 3126 3127 static int 3128 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 3129 { 3130 struct bnxt *bp = dev->data->dev_private; 3131 uint16_t vlan = bp->vlan; 3132 int rc; 3133 3134 rc = is_bnxt_in_error(bp); 3135 if (rc) 3136 return rc; 3137 3138 if (!BNXT_SINGLE_PF(bp)) { 3139 PMD_DRV_LOG(ERR, "PVID cannot be modified on VF or on shared PF\n"); 3140 return -ENOTSUP; 3141 } 3142 bp->vlan = on ? pvid : 0; 3143 3144 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 3145 if (rc) 3146 bp->vlan = vlan; 3147 return rc; 3148 } 3149 3150 static int 3151 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 3152 { 3153 struct bnxt *bp = dev->data->dev_private; 3154 int rc; 3155 3156 rc = is_bnxt_in_error(bp); 3157 if (rc) 3158 return rc; 3159 3160 return bnxt_hwrm_port_led_cfg(bp, true); 3161 } 3162 3163 static int 3164 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 3165 { 3166 struct bnxt *bp = dev->data->dev_private; 3167 int rc; 3168 3169 rc = is_bnxt_in_error(bp); 3170 if (rc) 3171 return rc; 3172 3173 return bnxt_hwrm_port_led_cfg(bp, false); 3174 } 3175 3176 static uint32_t 3177 bnxt_rx_queue_count_op(void *rx_queue) 3178 { 3179 struct bnxt *bp; 3180 struct bnxt_cp_ring_info *cpr; 3181 uint32_t desc = 0, raw_cons, cp_ring_size; 3182 struct bnxt_rx_queue *rxq; 3183 struct rx_pkt_cmpl *rxcmp; 3184 int rc; 3185 3186 rxq = rx_queue; 3187 bp = rxq->bp; 3188 3189 rc = is_bnxt_in_error(bp); 3190 if (rc) 3191 return rc; 3192 3193 cpr = rxq->cp_ring; 3194 raw_cons = cpr->cp_raw_cons; 3195 cp_ring_size = cpr->cp_ring_struct->ring_size; 3196 3197 while (1) { 3198 uint32_t agg_cnt, cons, cmpl_type; 3199 3200 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3201 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3202 3203 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3204 break; 3205 3206 cmpl_type = CMP_TYPE(rxcmp); 3207 3208 switch (cmpl_type) { 3209 case CMPL_BASE_TYPE_RX_L2: 3210 case CMPL_BASE_TYPE_RX_L2_V2: 3211 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3212 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3213 desc++; 3214 break; 3215 3216 case CMPL_BASE_TYPE_RX_TPA_END: 3217 if (BNXT_CHIP_P5(rxq->bp)) { 3218 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3219 3220 p5_tpa_end = (void 
*)rxcmp; 3221 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3222 } else { 3223 struct rx_tpa_end_cmpl *tpa_end; 3224 3225 tpa_end = (void *)rxcmp; 3226 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3227 } 3228 3229 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3230 desc++; 3231 break; 3232 3233 default: 3234 raw_cons += CMP_LEN(cmpl_type); 3235 } 3236 } 3237 3238 return desc; 3239 } 3240 3241 static int 3242 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 3243 { 3244 struct bnxt_rx_queue *rxq = rx_queue; 3245 struct bnxt_cp_ring_info *cpr; 3246 struct bnxt_rx_ring_info *rxr; 3247 uint32_t desc, raw_cons, cp_ring_size; 3248 struct bnxt *bp = rxq->bp; 3249 struct rx_pkt_cmpl *rxcmp; 3250 int rc; 3251 3252 rc = is_bnxt_in_error(bp); 3253 if (rc) 3254 return rc; 3255 3256 if (offset >= rxq->nb_rx_desc) 3257 return -EINVAL; 3258 3259 rxr = rxq->rx_ring; 3260 cpr = rxq->cp_ring; 3261 cp_ring_size = cpr->cp_ring_struct->ring_size; 3262 3263 /* 3264 * For the vector receive case, the completion at the requested 3265 * offset can be indexed directly. 3266 */ 3267 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 3268 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { 3269 struct rx_pkt_cmpl *rxcmp; 3270 uint32_t cons; 3271 3272 /* Check status of completion descriptor. */ 3273 raw_cons = cpr->cp_raw_cons + 3274 offset * CMP_LEN(CMPL_BASE_TYPE_RX_L2); 3275 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3276 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3277 3278 if (bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3279 return RTE_ETH_RX_DESC_DONE; 3280 3281 /* Check whether rx desc has an mbuf attached. */ 3282 cons = RING_CMP(rxr->rx_ring_struct, raw_cons / 2); 3283 if (cons >= rxq->rxrearm_start && 3284 cons < rxq->rxrearm_start + rxq->rxrearm_nb) { 3285 return RTE_ETH_RX_DESC_UNAVAIL; 3286 } 3287 3288 return RTE_ETH_RX_DESC_AVAIL; 3289 } 3290 #endif 3291 3292 /* 3293 * For the non-vector receive case, scan the completion ring to 3294 * locate the completion descriptor for the requested offset. 
3295 */ 3296 raw_cons = cpr->cp_raw_cons; 3297 desc = 0; 3298 while (1) { 3299 uint32_t agg_cnt, cons, cmpl_type; 3300 3301 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3302 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3303 3304 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3305 break; 3306 3307 cmpl_type = CMP_TYPE(rxcmp); 3308 3309 switch (cmpl_type) { 3310 case CMPL_BASE_TYPE_RX_L2: 3311 case CMPL_BASE_TYPE_RX_L2_V2: 3312 if (desc == offset) { 3313 cons = rxcmp->opaque; 3314 if (rxr->rx_buf_ring[cons]) 3315 return RTE_ETH_RX_DESC_DONE; 3316 else 3317 return RTE_ETH_RX_DESC_UNAVAIL; 3318 } 3319 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3320 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3321 desc++; 3322 break; 3323 3324 case CMPL_BASE_TYPE_RX_TPA_END: 3325 if (desc == offset) 3326 return RTE_ETH_RX_DESC_DONE; 3327 3328 if (BNXT_CHIP_P5(rxq->bp)) { 3329 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3330 3331 p5_tpa_end = (void *)rxcmp; 3332 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3333 } else { 3334 struct rx_tpa_end_cmpl *tpa_end; 3335 3336 tpa_end = (void *)rxcmp; 3337 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3338 } 3339 3340 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3341 desc++; 3342 break; 3343 3344 default: 3345 raw_cons += CMP_LEN(cmpl_type); 3346 } 3347 } 3348 3349 return RTE_ETH_RX_DESC_AVAIL; 3350 } 3351 3352 static int 3353 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 3354 { 3355 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 3356 struct bnxt_cp_ring_info *cpr = txq->cp_ring; 3357 uint32_t ring_mask, raw_cons, nb_tx_pkts = 0; 3358 struct cmpl_base *cp_desc_ring; 3359 int rc; 3360 3361 rc = is_bnxt_in_error(txq->bp); 3362 if (rc) 3363 return rc; 3364 3365 if (offset >= txq->nb_tx_desc) 3366 return -EINVAL; 3367 3368 /* Return "desc done" if descriptor is available for use. */ 3369 if (bnxt_tx_bds_in_hw(txq) <= offset) 3370 return RTE_ETH_TX_DESC_DONE; 3371 3372 raw_cons = cpr->cp_raw_cons; 3373 cp_desc_ring = cpr->cp_desc_ring; 3374 ring_mask = cpr->cp_ring_struct->ring_mask; 3375 3376 /* Check to see if hw has posted a completion for the descriptor. */ 3377 while (1) { 3378 struct tx_cmpl *txcmp; 3379 uint32_t cons; 3380 3381 cons = RING_CMPL(ring_mask, raw_cons); 3382 txcmp = (struct tx_cmpl *)&cp_desc_ring[cons]; 3383 3384 if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1)) 3385 break; 3386 3387 if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) 3388 nb_tx_pkts += rte_le_to_cpu_32(txcmp->opaque); 3389 3390 if (nb_tx_pkts > offset) 3391 return RTE_ETH_TX_DESC_DONE; 3392 3393 raw_cons = NEXT_RAW_CMP(raw_cons); 3394 } 3395 3396 /* Descriptor is pending transmit, not yet completed by hardware. */ 3397 return RTE_ETH_TX_DESC_FULL; 3398 } 3399 3400 int 3401 bnxt_flow_ops_get_op(struct rte_eth_dev *dev, 3402 const struct rte_flow_ops **ops) 3403 { 3404 struct bnxt *bp = dev->data->dev_private; 3405 int ret = 0; 3406 3407 if (!bp) 3408 return -EIO; 3409 3410 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) { 3411 struct bnxt_representor *vfr = dev->data->dev_private; 3412 bp = vfr->parent_dev->data->dev_private; 3413 /* parent is deleted while children are still valid */ 3414 if (!bp) { 3415 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error\n", 3416 dev->data->port_id); 3417 return -EIO; 3418 } 3419 } 3420 3421 ret = is_bnxt_in_error(bp); 3422 if (ret) 3423 return ret; 3424 3425 /* PMD supports thread-safe flow operations. rte_flow API 3426 * functions can avoid mutex for multi-thread safety. 
3427 */ 3428 dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; 3429 3430 if (BNXT_TRUFLOW_EN(bp)) 3431 *ops = &bnxt_ulp_rte_flow_ops; 3432 else 3433 *ops = &bnxt_flow_ops; 3434 3435 return ret; 3436 } 3437 3438 static const uint32_t * 3439 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 3440 { 3441 static const uint32_t ptypes[] = { 3442 RTE_PTYPE_L2_ETHER_VLAN, 3443 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 3444 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 3445 RTE_PTYPE_L4_ICMP, 3446 RTE_PTYPE_L4_TCP, 3447 RTE_PTYPE_L4_UDP, 3448 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 3449 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 3450 RTE_PTYPE_INNER_L4_ICMP, 3451 RTE_PTYPE_INNER_L4_TCP, 3452 RTE_PTYPE_INNER_L4_UDP, 3453 RTE_PTYPE_UNKNOWN 3454 }; 3455 3456 if (!dev->rx_pkt_burst) 3457 return NULL; 3458 3459 return ptypes; 3460 } 3461 3462 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 3463 int reg_win) 3464 { 3465 uint32_t reg_base = *reg_arr & 0xfffff000; 3466 uint32_t win_off; 3467 int i; 3468 3469 for (i = 0; i < count; i++) { 3470 if ((reg_arr[i] & 0xfffff000) != reg_base) 3471 return -ERANGE; 3472 } 3473 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 3474 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 3475 return 0; 3476 } 3477 3478 static int bnxt_map_ptp_regs(struct bnxt *bp) 3479 { 3480 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3481 uint32_t *reg_arr; 3482 int rc, i; 3483 3484 reg_arr = ptp->rx_regs; 3485 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 3486 if (rc) 3487 return rc; 3488 3489 reg_arr = ptp->tx_regs; 3490 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 3491 if (rc) 3492 return rc; 3493 3494 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 3495 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 3496 3497 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 3498 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 3499 3500 return 0; 3501 } 3502 3503 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 3504 { 3505 rte_write32(0, (uint8_t *)bp->bar0 + 3506 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 3507 rte_write32(0, (uint8_t *)bp->bar0 + 3508 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 3509 } 3510 3511 static uint64_t bnxt_cc_read(struct bnxt *bp) 3512 { 3513 uint64_t ns; 3514 3515 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3516 BNXT_GRCPF_REG_SYNC_TIME)); 3517 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3518 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 3519 return ns; 3520 } 3521 3522 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 3523 { 3524 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3525 uint32_t fifo; 3526 3527 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3528 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3529 if (fifo & BNXT_PTP_TX_FIFO_EMPTY) 3530 return -EAGAIN; 3531 3532 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3533 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3534 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3535 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 3536 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3537 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 3538 rte_read32((uint8_t *)bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_SEQ]); 3539 3540 return 0; 3541 } 3542 3543 static int bnxt_clr_rx_ts(struct bnxt *bp, uint64_t *last_ts) 3544 { 3545 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3546 struct bnxt_pf_info *pf = bp->pf; 3547 uint16_t port_id; 3548 int i = 0; 3549 uint32_t fifo; 3550 3551 if (!ptp || (bp->flags & BNXT_FLAG_CHIP_P5)) 3552 return -EINVAL; 3553 3554 port_id = 
pf->port_id; 3555 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3556 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3557 while ((fifo & BNXT_PTP_RX_FIFO_PENDING) && (i < BNXT_PTP_RX_PND_CNT)) { 3558 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3559 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3560 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3561 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3562 *last_ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3563 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3564 *last_ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3565 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3566 i++; 3567 } 3568 3569 if (i >= BNXT_PTP_RX_PND_CNT) 3570 return -EBUSY; 3571 3572 return 0; 3573 } 3574 3575 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 3576 { 3577 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3578 struct bnxt_pf_info *pf = bp->pf; 3579 uint16_t port_id; 3580 uint32_t fifo; 3581 3582 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3583 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3584 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 3585 return -EAGAIN; 3586 3587 port_id = pf->port_id; 3588 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3589 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3590 3591 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3592 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3593 if (fifo & BNXT_PTP_RX_FIFO_PENDING) 3594 return bnxt_clr_rx_ts(bp, ts); 3595 3596 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3597 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3598 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3599 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3600 3601 return 0; 3602 } 3603 3604 static int 3605 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 3606 { 3607 uint64_t ns; 3608 struct bnxt *bp = dev->data->dev_private; 3609 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3610 3611 if (!ptp) 3612 return -ENOTSUP; 3613 3614 ns = rte_timespec_to_ns(ts); 3615 /* Set the timecounters to a new value. 
*/ 3616 ptp->tc.nsec = ns; 3617 ptp->tx_tstamp_tc.nsec = ns; 3618 ptp->rx_tstamp_tc.nsec = ns; 3619 3620 return 0; 3621 } 3622 3623 static int 3624 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 3625 { 3626 struct bnxt *bp = dev->data->dev_private; 3627 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3628 uint64_t ns, systime_cycles = 0; 3629 int rc = 0; 3630 3631 if (!ptp) 3632 return -ENOTSUP; 3633 3634 if (BNXT_CHIP_P5(bp)) 3635 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 3636 &systime_cycles); 3637 else 3638 systime_cycles = bnxt_cc_read(bp); 3639 3640 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3641 *ts = rte_ns_to_timespec(ns); 3642 3643 return rc; 3644 } 3645 static int 3646 bnxt_timesync_enable(struct rte_eth_dev *dev) 3647 { 3648 struct bnxt *bp = dev->data->dev_private; 3649 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3650 uint32_t shift = 0; 3651 int rc; 3652 3653 if (!ptp) 3654 return -ENOTSUP; 3655 3656 ptp->rx_filter = 1; 3657 ptp->tx_tstamp_en = 1; 3658 ptp->filter_all = 1; 3659 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3660 3661 rc = bnxt_hwrm_ptp_cfg(bp); 3662 if (rc) 3663 return rc; 3664 3665 rte_spinlock_init(&ptp->ptp_lock); 3666 bp->ptp_all_rx_tstamp = 1; 3667 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 3668 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3669 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3670 3671 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3672 ptp->tc.cc_shift = shift; 3673 ptp->tc.nsec_mask = (1ULL << shift) - 1; 3674 3675 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3676 ptp->rx_tstamp_tc.cc_shift = shift; 3677 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3678 3679 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3680 ptp->tx_tstamp_tc.cc_shift = shift; 3681 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3682 3683 if (!BNXT_CHIP_P5(bp)) 3684 bnxt_map_ptp_regs(bp); 3685 else 3686 rc = bnxt_ptp_start(bp); 3687 3688 return rc; 3689 } 3690 3691 static int 3692 bnxt_timesync_disable(struct rte_eth_dev *dev) 3693 { 3694 struct bnxt *bp = dev->data->dev_private; 3695 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3696 3697 if (!ptp) 3698 return -ENOTSUP; 3699 3700 ptp->rx_filter = 0; 3701 ptp->tx_tstamp_en = 0; 3702 ptp->rxctl = 0; 3703 ptp->filter_all = 0; 3704 3705 bnxt_hwrm_ptp_cfg(bp); 3706 3707 bp->ptp_all_rx_tstamp = 0; 3708 if (!BNXT_CHIP_P5(bp)) 3709 bnxt_unmap_ptp_regs(bp); 3710 else 3711 bnxt_ptp_stop(bp); 3712 3713 return 0; 3714 } 3715 3716 static int 3717 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3718 struct timespec *timestamp, 3719 uint32_t flags __rte_unused) 3720 { 3721 struct bnxt *bp = dev->data->dev_private; 3722 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3723 uint64_t rx_tstamp_cycles = 0; 3724 uint64_t ns; 3725 3726 if (!ptp) 3727 return -ENOTSUP; 3728 3729 if (BNXT_CHIP_P5(bp)) 3730 rx_tstamp_cycles = ptp->rx_timestamp; 3731 else 3732 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 3733 3734 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 3735 *timestamp = rte_ns_to_timespec(ns); 3736 return 0; 3737 } 3738 3739 static int 3740 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3741 struct timespec *timestamp) 3742 { 3743 struct bnxt *bp = dev->data->dev_private; 3744 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3745 uint64_t tx_tstamp_cycles = 0; 3746 uint64_t ns; 3747 int rc = 0; 3748 3749 if (!ptp) 3750 return -ENOTSUP; 3751 3752 if (BNXT_CHIP_P5(bp)) 3753 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, 3754 
&tx_tstamp_cycles); 3755 else 3756 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 3757 3758 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 3759 *timestamp = rte_ns_to_timespec(ns); 3760 3761 return rc; 3762 } 3763 3764 static int 3765 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 3766 { 3767 struct bnxt *bp = dev->data->dev_private; 3768 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3769 3770 if (!ptp) 3771 return -ENOTSUP; 3772 3773 ptp->tc.nsec += delta; 3774 ptp->tx_tstamp_tc.nsec += delta; 3775 ptp->rx_tstamp_tc.nsec += delta; 3776 3777 return 0; 3778 } 3779 3780 static int 3781 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 3782 { 3783 struct bnxt *bp = dev->data->dev_private; 3784 int rc; 3785 uint32_t dir_entries; 3786 uint32_t entry_length; 3787 3788 rc = is_bnxt_in_error(bp); 3789 if (rc) 3790 return rc; 3791 3792 PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n", 3793 bp->pdev->addr.domain, bp->pdev->addr.bus, 3794 bp->pdev->addr.devid, bp->pdev->addr.function); 3795 3796 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 3797 if (rc != 0) 3798 return rc; 3799 3800 return dir_entries * entry_length; 3801 } 3802 3803 static int 3804 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 3805 struct rte_dev_eeprom_info *in_eeprom) 3806 { 3807 struct bnxt *bp = dev->data->dev_private; 3808 uint32_t index; 3809 uint32_t offset; 3810 int rc; 3811 3812 rc = is_bnxt_in_error(bp); 3813 if (rc) 3814 return rc; 3815 3816 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3817 bp->pdev->addr.domain, bp->pdev->addr.bus, 3818 bp->pdev->addr.devid, bp->pdev->addr.function, 3819 in_eeprom->offset, in_eeprom->length); 3820 3821 if (in_eeprom->offset == 0) /* special offset value to get directory */ 3822 return bnxt_get_nvram_directory(bp, in_eeprom->length, 3823 in_eeprom->data); 3824 3825 index = in_eeprom->offset >> 24; 3826 offset = in_eeprom->offset & 0xffffff; 3827 3828 if (index != 0) 3829 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 3830 in_eeprom->length, in_eeprom->data); 3831 3832 return 0; 3833 } 3834 3835 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 3836 { 3837 switch (dir_type) { 3838 case BNX_DIR_TYPE_CHIMP_PATCH: 3839 case BNX_DIR_TYPE_BOOTCODE: 3840 case BNX_DIR_TYPE_BOOTCODE_2: 3841 case BNX_DIR_TYPE_APE_FW: 3842 case BNX_DIR_TYPE_APE_PATCH: 3843 case BNX_DIR_TYPE_KONG_FW: 3844 case BNX_DIR_TYPE_KONG_PATCH: 3845 case BNX_DIR_TYPE_BONO_FW: 3846 case BNX_DIR_TYPE_BONO_PATCH: 3847 /* FALLTHROUGH */ 3848 return true; 3849 } 3850 3851 return false; 3852 } 3853 3854 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 3855 { 3856 switch (dir_type) { 3857 case BNX_DIR_TYPE_AVS: 3858 case BNX_DIR_TYPE_EXP_ROM_MBA: 3859 case BNX_DIR_TYPE_PCIE: 3860 case BNX_DIR_TYPE_TSCF_UCODE: 3861 case BNX_DIR_TYPE_EXT_PHY: 3862 case BNX_DIR_TYPE_CCM: 3863 case BNX_DIR_TYPE_ISCSI_BOOT: 3864 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3865 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3866 /* FALLTHROUGH */ 3867 return true; 3868 } 3869 3870 return false; 3871 } 3872 3873 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 3874 { 3875 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3876 bnxt_dir_type_is_other_exec_format(dir_type); 3877 } 3878 3879 static int 3880 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 3881 struct rte_dev_eeprom_info *in_eeprom) 3882 { 3883 struct bnxt *bp = dev->data->dev_private; 3884 uint8_t index, dir_op; 3885 uint16_t type, ext, ordinal, attr; 3886 int rc; 3887 3888 rc = is_bnxt_in_error(bp); 3889 if 
(rc) 3890 return rc; 3891 3892 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3893 bp->pdev->addr.domain, bp->pdev->addr.bus, 3894 bp->pdev->addr.devid, bp->pdev->addr.function, 3895 in_eeprom->offset, in_eeprom->length); 3896 3897 if (!BNXT_PF(bp)) { 3898 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 3899 return -EINVAL; 3900 } 3901 3902 type = in_eeprom->magic >> 16; 3903 3904 if (type == 0xffff) { /* special value for directory operations */ 3905 index = in_eeprom->magic & 0xff; 3906 dir_op = in_eeprom->magic >> 8; 3907 if (index == 0) 3908 return -EINVAL; 3909 switch (dir_op) { 3910 case 0x0e: /* erase */ 3911 if (in_eeprom->offset != ~in_eeprom->magic) 3912 return -EINVAL; 3913 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 3914 default: 3915 return -EINVAL; 3916 } 3917 } 3918 3919 /* Create or re-write an NVM item: */ 3920 if (bnxt_dir_type_is_executable(type) == true) 3921 return -EOPNOTSUPP; 3922 ext = in_eeprom->magic & 0xffff; 3923 ordinal = in_eeprom->offset >> 16; 3924 attr = in_eeprom->offset & 0xffff; 3925 3926 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 3927 in_eeprom->data, in_eeprom->length); 3928 } 3929 3930 static int bnxt_get_module_info(struct rte_eth_dev *dev, 3931 struct rte_eth_dev_module_info *modinfo) 3932 { 3933 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3934 struct bnxt *bp = dev->data->dev_private; 3935 int rc; 3936 3937 /* No point in going further if phy status indicates 3938 * module is not inserted or if it is powered down or 3939 * if it is of type 10GBase-T 3940 */ 3941 if (bp->link_info->module_status > 3942 HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG) { 3943 PMD_DRV_LOG(NOTICE, "Port %u : Module is not inserted or is powered down\n", 3944 dev->data->port_id); 3945 return -ENOTSUP; 3946 } 3947 3948 /* This feature is not supported in older firmware versions */ 3949 if (bp->hwrm_spec_code < 0x10202) { 3950 PMD_DRV_LOG(NOTICE, "Port %u : Feature is not supported in older firmware\n", 3951 dev->data->port_id); 3952 return -ENOTSUP; 3953 } 3954 3955 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3956 SFF_DIAG_SUPPORT_OFFSET + 1, 3957 module_info); 3958 3959 if (rc) 3960 return rc; 3961 3962 switch (module_info[0]) { 3963 case SFF_MODULE_ID_SFP: 3964 modinfo->type = RTE_ETH_MODULE_SFF_8472; 3965 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 3966 if (module_info[SFF_DIAG_SUPPORT_OFFSET] == 0) 3967 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3968 break; 3969 case SFF_MODULE_ID_QSFP: 3970 case SFF_MODULE_ID_QSFP_PLUS: 3971 modinfo->type = RTE_ETH_MODULE_SFF_8436; 3972 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3973 break; 3974 case SFF_MODULE_ID_QSFP28: 3975 modinfo->type = RTE_ETH_MODULE_SFF_8636; 3976 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 3977 if (module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) 3978 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_LEN; 3979 break; 3980 default: 3981 PMD_DRV_LOG(NOTICE, "Port %u : Unsupported module\n", dev->data->port_id); 3982 return -ENOTSUP; 3983 } 3984 3985 PMD_DRV_LOG(INFO, "Port %u : modinfo->type = %d modinfo->eeprom_len = %d\n", 3986 dev->data->port_id, modinfo->type, modinfo->eeprom_len); 3987 3988 return 0; 3989 } 3990 3991 static int bnxt_get_module_eeprom(struct rte_eth_dev *dev, 3992 struct rte_dev_eeprom_info *info) 3993 { 3994 uint8_t pg_addr[5] = { I2C_DEV_ADDR_A0, I2C_DEV_ADDR_A0 }; 3995 uint32_t offset = info->offset, length = info->length; 3996 uint8_t 
module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3997 struct bnxt *bp = dev->data->dev_private; 3998 uint8_t *data = info->data; 3999 uint8_t page = offset >> 7; 4000 uint8_t max_pages = 2; 4001 uint8_t opt_pages; 4002 int rc; 4003 4004 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 4005 SFF_DIAG_SUPPORT_OFFSET + 1, 4006 module_info); 4007 if (rc) 4008 return rc; 4009 4010 switch (module_info[0]) { 4011 case SFF_MODULE_ID_SFP: 4012 module_info[SFF_DIAG_SUPPORT_OFFSET] = 0; 4013 if (module_info[SFF_DIAG_SUPPORT_OFFSET]) { 4014 pg_addr[2] = I2C_DEV_ADDR_A2; 4015 pg_addr[3] = I2C_DEV_ADDR_A2; 4016 max_pages = 4; 4017 } 4018 break; 4019 case SFF_MODULE_ID_QSFP28: 4020 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 4021 SFF8636_OPT_PAGES_OFFSET, 4022 1, &opt_pages); 4023 if (rc) 4024 return rc; 4025 4026 if (opt_pages & SFF8636_PAGE1_MASK) { 4027 pg_addr[2] = I2C_DEV_ADDR_A0; 4028 max_pages = 3; 4029 } 4030 if (opt_pages & SFF8636_PAGE2_MASK) { 4031 pg_addr[3] = I2C_DEV_ADDR_A0; 4032 max_pages = 4; 4033 } 4034 if (~module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) { 4035 pg_addr[4] = I2C_DEV_ADDR_A0; 4036 max_pages = 5; 4037 } 4038 break; 4039 default: 4040 break; 4041 } 4042 4043 memset(data, 0, length); 4044 4045 offset &= 0xff; 4046 while (length && page < max_pages) { 4047 uint8_t raw_page = page ? page - 1 : 0; 4048 uint16_t chunk; 4049 4050 if (pg_addr[page] == I2C_DEV_ADDR_A2) 4051 raw_page = 0; 4052 else if (page) 4053 offset |= 0x80; 4054 chunk = RTE_MIN(length, 256 - offset); 4055 4056 if (pg_addr[page]) { 4057 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, pg_addr[page], 4058 raw_page, offset, 4059 chunk, data); 4060 if (rc) 4061 return rc; 4062 } 4063 4064 data += chunk; 4065 length -= chunk; 4066 offset = 0; 4067 page += 1 + (chunk > 128); 4068 } 4069 4070 return length ? 
-EINVAL : 0; 4071 } 4072 4073 /* 4074 * Initialization 4075 */ 4076 4077 static const struct eth_dev_ops bnxt_dev_ops = { 4078 .dev_infos_get = bnxt_dev_info_get_op, 4079 .dev_close = bnxt_dev_close_op, 4080 .dev_configure = bnxt_dev_configure_op, 4081 .dev_start = bnxt_dev_start_op, 4082 .dev_stop = bnxt_dev_stop_op, 4083 .dev_set_link_up = bnxt_dev_set_link_up_op, 4084 .dev_set_link_down = bnxt_dev_set_link_down_op, 4085 .stats_get = bnxt_stats_get_op, 4086 .stats_reset = bnxt_stats_reset_op, 4087 .rx_queue_setup = bnxt_rx_queue_setup_op, 4088 .rx_queue_release = bnxt_rx_queue_release_op, 4089 .tx_queue_setup = bnxt_tx_queue_setup_op, 4090 .tx_queue_release = bnxt_tx_queue_release_op, 4091 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 4092 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 4093 .reta_update = bnxt_reta_update_op, 4094 .reta_query = bnxt_reta_query_op, 4095 .rss_hash_update = bnxt_rss_hash_update_op, 4096 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 4097 .link_update = bnxt_link_update_op, 4098 .promiscuous_enable = bnxt_promiscuous_enable_op, 4099 .promiscuous_disable = bnxt_promiscuous_disable_op, 4100 .allmulticast_enable = bnxt_allmulticast_enable_op, 4101 .allmulticast_disable = bnxt_allmulticast_disable_op, 4102 .mac_addr_add = bnxt_mac_addr_add_op, 4103 .mac_addr_remove = bnxt_mac_addr_remove_op, 4104 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 4105 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 4106 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 4107 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 4108 .vlan_filter_set = bnxt_vlan_filter_set_op, 4109 .vlan_offload_set = bnxt_vlan_offload_set_op, 4110 .vlan_tpid_set = bnxt_vlan_tpid_set_op, 4111 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 4112 .mtu_set = bnxt_mtu_set_op, 4113 .mac_addr_set = bnxt_set_default_mac_addr_op, 4114 .xstats_get = bnxt_dev_xstats_get_op, 4115 .xstats_get_names = bnxt_dev_xstats_get_names_op, 4116 .xstats_reset = bnxt_dev_xstats_reset_op, 4117 .fw_version_get = bnxt_fw_version_get, 4118 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 4119 .rxq_info_get = bnxt_rxq_info_get_op, 4120 .txq_info_get = bnxt_txq_info_get_op, 4121 .rx_burst_mode_get = bnxt_rx_burst_mode_get, 4122 .tx_burst_mode_get = bnxt_tx_burst_mode_get, 4123 .dev_led_on = bnxt_dev_led_on_op, 4124 .dev_led_off = bnxt_dev_led_off_op, 4125 .rx_queue_start = bnxt_rx_queue_start, 4126 .rx_queue_stop = bnxt_rx_queue_stop, 4127 .tx_queue_start = bnxt_tx_queue_start, 4128 .tx_queue_stop = bnxt_tx_queue_stop, 4129 .flow_ops_get = bnxt_flow_ops_get_op, 4130 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 4131 .get_eeprom_length = bnxt_get_eeprom_length_op, 4132 .get_eeprom = bnxt_get_eeprom_op, 4133 .set_eeprom = bnxt_set_eeprom_op, 4134 .get_module_info = bnxt_get_module_info, 4135 .get_module_eeprom = bnxt_get_module_eeprom, 4136 .timesync_enable = bnxt_timesync_enable, 4137 .timesync_disable = bnxt_timesync_disable, 4138 .timesync_read_time = bnxt_timesync_read_time, 4139 .timesync_write_time = bnxt_timesync_write_time, 4140 .timesync_adjust_time = bnxt_timesync_adjust_time, 4141 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 4142 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 4143 .mtr_ops_get = bnxt_flow_meter_ops_get, 4144 }; 4145 4146 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) 4147 { 4148 uint32_t offset; 4149 4150 /* Only pre-map the reset GRC registers using window 3 */ 4151 rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + 4152 
BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); 4153 4154 offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); 4155 4156 return offset; 4157 } 4158 4159 int bnxt_map_fw_health_status_regs(struct bnxt *bp) 4160 { 4161 struct bnxt_error_recovery_info *info = bp->recovery_info; 4162 uint32_t reg_base = 0xffffffff; 4163 int i; 4164 4165 /* Only pre-map the monitoring GRC registers using window 2 */ 4166 for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { 4167 uint32_t reg = info->status_regs[i]; 4168 4169 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) 4170 continue; 4171 4172 if (reg_base == 0xffffffff) 4173 reg_base = reg & 0xfffff000; 4174 if ((reg & 0xfffff000) != reg_base) 4175 return -ERANGE; 4176 4177 /* Use mask 0xffc as the Lower 2 bits indicates 4178 * address space location 4179 */ 4180 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + 4181 (reg & 0xffc); 4182 } 4183 4184 if (reg_base == 0xffffffff) 4185 return 0; 4186 4187 rte_write32(reg_base, (uint8_t *)bp->bar0 + 4188 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 4189 4190 return 0; 4191 } 4192 4193 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) 4194 { 4195 struct bnxt_error_recovery_info *info = bp->recovery_info; 4196 uint32_t delay = info->delay_after_reset[index]; 4197 uint32_t val = info->reset_reg_val[index]; 4198 uint32_t reg = info->reset_reg[index]; 4199 uint32_t type, offset; 4200 int ret; 4201 4202 type = BNXT_FW_STATUS_REG_TYPE(reg); 4203 offset = BNXT_FW_STATUS_REG_OFF(reg); 4204 4205 switch (type) { 4206 case BNXT_FW_STATUS_REG_TYPE_CFG: 4207 ret = rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); 4208 if (ret < 0) { 4209 PMD_DRV_LOG(ERR, "Failed to write %#x at PCI offset %#x", 4210 val, offset); 4211 return; 4212 } 4213 break; 4214 case BNXT_FW_STATUS_REG_TYPE_GRC: 4215 offset = bnxt_map_reset_regs(bp, offset); 4216 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4217 break; 4218 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4219 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4220 break; 4221 } 4222 /* wait on a specific interval of time until core reset is complete */ 4223 if (delay) 4224 rte_delay_ms(delay); 4225 } 4226 4227 static void bnxt_dev_cleanup(struct bnxt *bp) 4228 { 4229 bp->eth_dev->data->dev_link.link_status = 0; 4230 bp->link_info->link_up = 0; 4231 if (bp->eth_dev->data->dev_started) 4232 bnxt_dev_stop(bp->eth_dev); 4233 4234 bnxt_uninit_resources(bp, true); 4235 } 4236 4237 static int 4238 bnxt_check_fw_reset_done(struct bnxt *bp) 4239 { 4240 int timeout = bp->fw_reset_max_msecs; 4241 uint16_t val = 0; 4242 int rc; 4243 4244 do { 4245 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4246 if (rc < 0) { 4247 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4248 return rc; 4249 } 4250 if (val != 0xffff) 4251 break; 4252 rte_delay_ms(1); 4253 } while (timeout--); 4254 4255 if (val == 0xffff) { 4256 PMD_DRV_LOG(ERR, "Firmware reset aborted, PCI config space invalid\n"); 4257 return -1; 4258 } 4259 4260 return 0; 4261 } 4262 4263 static int bnxt_restore_vlan_filters(struct bnxt *bp) 4264 { 4265 struct rte_eth_dev *dev = bp->eth_dev; 4266 struct rte_vlan_filter_conf *vfc; 4267 int vidx, vbit, rc; 4268 uint16_t vlan_id; 4269 4270 for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) { 4271 vfc = &dev->data->vlan_filter_conf; 4272 vidx = vlan_id / 64; 4273 vbit = vlan_id % 64; 4274 4275 /* Each bit corresponds to a VLAN id */ 4276 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) { 4277 rc = bnxt_add_vlan_filter(bp, vlan_id); 
4278 if (rc) 4279 return rc; 4280 } 4281 } 4282 4283 return 0; 4284 } 4285 4286 static int bnxt_restore_mac_filters(struct bnxt *bp) 4287 { 4288 struct rte_eth_dev *dev = bp->eth_dev; 4289 struct rte_eth_dev_info dev_info; 4290 struct rte_ether_addr *addr; 4291 uint64_t pool_mask; 4292 uint32_t pool = 0; 4293 uint32_t i; 4294 int rc; 4295 4296 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4297 return 0; 4298 4299 rc = bnxt_dev_info_get_op(dev, &dev_info); 4300 if (rc) 4301 return rc; 4302 4303 /* replay MAC address configuration */ 4304 for (i = 1; i < dev_info.max_mac_addrs; i++) { 4305 addr = &dev->data->mac_addrs[i]; 4306 4307 /* skip zero address */ 4308 if (rte_is_zero_ether_addr(addr)) 4309 continue; 4310 4311 pool = 0; 4312 pool_mask = dev->data->mac_pool_sel[i]; 4313 4314 do { 4315 if (pool_mask & 1ULL) { 4316 rc = bnxt_mac_addr_add_op(dev, addr, i, pool); 4317 if (rc) 4318 return rc; 4319 } 4320 pool_mask >>= 1; 4321 pool++; 4322 } while (pool_mask); 4323 } 4324 4325 return 0; 4326 } 4327 4328 static int bnxt_restore_mcast_mac_filters(struct bnxt *bp) 4329 { 4330 int ret = 0; 4331 4332 ret = bnxt_dev_set_mc_addr_list_op(bp->eth_dev, bp->mcast_addr_list, 4333 bp->nb_mc_addr); 4334 if (ret) 4335 PMD_DRV_LOG(ERR, "Failed to restore multicast MAC addresses\n"); 4336 4337 return ret; 4338 } 4339 4340 static int bnxt_restore_filters(struct bnxt *bp) 4341 { 4342 struct rte_eth_dev *dev = bp->eth_dev; 4343 int ret = 0; 4344 4345 if (dev->data->all_multicast) { 4346 ret = bnxt_allmulticast_enable_op(dev); 4347 if (ret) 4348 return ret; 4349 } 4350 if (dev->data->promiscuous) { 4351 ret = bnxt_promiscuous_enable_op(dev); 4352 if (ret) 4353 return ret; 4354 } 4355 4356 ret = bnxt_restore_mac_filters(bp); 4357 if (ret) 4358 return ret; 4359 4360 /* if vlans are already programmed, this can fail with -EEXIST */ 4361 ret = bnxt_restore_vlan_filters(bp); 4362 if (ret && ret != -EEXIST) 4363 return ret; 4364 4365 ret = bnxt_restore_mcast_mac_filters(bp); 4366 if (ret) 4367 return ret; 4368 4369 return ret; 4370 } 4371 4372 static int bnxt_check_fw_ready(struct bnxt *bp) 4373 { 4374 int timeout = bp->fw_reset_max_msecs ?
: BNXT_MAX_FW_RESET_TIMEOUT; 4375 int rc = 0; 4376 4377 do { 4378 rc = bnxt_hwrm_poll_ver_get(bp); 4379 if (rc == 0) 4380 break; 4381 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); 4382 timeout -= BNXT_FW_READY_WAIT_INTERVAL; 4383 } while (rc && timeout > 0); 4384 4385 if (rc) 4386 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); 4387 4388 return rc; 4389 } 4390 4391 static void bnxt_dev_recover(void *arg) 4392 { 4393 struct bnxt *bp = arg; 4394 int rc = 0; 4395 4396 pthread_mutex_lock(&bp->err_recovery_lock); 4397 4398 if (!bp->fw_reset_min_msecs) { 4399 rc = bnxt_check_fw_reset_done(bp); 4400 if (rc) 4401 goto err; 4402 } 4403 4404 /* Clear Error flag so that device re-init should happen */ 4405 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 4406 PMD_DRV_LOG(INFO, "Port: %u Starting recovery...\n", 4407 bp->eth_dev->data->port_id); 4408 4409 rc = bnxt_check_fw_ready(bp); 4410 if (rc) 4411 goto err; 4412 4413 rc = bnxt_init_resources(bp, true); 4414 if (rc) { 4415 PMD_DRV_LOG(ERR, 4416 "Failed to initialize resources after reset\n"); 4417 goto err; 4418 } 4419 /* clear reset flag as the device is initialized now */ 4420 bp->flags &= ~BNXT_FLAG_FW_RESET; 4421 4422 rc = bnxt_dev_start_op(bp->eth_dev); 4423 if (rc) { 4424 PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); 4425 goto err_start; 4426 } 4427 4428 rc = bnxt_restore_filters(bp); 4429 if (rc) 4430 goto err_start; 4431 4432 rte_eth_fp_ops[bp->eth_dev->data->port_id].rx_pkt_burst = 4433 bp->eth_dev->rx_pkt_burst; 4434 rte_eth_fp_ops[bp->eth_dev->data->port_id].tx_pkt_burst = 4435 bp->eth_dev->tx_pkt_burst; 4436 rte_mb(); 4437 4438 PMD_DRV_LOG(INFO, "Port: %u Recovered from FW reset\n", 4439 bp->eth_dev->data->port_id); 4440 pthread_mutex_unlock(&bp->err_recovery_lock); 4441 rte_eth_dev_callback_process(bp->eth_dev, 4442 RTE_ETH_EVENT_RECOVERY_SUCCESS, 4443 NULL); 4444 return; 4445 err_start: 4446 bnxt_dev_stop(bp->eth_dev); 4447 err: 4448 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4449 bnxt_uninit_resources(bp, false); 4450 rte_eth_dev_callback_process(bp->eth_dev, 4451 RTE_ETH_EVENT_RECOVERY_FAILED, 4452 NULL); 4453 if (bp->eth_dev->data->dev_conf.intr_conf.rmv) 4454 rte_eth_dev_callback_process(bp->eth_dev, 4455 RTE_ETH_EVENT_INTR_RMV, 4456 NULL); 4457 pthread_mutex_unlock(&bp->err_recovery_lock); 4458 PMD_DRV_LOG(ERR, "Port %u: Failed to recover from FW reset\n", 4459 bp->eth_dev->data->port_id); 4460 } 4461 4462 void bnxt_dev_reset_and_resume(void *arg) 4463 { 4464 struct bnxt *bp = arg; 4465 uint32_t us = US_PER_MS * bp->fw_reset_min_msecs; 4466 uint16_t val = 0; 4467 int rc; 4468 4469 bnxt_dev_cleanup(bp); 4470 PMD_DRV_LOG(INFO, "Port: %u Finished bnxt_dev_cleanup\n", 4471 bp->eth_dev->data->port_id); 4472 4473 bnxt_wait_for_device_shutdown(bp); 4474 4475 /* During some fatal firmware error conditions, the PCI config space 4476 * register 0x2e which normally contains the subsystem ID will become 4477 * 0xffff. This register will revert back to the normal value after 4478 * the chip has completed core reset. If we detect this condition, 4479 * we can poll this config register immediately for the value to revert. 
4480 */ 4481 if (bp->flags & BNXT_FLAG_FATAL_ERROR) { 4482 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4483 if (rc < 0) { 4484 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4485 return; 4486 } 4487 if (val == 0xffff) { 4488 bp->fw_reset_min_msecs = 0; 4489 us = 1; 4490 } 4491 } 4492 4493 rc = rte_eal_alarm_set(us, bnxt_dev_recover, (void *)bp); 4494 if (rc) 4495 PMD_DRV_LOG(ERR, "Port %u: Error setting recovery alarm", 4496 bp->eth_dev->data->port_id); 4497 } 4498 4499 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) 4500 { 4501 struct bnxt_error_recovery_info *info = bp->recovery_info; 4502 uint32_t reg = info->status_regs[index]; 4503 uint32_t type, offset, val = 0; 4504 int ret = 0; 4505 4506 type = BNXT_FW_STATUS_REG_TYPE(reg); 4507 offset = BNXT_FW_STATUS_REG_OFF(reg); 4508 4509 switch (type) { 4510 case BNXT_FW_STATUS_REG_TYPE_CFG: 4511 ret = rte_pci_read_config(bp->pdev, &val, sizeof(val), offset); 4512 if (ret < 0) 4513 PMD_DRV_LOG(ERR, "Failed to read PCI offset %#x", 4514 offset); 4515 break; 4516 case BNXT_FW_STATUS_REG_TYPE_GRC: 4517 offset = info->mapped_status_regs[index]; 4518 /* FALLTHROUGH */ 4519 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4520 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 4521 offset)); 4522 break; 4523 } 4524 4525 return val; 4526 } 4527 4528 static int bnxt_fw_reset_all(struct bnxt *bp) 4529 { 4530 struct bnxt_error_recovery_info *info = bp->recovery_info; 4531 uint32_t i; 4532 int rc = 0; 4533 4534 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4535 /* Reset through primary function driver */ 4536 for (i = 0; i < info->reg_array_cnt; i++) 4537 bnxt_write_fw_reset_reg(bp, i); 4538 /* Wait for time specified by FW after triggering reset */ 4539 rte_delay_ms(info->primary_func_wait_period_after_reset); 4540 } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) { 4541 /* Reset with the help of Kong processor */ 4542 rc = bnxt_hwrm_fw_reset(bp); 4543 if (rc) 4544 PMD_DRV_LOG(ERR, "Failed to reset FW\n"); 4545 } 4546 4547 return rc; 4548 } 4549 4550 static void bnxt_fw_reset_cb(void *arg) 4551 { 4552 struct bnxt *bp = arg; 4553 struct bnxt_error_recovery_info *info = bp->recovery_info; 4554 int rc = 0; 4555 4556 /* Only Primary function can do FW reset */ 4557 if (bnxt_is_primary_func(bp) && 4558 bnxt_is_recovery_enabled(bp)) { 4559 rc = bnxt_fw_reset_all(bp); 4560 if (rc) { 4561 PMD_DRV_LOG(ERR, "Adapter recovery failed\n"); 4562 return; 4563 } 4564 } 4565 4566 /* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send 4567 * EXCEPTION_FATAL_ASYNC event to all the functions 4568 * (including MASTER FUNC). After receiving this Async, all the active 4569 * drivers should treat this case as FW initiated recovery 4570 */ 4571 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4572 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT; 4573 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT; 4574 4575 /* To recover from error */ 4576 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, 4577 (void *)bp); 4578 } 4579 } 4580 4581 /* Driver should poll FW heartbeat, reset_counter with the frequency 4582 * advertised by FW in HWRM_ERROR_RECOVERY_QCFG. 4583 * When the driver detects heartbeat stop or change in reset_counter, 4584 * it has to trigger a reset to recover from the error condition. 4585 * A “primary function” is the function who will have the privilege to 4586 * initiate the chimp reset. 
The primary function will be elected by the 4587 * firmware and will be notified through async message. 4588 */ 4589 static void bnxt_check_fw_health(void *arg) 4590 { 4591 struct bnxt *bp = arg; 4592 struct bnxt_error_recovery_info *info = bp->recovery_info; 4593 uint32_t val = 0, wait_msec; 4594 4595 if (!info || !bnxt_is_recovery_enabled(bp) || 4596 is_bnxt_in_error(bp)) 4597 return; 4598 4599 val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); 4600 if (val == info->last_heart_beat) 4601 goto reset; 4602 4603 info->last_heart_beat = val; 4604 4605 val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); 4606 if (val != info->last_reset_counter) 4607 goto reset; 4608 4609 info->last_reset_counter = val; 4610 4611 rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq, 4612 bnxt_check_fw_health, (void *)bp); 4613 4614 return; 4615 reset: 4616 /* Stop DMA to/from device */ 4617 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4618 bp->flags |= BNXT_FLAG_FW_RESET; 4619 4620 bnxt_stop_rxtx(bp->eth_dev); 4621 4622 PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); 4623 4624 rte_eth_dev_callback_process(bp->eth_dev, 4625 RTE_ETH_EVENT_ERR_RECOVERING, 4626 NULL); 4627 4628 if (bnxt_is_primary_func(bp)) 4629 wait_msec = info->primary_func_wait_period; 4630 else 4631 wait_msec = info->normal_func_wait_period; 4632 4633 rte_eal_alarm_set(US_PER_MS * wait_msec, 4634 bnxt_fw_reset_cb, (void *)bp); 4635 } 4636 4637 void bnxt_schedule_fw_health_check(struct bnxt *bp) 4638 { 4639 uint32_t polling_freq; 4640 4641 pthread_mutex_lock(&bp->health_check_lock); 4642 4643 if (!bnxt_is_recovery_enabled(bp)) 4644 goto done; 4645 4646 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) 4647 goto done; 4648 4649 polling_freq = bp->recovery_info->driver_polling_freq; 4650 4651 rte_eal_alarm_set(US_PER_MS * polling_freq, 4652 bnxt_check_fw_health, (void *)bp); 4653 bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4654 4655 done: 4656 pthread_mutex_unlock(&bp->health_check_lock); 4657 } 4658 4659 static void bnxt_cancel_fw_health_check(struct bnxt *bp) 4660 { 4661 rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp); 4662 bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4663 } 4664 4665 static bool bnxt_vf_pciid(uint16_t device_id) 4666 { 4667 switch (device_id) { 4668 case BROADCOM_DEV_ID_57304_VF: 4669 case BROADCOM_DEV_ID_57406_VF: 4670 case BROADCOM_DEV_ID_5731X_VF: 4671 case BROADCOM_DEV_ID_5741X_VF: 4672 case BROADCOM_DEV_ID_57414_VF: 4673 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4674 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4675 case BROADCOM_DEV_ID_58802_VF: 4676 case BROADCOM_DEV_ID_57500_VF1: 4677 case BROADCOM_DEV_ID_57500_VF2: 4678 case BROADCOM_DEV_ID_58818_VF: 4679 /* FALLTHROUGH */ 4680 return true; 4681 default: 4682 return false; 4683 } 4684 } 4685 4686 /* Phase 5 device */ 4687 static bool bnxt_p5_device(uint16_t device_id) 4688 { 4689 switch (device_id) { 4690 case BROADCOM_DEV_ID_57508: 4691 case BROADCOM_DEV_ID_57504: 4692 case BROADCOM_DEV_ID_57502: 4693 case BROADCOM_DEV_ID_57508_MF1: 4694 case BROADCOM_DEV_ID_57504_MF1: 4695 case BROADCOM_DEV_ID_57502_MF1: 4696 case BROADCOM_DEV_ID_57508_MF2: 4697 case BROADCOM_DEV_ID_57504_MF2: 4698 case BROADCOM_DEV_ID_57502_MF2: 4699 case BROADCOM_DEV_ID_57500_VF1: 4700 case BROADCOM_DEV_ID_57500_VF2: 4701 case BROADCOM_DEV_ID_58812: 4702 case BROADCOM_DEV_ID_58814: 4703 case BROADCOM_DEV_ID_58818: 4704 case BROADCOM_DEV_ID_58818_VF: 4705 /* FALLTHROUGH */ 4706 return true; 4707 default: 4708 return false; 4709 } 4710 } 4711 4712 bool 
bnxt_stratus_device(struct bnxt *bp) 4713 { 4714 uint16_t device_id = bp->pdev->id.device_id; 4715 4716 switch (device_id) { 4717 case BROADCOM_DEV_ID_STRATUS_NIC: 4718 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4719 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4720 /* FALLTHROUGH */ 4721 return true; 4722 default: 4723 return false; 4724 } 4725 } 4726 4727 static int bnxt_map_pci_bars(struct rte_eth_dev *eth_dev) 4728 { 4729 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 4730 struct bnxt *bp = eth_dev->data->dev_private; 4731 4732 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 4733 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 4734 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 4735 if (!bp->bar0 || !bp->doorbell_base) { 4736 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 4737 return -ENODEV; 4738 } 4739 4740 bp->eth_dev = eth_dev; 4741 bp->pdev = pci_dev; 4742 4743 return 0; 4744 } 4745 4746 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 4747 struct bnxt_ctx_pg_info *ctx_pg, 4748 uint32_t mem_size, 4749 const char *suffix, 4750 uint16_t idx) 4751 { 4752 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 4753 const struct rte_memzone *mz = NULL; 4754 char mz_name[RTE_MEMZONE_NAMESIZE]; 4755 rte_iova_t mz_phys_addr; 4756 uint64_t valid_bits = 0; 4757 uint32_t sz; 4758 int i; 4759 4760 if (!mem_size) 4761 return 0; 4762 4763 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 4764 BNXT_PAGE_SIZE; 4765 rmem->page_size = BNXT_PAGE_SIZE; 4766 rmem->pg_arr = ctx_pg->ctx_pg_arr; 4767 rmem->dma_arr = ctx_pg->ctx_dma_arr; 4768 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 4769 4770 valid_bits = PTU_PTE_VALID; 4771 4772 if (rmem->nr_pages > 1) { 4773 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4774 "bnxt_ctx_pg_tbl%s_%x_%d", 4775 suffix, idx, bp->eth_dev->data->port_id); 4776 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4777 mz = rte_memzone_lookup(mz_name); 4778 if (!mz) { 4779 mz = rte_memzone_reserve_aligned(mz_name, 4780 rmem->nr_pages * 8, 4781 bp->eth_dev->device->numa_node, 4782 RTE_MEMZONE_2MB | 4783 RTE_MEMZONE_SIZE_HINT_ONLY | 4784 RTE_MEMZONE_IOVA_CONTIG, 4785 BNXT_PAGE_SIZE); 4786 if (mz == NULL) 4787 return -ENOMEM; 4788 } 4789 4790 memset(mz->addr, 0, mz->len); 4791 mz_phys_addr = mz->iova; 4792 4793 rmem->pg_tbl = mz->addr; 4794 rmem->pg_tbl_map = mz_phys_addr; 4795 rmem->pg_tbl_mz = mz; 4796 } 4797 4798 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", 4799 suffix, idx, bp->eth_dev->data->port_id); 4800 mz = rte_memzone_lookup(mz_name); 4801 if (!mz) { 4802 mz = rte_memzone_reserve_aligned(mz_name, 4803 mem_size, 4804 bp->eth_dev->device->numa_node, 4805 RTE_MEMZONE_1GB | 4806 RTE_MEMZONE_SIZE_HINT_ONLY | 4807 RTE_MEMZONE_IOVA_CONTIG, 4808 BNXT_PAGE_SIZE); 4809 if (mz == NULL) 4810 return -ENOMEM; 4811 } 4812 4813 memset(mz->addr, 0, mz->len); 4814 mz_phys_addr = mz->iova; 4815 4816 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 4817 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 4818 rmem->dma_arr[i] = mz_phys_addr + sz; 4819 4820 if (rmem->nr_pages > 1) { 4821 if (i == rmem->nr_pages - 2 && 4822 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4823 valid_bits |= PTU_PTE_NEXT_TO_LAST; 4824 else if (i == rmem->nr_pages - 1 && 4825 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4826 valid_bits |= PTU_PTE_LAST; 4827 4828 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 4829 valid_bits); 4830 } 4831 } 4832 4833 rmem->mz = mz; 4834 if (rmem->vmem_size) 4835 rmem->vmem = (void **)mz->addr; 4836 rmem->dma_arr[0] = mz_phys_addr; 4837 
return 0; 4838 } 4839 4840 static void bnxt_free_ctx_mem(struct bnxt *bp) 4841 { 4842 int i; 4843 4844 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 4845 return; 4846 4847 bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; 4848 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 4849 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 4850 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 4851 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 4852 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 4853 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 4854 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 4855 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 4856 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 4857 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 4858 4859 for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { 4860 if (bp->ctx->tqm_mem[i]) 4861 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 4862 } 4863 4864 rte_free(bp->ctx); 4865 bp->ctx = NULL; 4866 } 4867 4868 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 4869 4870 #define clamp_t(type, _x, min, max) RTE_MIN_T(RTE_MAX_T(_x, min, type), max, type) 4871 4872 int bnxt_alloc_ctx_mem(struct bnxt *bp) 4873 { 4874 struct bnxt_ctx_pg_info *ctx_pg; 4875 struct bnxt_ctx_mem_info *ctx; 4876 uint32_t mem_size, ena, entries; 4877 uint32_t entries_sp, min; 4878 int i, rc; 4879 4880 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 4881 if (rc) { 4882 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 4883 return rc; 4884 } 4885 ctx = bp->ctx; 4886 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 4887 return 0; 4888 4889 ctx_pg = &ctx->qp_mem; 4890 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 4891 if (ctx->qp_entry_size) { 4892 mem_size = ctx->qp_entry_size * ctx_pg->entries; 4893 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 4894 if (rc) 4895 return rc; 4896 } 4897 4898 ctx_pg = &ctx->srq_mem; 4899 ctx_pg->entries = ctx->srq_max_l2_entries; 4900 if (ctx->srq_entry_size) { 4901 mem_size = ctx->srq_entry_size * ctx_pg->entries; 4902 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 4903 if (rc) 4904 return rc; 4905 } 4906 4907 ctx_pg = &ctx->cq_mem; 4908 ctx_pg->entries = ctx->cq_max_l2_entries; 4909 if (ctx->cq_entry_size) { 4910 mem_size = ctx->cq_entry_size * ctx_pg->entries; 4911 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 4912 if (rc) 4913 return rc; 4914 } 4915 4916 ctx_pg = &ctx->vnic_mem; 4917 ctx_pg->entries = ctx->vnic_max_vnic_entries + 4918 ctx->vnic_max_ring_table_entries; 4919 if (ctx->vnic_entry_size) { 4920 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 4921 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 4922 if (rc) 4923 return rc; 4924 } 4925 4926 ctx_pg = &ctx->stat_mem; 4927 ctx_pg->entries = ctx->stat_max_entries; 4928 if (ctx->stat_entry_size) { 4929 mem_size = ctx->stat_entry_size * ctx_pg->entries; 4930 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 4931 if (rc) 4932 return rc; 4933 } 4934 4935 min = ctx->tqm_min_entries_per_ring; 4936 4937 entries_sp = ctx->qp_max_l2_entries + 4938 ctx->vnic_max_vnic_entries + 4939 2 * ctx->qp_min_qp1_entries + min; 4940 entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple); 4941 4942 entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries; 4943 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 4944 entries = clamp_t(uint32_t, entries, min, 4945 ctx->tqm_max_entries_per_ring); 4946 for (i = 0, ena = 
0; i < ctx->tqm_fp_rings_count + 1; i++) { 4947 /* i=0 is for TQM_SP. i=1 to i=8 applies to RING0 to RING7. 4948 * i > 8 is other ext rings. 4949 */ 4950 ctx_pg = ctx->tqm_mem[i]; 4951 ctx_pg->entries = i ? entries : entries_sp; 4952 if (ctx->tqm_entry_size) { 4953 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 4954 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, 4955 "tqm_mem", i); 4956 if (rc) 4957 return rc; 4958 } 4959 if (i < BNXT_MAX_TQM_LEGACY_RINGS) 4960 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 4961 else 4962 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8; 4963 } 4964 4965 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 4966 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 4967 if (rc) 4968 PMD_DRV_LOG(ERR, 4969 "Failed to configure context mem: rc = %d\n", rc); 4970 else 4971 ctx->flags |= BNXT_CTX_FLAG_INITED; 4972 4973 return rc; 4974 } 4975 4976 static int bnxt_alloc_stats_mem(struct bnxt *bp) 4977 { 4978 struct rte_pci_device *pci_dev = bp->pdev; 4979 char mz_name[RTE_MEMZONE_NAMESIZE]; 4980 const struct rte_memzone *mz = NULL; 4981 uint32_t total_alloc_len; 4982 rte_iova_t mz_phys_addr; 4983 4984 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 4985 return 0; 4986 4987 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4988 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4989 pci_dev->addr.bus, pci_dev->addr.devid, 4990 pci_dev->addr.function, "rx_port_stats"); 4991 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4992 mz = rte_memzone_lookup(mz_name); 4993 total_alloc_len = 4994 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 4995 sizeof(struct rx_port_stats_ext) + 512); 4996 if (!mz) { 4997 mz = rte_memzone_reserve(mz_name, total_alloc_len, 4998 SOCKET_ID_ANY, 4999 RTE_MEMZONE_2MB | 5000 RTE_MEMZONE_SIZE_HINT_ONLY | 5001 RTE_MEMZONE_IOVA_CONTIG); 5002 if (mz == NULL) 5003 return -ENOMEM; 5004 } 5005 memset(mz->addr, 0, mz->len); 5006 mz_phys_addr = mz->iova; 5007 5008 bp->rx_mem_zone = (const void *)mz; 5009 bp->hw_rx_port_stats = mz->addr; 5010 bp->hw_rx_port_stats_map = mz_phys_addr; 5011 5012 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 5013 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 5014 pci_dev->addr.bus, pci_dev->addr.devid, 5015 pci_dev->addr.function, "tx_port_stats"); 5016 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 5017 mz = rte_memzone_lookup(mz_name); 5018 total_alloc_len = 5019 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 5020 sizeof(struct tx_port_stats_ext) + 512); 5021 if (!mz) { 5022 mz = rte_memzone_reserve(mz_name, 5023 total_alloc_len, 5024 SOCKET_ID_ANY, 5025 RTE_MEMZONE_2MB | 5026 RTE_MEMZONE_SIZE_HINT_ONLY | 5027 RTE_MEMZONE_IOVA_CONTIG); 5028 if (mz == NULL) 5029 return -ENOMEM; 5030 } 5031 memset(mz->addr, 0, mz->len); 5032 mz_phys_addr = mz->iova; 5033 5034 bp->tx_mem_zone = (const void *)mz; 5035 bp->hw_tx_port_stats = mz->addr; 5036 bp->hw_tx_port_stats_map = mz_phys_addr; 5037 bp->flags |= BNXT_FLAG_PORT_STATS; 5038 5039 /* Display extended statistics if FW supports it */ 5040 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 5041 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 5042 !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED)) 5043 return 0; 5044 5045 bp->hw_rx_port_stats_ext = (void *) 5046 ((uint8_t *)bp->hw_rx_port_stats + 5047 sizeof(struct rx_port_stats)); 5048 bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + 5049 sizeof(struct rx_port_stats); 5050 bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; 5051 5052 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 || 5053 bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) { 5054 
bp->hw_tx_port_stats_ext = (void *) 5055 ((uint8_t *)bp->hw_tx_port_stats + 5056 sizeof(struct tx_port_stats)); 5057 bp->hw_tx_port_stats_ext_map = 5058 bp->hw_tx_port_stats_map + 5059 sizeof(struct tx_port_stats); 5060 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS; 5061 } 5062 5063 return 0; 5064 } 5065 5066 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) 5067 { 5068 struct bnxt *bp = eth_dev->data->dev_private; 5069 size_t max_mac_addr = RTE_MIN(bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR); 5070 int rc = 0; 5071 5072 if (bp->max_l2_ctx > RTE_ETH_NUM_RECEIVE_MAC_ADDR) 5073 PMD_DRV_LOG(INFO, "Max number of MAC addrs supported is %d, but will be limited to %d\n", 5074 bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR); 5075 5076 eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl", 5077 RTE_ETHER_ADDR_LEN * max_mac_addr, 5078 0); 5079 if (eth_dev->data->mac_addrs == NULL) { 5080 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n"); 5081 return -ENOMEM; 5082 } 5083 5084 if (!BNXT_HAS_DFLT_MAC_SET(bp)) { 5085 if (BNXT_PF(bp)) 5086 return -EINVAL; 5087 5088 /* Generate a random MAC address, if none was assigned by PF */ 5089 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n"); 5090 bnxt_eth_hw_addr_random(bp->mac_addr); 5091 PMD_DRV_LOG(INFO, 5092 "Assign random MAC:" RTE_ETHER_ADDR_PRT_FMT "\n", 5093 bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2], 5094 bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]); 5095 5096 rc = bnxt_hwrm_set_mac(bp); 5097 if (rc) 5098 return rc; 5099 } 5100 5101 /* Copy the permanent MAC from the FUNC_QCAPS response */ 5102 memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN); 5103 5104 /* 5105 * Allocate memory to hold multicast mac addresses added. 5106 * Used to restore them during reset recovery 5107 */ 5108 bp->mcast_addr_list = rte_zmalloc("bnxt_mcast_addr_tbl", 5109 sizeof(struct rte_ether_addr) * 5110 BNXT_MAX_MC_ADDRS, 0); 5111 if (bp->mcast_addr_list == NULL) { 5112 PMD_DRV_LOG(ERR, "Failed to allocate multicast addr table\n"); 5113 return -ENOMEM; 5114 } 5115 bp->mc_list_dma_addr = rte_malloc_virt2iova(bp->mcast_addr_list); 5116 if (bp->mc_list_dma_addr == RTE_BAD_IOVA) { 5117 PMD_DRV_LOG(ERR, "Failed to map mcast_addr_list to physical memory\n"); 5118 return -ENOMEM; 5119 } 5120 5121 return rc; 5122 } 5123 5124 static int bnxt_restore_dflt_mac(struct bnxt *bp) 5125 { 5126 int rc = 0; 5127 5128 /* MAC is already configured in FW */ 5129 if (BNXT_HAS_DFLT_MAC_SET(bp)) 5130 return 0; 5131 5132 /* Restore the old MAC configured */ 5133 rc = bnxt_hwrm_set_mac(bp); 5134 if (rc) 5135 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n"); 5136 5137 return rc; 5138 } 5139 5140 static void bnxt_config_vf_req_fwd(struct bnxt *bp) 5141 { 5142 if (!BNXT_PF(bp)) 5143 return; 5144 5145 memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd)); 5146 5147 if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)) 5148 BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG); 5149 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG); 5150 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG); 5151 BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC); 5152 BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD); 5153 } 5154 5155 static void bnxt_alloc_error_recovery_info(struct bnxt *bp) 5156 { 5157 struct bnxt_error_recovery_info *info = bp->recovery_info; 5158 5159 if (info) { 5160 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)) 5161 memset(info, 0, sizeof(*info)); 5162 return; 5163 } 5164 5165 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5166 return; 5167 5168 info =
rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5169 sizeof(*info), 0); 5170 if (!info) 5171 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5172 5173 bp->recovery_info = info; 5174 } 5175 5176 static void bnxt_check_fw_status(struct bnxt *bp) 5177 { 5178 uint32_t fw_status; 5179 5180 if (!(bp->recovery_info && 5181 (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) 5182 return; 5183 5184 fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); 5185 if (fw_status != BNXT_FW_STATUS_HEALTHY) 5186 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", 5187 fw_status); 5188 } 5189 5190 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) 5191 { 5192 struct bnxt_error_recovery_info *info = bp->recovery_info; 5193 uint32_t status_loc; 5194 uint32_t sig_ver; 5195 5196 rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + 5197 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5198 sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5199 BNXT_GRCP_WINDOW_2_BASE + 5200 offsetof(struct hcomm_status, 5201 sig_ver))); 5202 /* If the signature is absent, then FW does not support this feature */ 5203 if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != 5204 HCOMM_STATUS_SIGNATURE_VAL) 5205 return 0; 5206 5207 if (!info) { 5208 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5209 sizeof(*info), 0); 5210 if (!info) 5211 return -ENOMEM; 5212 bp->recovery_info = info; 5213 } else { 5214 memset(info, 0, sizeof(*info)); 5215 } 5216 5217 status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5218 BNXT_GRCP_WINDOW_2_BASE + 5219 offsetof(struct hcomm_status, 5220 fw_status_loc))); 5221 5222 /* Only pre-map the FW health status GRC register */ 5223 if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) 5224 return 0; 5225 5226 info->status_regs[BNXT_FW_STATUS_REG] = status_loc; 5227 info->mapped_status_regs[BNXT_FW_STATUS_REG] = 5228 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); 5229 5230 rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + 5231 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5232 5233 bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; 5234 5235 return 0; 5236 } 5237 5238 /* This function gets the FW version along with the 5239 * capabilities(MAX and current) of the function, vnic, 5240 * error recovery, phy and other chip related info 5241 */ 5242 static int bnxt_get_config(struct bnxt *bp) 5243 { 5244 uint16_t mtu; 5245 int rc = 0; 5246 5247 bp->fw_cap = 0; 5248 5249 rc = bnxt_map_hcomm_fw_status_reg(bp); 5250 if (rc) 5251 return rc; 5252 5253 rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); 5254 if (rc) { 5255 bnxt_check_fw_status(bp); 5256 return rc; 5257 } 5258 5259 rc = bnxt_hwrm_func_reset(bp); 5260 if (rc) 5261 return -EIO; 5262 5263 rc = bnxt_hwrm_vnic_qcaps(bp); 5264 if (rc) 5265 return rc; 5266 5267 rc = bnxt_hwrm_queue_qportcfg(bp); 5268 if (rc) 5269 return rc; 5270 5271 /* Get the MAX capabilities for this function. 5272 * This function also allocates context memory for TQM rings and 5273 * informs the firmware about this allocated backing store memory. 
5274 */ 5275 rc = bnxt_hwrm_func_qcaps(bp); 5276 if (rc) 5277 return rc; 5278 5279 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 5280 if (rc) 5281 return rc; 5282 5283 bnxt_hwrm_port_mac_qcfg(bp); 5284 5285 bnxt_hwrm_parent_pf_qcfg(bp); 5286 5287 bnxt_hwrm_port_phy_qcaps(bp); 5288 5289 bnxt_alloc_error_recovery_info(bp); 5290 /* Get the adapter error recovery support info */ 5291 rc = bnxt_hwrm_error_recovery_qcfg(bp); 5292 if (rc) 5293 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5294 5295 bnxt_hwrm_port_led_qcaps(bp); 5296 5297 return 0; 5298 } 5299 5300 static int 5301 bnxt_init_locks(struct bnxt *bp) 5302 { 5303 int err; 5304 5305 err = pthread_mutex_init(&bp->flow_lock, NULL); 5306 if (err) { 5307 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); 5308 return err; 5309 } 5310 5311 err = pthread_mutex_init(&bp->def_cp_lock, NULL); 5312 if (err) { 5313 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); 5314 return err; 5315 } 5316 5317 err = pthread_mutex_init(&bp->health_check_lock, NULL); 5318 if (err) { 5319 PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n"); 5320 return err; 5321 } 5322 5323 err = pthread_mutex_init(&bp->err_recovery_lock, NULL); 5324 if (err) 5325 PMD_DRV_LOG(ERR, "Unable to initialize err_recovery_lock\n"); 5326 5327 return err; 5328 } 5329 5330 /* This should be called after we have queried trusted VF cap */ 5331 static int bnxt_alloc_switch_domain(struct bnxt *bp) 5332 { 5333 int rc = 0; 5334 5335 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 5336 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id); 5337 if (rc) 5338 PMD_DRV_LOG(ERR, 5339 "Failed to alloc switch domain: %d\n", rc); 5340 else 5341 PMD_DRV_LOG(INFO, 5342 "Switch domain allocated %d\n", 5343 bp->switch_domain_id); 5344 } 5345 5346 return rc; 5347 } 5348 5349 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) 5350 { 5351 int rc = 0; 5352 5353 if (reconfig_dev) { 5354 rc = bnxt_get_config(bp); 5355 if (rc) 5356 return rc; 5357 } 5358 5359 rc = bnxt_alloc_switch_domain(bp); 5360 if (rc) 5361 return rc; 5362 5363 if (!reconfig_dev) { 5364 rc = bnxt_setup_mac_addr(bp->eth_dev); 5365 if (rc) 5366 return rc; 5367 } else { 5368 rc = bnxt_restore_dflt_mac(bp); 5369 if (rc) 5370 return rc; 5371 } 5372 5373 bnxt_config_vf_req_fwd(bp); 5374 5375 rc = bnxt_hwrm_func_driver_register(bp); 5376 if (rc) { 5377 PMD_DRV_LOG(ERR, "Failed to register driver"); 5378 return -EBUSY; 5379 } 5380 5381 if (BNXT_PF(bp)) { 5382 if (bp->pdev->max_vfs) { 5383 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 5384 if (rc) { 5385 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 5386 return rc; 5387 } 5388 } else { 5389 rc = bnxt_hwrm_allocate_pf_only(bp); 5390 if (rc) { 5391 PMD_DRV_LOG(ERR, 5392 "Failed to allocate PF resources"); 5393 return rc; 5394 } 5395 } 5396 } 5397 5398 if (!reconfig_dev) { 5399 bp->rss_conf.rss_key = rte_zmalloc("bnxt_rss_key", 5400 HW_HASH_KEY_SIZE, 0); 5401 if (bp->rss_conf.rss_key == NULL) { 5402 PMD_DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory", 5403 bp->eth_dev->data->port_id); 5404 return -ENOMEM; 5405 } 5406 } 5407 5408 rc = bnxt_alloc_mem(bp, reconfig_dev); 5409 if (rc) 5410 return rc; 5411 5412 rc = bnxt_setup_int(bp); 5413 if (rc) 5414 return rc; 5415 5416 rc = bnxt_request_int(bp); 5417 if (rc) 5418 return rc; 5419 5420 rc = bnxt_init_ctx_mem(bp); 5421 if (rc) { 5422 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n"); 5423 return rc; 5424 } 5425 5426 return 0; 5427 } 5428 5429 static int 5430 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key, 
5431 const char *value, void *opaque_arg) 5432 { 5433 struct bnxt *bp = opaque_arg; 5434 unsigned long flow_xstat; 5435 char *end = NULL; 5436 5437 if (!value || !opaque_arg) { 5438 PMD_DRV_LOG(ERR, 5439 "Invalid parameter passed to flow_xstat devarg.\n"); 5440 return -EINVAL; 5441 } 5442 5443 flow_xstat = strtoul(value, &end, 10); 5444 if (end == NULL || *end != '\0' || 5445 (flow_xstat == ULONG_MAX && errno == ERANGE)) { 5446 PMD_DRV_LOG(ERR, 5447 "Invalid parameter passed to flow_xstat devarg.\n"); 5448 return -EINVAL; 5449 } 5450 5451 if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) { 5452 PMD_DRV_LOG(ERR, 5453 "Invalid value passed to flow_xstat devarg.\n"); 5454 return -EINVAL; 5455 } 5456 5457 bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; 5458 if (BNXT_FLOW_XSTATS_EN(bp)) 5459 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n"); 5460 5461 return 0; 5462 } 5463 5464 static int 5465 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key, 5466 const char *value, void *opaque_arg) 5467 { 5468 struct bnxt *bp = opaque_arg; 5469 unsigned long max_num_kflows; 5470 char *end = NULL; 5471 5472 if (!value || !opaque_arg) { 5473 PMD_DRV_LOG(ERR, 5474 "Invalid parameter passed to max_num_kflows devarg.\n"); 5475 return -EINVAL; 5476 } 5477 5478 max_num_kflows = strtoul(value, &end, 10); 5479 if (end == NULL || *end != '\0' || 5480 (max_num_kflows == ULONG_MAX && errno == ERANGE)) { 5481 PMD_DRV_LOG(ERR, 5482 "Invalid parameter passed to max_num_kflows devarg.\n"); 5483 return -EINVAL; 5484 } 5485 5486 if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) { 5487 PMD_DRV_LOG(ERR, 5488 "Invalid value passed to max_num_kflows devarg.\n"); 5489 return -EINVAL; 5490 } 5491 5492 bp->max_num_kflows = max_num_kflows; 5493 if (bp->max_num_kflows) 5494 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n", 5495 max_num_kflows); 5496 5497 return 0; 5498 } 5499 5500 static int 5501 bnxt_parse_devarg_app_id(__rte_unused const char *key, 5502 const char *value, void *opaque_arg) 5503 { 5504 struct bnxt *bp = opaque_arg; 5505 unsigned long app_id; 5506 char *end = NULL; 5507 5508 if (!value || !opaque_arg) { 5509 PMD_DRV_LOG(ERR, 5510 "Invalid parameter passed to app-id " 5511 "devargs.\n"); 5512 return -EINVAL; 5513 } 5514 5515 app_id = strtoul(value, &end, 10); 5516 if (end == NULL || *end != '\0' || 5517 (app_id == ULONG_MAX && errno == ERANGE)) { 5518 PMD_DRV_LOG(ERR, 5519 "Invalid parameter passed to app_id " 5520 "devargs.\n"); 5521 return -EINVAL; 5522 } 5523 5524 if (BNXT_DEVARG_APP_ID_INVALID(app_id)) { 5525 PMD_DRV_LOG(ERR, "Invalid app-id(%d) devargs.\n", 5526 (uint16_t)app_id); 5527 return -EINVAL; 5528 } 5529 5530 bp->app_id = app_id; 5531 PMD_DRV_LOG(INFO, "app-id=%d feature enabled.\n", (uint16_t)app_id); 5532 5533 return 0; 5534 } 5535 5536 static int 5537 bnxt_parse_devarg_ieee_1588(__rte_unused const char *key, 5538 const char *value, void *opaque_arg) 5539 { 5540 struct bnxt *bp = opaque_arg; 5541 unsigned long ieee_1588; 5542 char *end = NULL; 5543 5544 if (!value || !opaque_arg) { 5545 PMD_DRV_LOG(ERR, 5546 "Invalid parameter passed to ieee-1588 " 5547 "devargs.\n"); 5548 return -EINVAL; 5549 } 5550 5551 ieee_1588 = strtoul(value, &end, 10); 5552 if (end == NULL || *end != '\0' || 5553 (ieee_1588 == ULONG_MAX && errno == ERANGE)) { 5554 PMD_DRV_LOG(ERR, 5555 "Invalid parameter passed to ieee_1588 " 5556 "devargs.\n"); 5557 return -EINVAL; 5558 } 5559 5560 if (BNXT_DEVARG_IEEE_1588_INVALID(ieee_1588)) { 5561 PMD_DRV_LOG(ERR, "Invalid ieee-1588(%d) devargs.\n", 5562 (uint16_t)ieee_1588); 5563 
return -EINVAL; 5564 } 5565 5566 bp->ieee_1588 = ieee_1588; 5567 PMD_DRV_LOG(INFO, "ieee-1588=%d feature enabled.\n", (uint16_t)ieee_1588); 5568 5569 return 0; 5570 } 5571 5572 static int 5573 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key, 5574 const char *value, void *opaque_arg) 5575 { 5576 struct bnxt_representor *vfr_bp = opaque_arg; 5577 unsigned long rep_is_pf; 5578 char *end = NULL; 5579 5580 if (!value || !opaque_arg) { 5581 PMD_DRV_LOG(ERR, 5582 "Invalid parameter passed to rep_is_pf devargs.\n"); 5583 return -EINVAL; 5584 } 5585 5586 rep_is_pf = strtoul(value, &end, 10); 5587 if (end == NULL || *end != '\0' || 5588 (rep_is_pf == ULONG_MAX && errno == ERANGE)) { 5589 PMD_DRV_LOG(ERR, 5590 "Invalid parameter passed to rep_is_pf devargs.\n"); 5591 return -EINVAL; 5592 } 5593 5594 if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) { 5595 PMD_DRV_LOG(ERR, 5596 "Invalid value passed to rep_is_pf devargs.\n"); 5597 return -EINVAL; 5598 } 5599 5600 vfr_bp->flags |= rep_is_pf; 5601 if (BNXT_REP_PF(vfr_bp)) 5602 PMD_DRV_LOG(INFO, "PF representor\n"); 5603 else 5604 PMD_DRV_LOG(INFO, "VF representor\n"); 5605 5606 return 0; 5607 } 5608 5609 static int 5610 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key, 5611 const char *value, void *opaque_arg) 5612 { 5613 struct bnxt_representor *vfr_bp = opaque_arg; 5614 unsigned long rep_based_pf; 5615 char *end = NULL; 5616 5617 if (!value || !opaque_arg) { 5618 PMD_DRV_LOG(ERR, 5619 "Invalid parameter passed to rep_based_pf " 5620 "devargs.\n"); 5621 return -EINVAL; 5622 } 5623 5624 rep_based_pf = strtoul(value, &end, 10); 5625 if (end == NULL || *end != '\0' || 5626 (rep_based_pf == ULONG_MAX && errno == ERANGE)) { 5627 PMD_DRV_LOG(ERR, 5628 "Invalid parameter passed to rep_based_pf " 5629 "devargs.\n"); 5630 return -EINVAL; 5631 } 5632 5633 if (BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) { 5634 PMD_DRV_LOG(ERR, 5635 "Invalid value passed to rep_based_pf devargs.\n"); 5636 return -EINVAL; 5637 } 5638 5639 vfr_bp->rep_based_pf = rep_based_pf; 5640 vfr_bp->flags |= BNXT_REP_BASED_PF_VALID; 5641 5642 PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf); 5643 5644 return 0; 5645 } 5646 5647 static int 5648 bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key, 5649 const char *value, void *opaque_arg) 5650 { 5651 struct bnxt_representor *vfr_bp = opaque_arg; 5652 unsigned long rep_q_r2f; 5653 char *end = NULL; 5654 5655 if (!value || !opaque_arg) { 5656 PMD_DRV_LOG(ERR, 5657 "Invalid parameter passed to rep_q_r2f " 5658 "devargs.\n"); 5659 return -EINVAL; 5660 } 5661 5662 rep_q_r2f = strtoul(value, &end, 10); 5663 if (end == NULL || *end != '\0' || 5664 (rep_q_r2f == ULONG_MAX && errno == ERANGE)) { 5665 PMD_DRV_LOG(ERR, 5666 "Invalid parameter passed to rep_q_r2f " 5667 "devargs.\n"); 5668 return -EINVAL; 5669 } 5670 5671 if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) { 5672 PMD_DRV_LOG(ERR, 5673 "Invalid value passed to rep_q_r2f devargs.\n"); 5674 return -EINVAL; 5675 } 5676 5677 vfr_bp->rep_q_r2f = rep_q_r2f; 5678 vfr_bp->flags |= BNXT_REP_Q_R2F_VALID; 5679 PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f); 5680 5681 return 0; 5682 } 5683 5684 static int 5685 bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key, 5686 const char *value, void *opaque_arg) 5687 { 5688 struct bnxt_representor *vfr_bp = opaque_arg; 5689 unsigned long rep_q_f2r; 5690 char *end = NULL; 5691 5692 if (!value || !opaque_arg) { 5693 PMD_DRV_LOG(ERR, 5694 "Invalid parameter passed to rep_q_f2r " 5695 "devargs.\n"); 5696 return 
-EINVAL; 5697 } 5698 5699 rep_q_f2r = strtoul(value, &end, 10); 5700 if (end == NULL || *end != '\0' || 5701 (rep_q_f2r == ULONG_MAX && errno == ERANGE)) { 5702 PMD_DRV_LOG(ERR, 5703 "Invalid parameter passed to rep_q_f2r " 5704 "devargs.\n"); 5705 return -EINVAL; 5706 } 5707 5708 if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) { 5709 PMD_DRV_LOG(ERR, 5710 "Invalid value passed to rep_q_f2r devargs.\n"); 5711 return -EINVAL; 5712 } 5713 5714 vfr_bp->rep_q_f2r = rep_q_f2r; 5715 vfr_bp->flags |= BNXT_REP_Q_F2R_VALID; 5716 PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r); 5717 5718 return 0; 5719 } 5720 5721 static int 5722 bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key, 5723 const char *value, void *opaque_arg) 5724 { 5725 struct bnxt_representor *vfr_bp = opaque_arg; 5726 unsigned long rep_fc_r2f; 5727 char *end = NULL; 5728 5729 if (!value || !opaque_arg) { 5730 PMD_DRV_LOG(ERR, 5731 "Invalid parameter passed to rep_fc_r2f " 5732 "devargs.\n"); 5733 return -EINVAL; 5734 } 5735 5736 rep_fc_r2f = strtoul(value, &end, 10); 5737 if (end == NULL || *end != '\0' || 5738 (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) { 5739 PMD_DRV_LOG(ERR, 5740 "Invalid parameter passed to rep_fc_r2f " 5741 "devargs.\n"); 5742 return -EINVAL; 5743 } 5744 5745 if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) { 5746 PMD_DRV_LOG(ERR, 5747 "Invalid value passed to rep_fc_r2f devargs.\n"); 5748 return -EINVAL; 5749 } 5750 5751 vfr_bp->flags |= BNXT_REP_FC_R2F_VALID; 5752 vfr_bp->rep_fc_r2f = rep_fc_r2f; 5753 PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f); 5754 5755 return 0; 5756 } 5757 5758 static int 5759 bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key, 5760 const char *value, void *opaque_arg) 5761 { 5762 struct bnxt_representor *vfr_bp = opaque_arg; 5763 unsigned long rep_fc_f2r; 5764 char *end = NULL; 5765 5766 if (!value || !opaque_arg) { 5767 PMD_DRV_LOG(ERR, 5768 "Invalid parameter passed to rep_fc_f2r " 5769 "devargs.\n"); 5770 return -EINVAL; 5771 } 5772 5773 rep_fc_f2r = strtoul(value, &end, 10); 5774 if (end == NULL || *end != '\0' || 5775 (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) { 5776 PMD_DRV_LOG(ERR, 5777 "Invalid parameter passed to rep_fc_f2r " 5778 "devargs.\n"); 5779 return -EINVAL; 5780 } 5781 5782 if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) { 5783 PMD_DRV_LOG(ERR, 5784 "Invalid value passed to rep_fc_f2r devargs.\n"); 5785 return -EINVAL; 5786 } 5787 5788 vfr_bp->flags |= BNXT_REP_FC_F2R_VALID; 5789 vfr_bp->rep_fc_f2r = rep_fc_f2r; 5790 PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r); 5791 5792 return 0; 5793 } 5794 5795 static int 5796 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) 5797 { 5798 struct rte_kvargs *kvlist; 5799 int ret = 0; 5800 5801 if (devargs == NULL) 5802 return 0; 5803 5804 kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args); 5805 if (kvlist == NULL) 5806 return -EINVAL; 5807 5808 /* 5809 * Handler for "flow_xstat" devarg. 5810 * Invoked as for ex: "-a 0000:00:0d.0,flow_xstat=1" 5811 */ 5812 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT, 5813 bnxt_parse_devarg_flow_xstat, bp); 5814 if (ret) 5815 goto err; 5816 5817 /* 5818 * Handler for "max_num_kflows" devarg. 5819 * Invoked as for ex: "-a 000:00:0d.0,max_num_kflows=32" 5820 */ 5821 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS, 5822 bnxt_parse_devarg_max_num_kflows, bp); 5823 if (ret) 5824 goto err; 5825 5826 err: 5827 /* 5828 * Handler for "app-id" devarg. 
5829 * Invoked as for ex: "-a 000:00:0d.0,app-id=1" 5830 */ 5831 rte_kvargs_process(kvlist, BNXT_DEVARG_APP_ID, 5832 bnxt_parse_devarg_app_id, bp); 5833 5834 /* 5835 * Handler for "ieee-1588" devarg. 5836 * Invoked as for ex: "-a 000:00:0d.0,ieee-1588=1" 5837 */ 5838 rte_kvargs_process(kvlist, BNXT_DEVARG_IEEE_1588, 5839 bnxt_parse_devarg_ieee_1588, bp); 5840 5841 rte_kvargs_free(kvlist); 5842 return ret; 5843 } 5844 5845 /* Allocate and initialize various fields in bnxt struct that 5846 * need to be allocated/destroyed only once in the lifetime of the driver 5847 */ 5848 static int bnxt_drv_init(struct rte_eth_dev *eth_dev) 5849 { 5850 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5851 struct bnxt *bp = eth_dev->data->dev_private; 5852 int rc = 0; 5853 5854 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 5855 5856 if (bnxt_vf_pciid(pci_dev->id.device_id)) 5857 bp->flags |= BNXT_FLAG_VF; 5858 5859 if (bnxt_p5_device(pci_dev->id.device_id)) 5860 bp->flags |= BNXT_FLAG_CHIP_P5; 5861 5862 if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 || 5863 pci_dev->id.device_id == BROADCOM_DEV_ID_58804 || 5864 pci_dev->id.device_id == BROADCOM_DEV_ID_58808 || 5865 pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF) 5866 bp->flags |= BNXT_FLAG_STINGRAY; 5867 5868 rc = bnxt_map_pci_bars(eth_dev); 5869 if (rc) { 5870 PMD_DRV_LOG(ERR, 5871 "Failed to initialize board rc: %x\n", rc); 5872 return rc; 5873 } 5874 5875 rc = bnxt_alloc_pf_info(bp); 5876 if (rc) 5877 return rc; 5878 5879 rc = bnxt_alloc_link_info(bp); 5880 if (rc) 5881 return rc; 5882 5883 rc = bnxt_alloc_parent_info(bp); 5884 if (rc) 5885 return rc; 5886 5887 rc = bnxt_alloc_hwrm_resources(bp); 5888 if (rc) { 5889 PMD_DRV_LOG(ERR, 5890 "Failed to allocate response buffer rc: %x\n", rc); 5891 return rc; 5892 } 5893 rc = bnxt_alloc_leds_info(bp); 5894 if (rc) 5895 return rc; 5896 5897 rc = bnxt_alloc_cos_queues(bp); 5898 if (rc) 5899 return rc; 5900 5901 rc = bnxt_init_locks(bp); 5902 if (rc) 5903 return rc; 5904 5905 rc = bnxt_get_config(bp); 5906 if (rc) 5907 return rc; 5908 5909 if (BNXT_TRUFLOW_EN(bp)) { 5910 /* extra mbuf field is required to store CFA code from mark */ 5911 static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = { 5912 .name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME, 5913 .size = sizeof(bnxt_cfa_code_dynfield_t), 5914 .align = alignof(bnxt_cfa_code_dynfield_t), 5915 }; 5916 bnxt_cfa_code_dynfield_offset = 5917 rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc); 5918 if (bnxt_cfa_code_dynfield_offset < 0) { 5919 PMD_DRV_LOG(ERR, 5920 "Failed to register mbuf field for TruFlow mark\n"); 5921 return -rte_errno; 5922 } 5923 } 5924 5925 return rc; 5926 } 5927 5928 static int 5929 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) 5930 { 5931 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5932 static int version_printed; 5933 struct bnxt *bp; 5934 int rc; 5935 5936 if (version_printed++ == 0) 5937 PMD_DRV_LOG(INFO, "%s\n", bnxt_version); 5938 5939 eth_dev->dev_ops = &bnxt_dev_ops; 5940 eth_dev->rx_queue_count = bnxt_rx_queue_count_op; 5941 eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op; 5942 eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op; 5943 eth_dev->rx_pkt_burst = &bnxt_recv_pkts; 5944 eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; 5945 5946 /* 5947 * For secondary processes, we don't initialise any further 5948 * as primary has already done this work. 
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	bp = eth_dev->data->dev_private;

	/* set the default app id */
	bp->app_id = bnxt_ulp_default_app_id_get();

	/* Parse dev arguments passed on when starting the DPDK application. */
	rc = bnxt_parse_dev_args(bp, pci_dev->device.devargs);
	if (rc)
		goto error_free;

	rc = bnxt_drv_init(eth_dev);
	if (rc)
		goto error_free;

	rc = bnxt_init_resources(bp, false);
	if (rc)
		goto error_free;

	rc = bnxt_alloc_stats_mem(bp);
	if (rc)
		goto error_free;

	PMD_DRV_LOG(INFO,
		    "Found %s device at mem %" PRIX64 ", node addr %pM\n",
		    DRV_MODULE_NAME,
		    pci_dev->mem_resource[0].phys_addr,
		    pci_dev->mem_resource[0].addr);

	return 0;

error_free:
	bnxt_dev_uninit(eth_dev);
	return rc;
}

static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx)
{
	if (!ctx)
		return;

	rte_free(ctx->va);

	ctx->va = NULL;
	ctx->dma = RTE_BAD_IOVA;
	ctx->ctx_id = BNXT_CTX_VAL_INVAL;
}

static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp)
{
	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
				  CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				  bp->flow_stat->rx_fc_out_tbl.ctx_id,
				  bp->flow_stat->max_fc,
				  false);

	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
				  CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				  bp->flow_stat->tx_fc_out_tbl.ctx_id,
				  bp->flow_stat->max_fc,
				  false);

	if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id);
	bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;

	if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id);
	bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;

	if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id);
	bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;

	if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id);
	bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
}

static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp)
{
	bnxt_unregister_fc_ctx_mem(bp);

	bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl);
	bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl);
	bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl);
	bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl);
}

static void bnxt_uninit_ctx_mem(struct bnxt *bp)
{
	if (BNXT_FLOW_XSTATS_EN(bp))
		bnxt_uninit_fc_ctx_mem(bp);
}

static void
bnxt_free_error_recovery_info(struct bnxt *bp)
{
	rte_free(bp->recovery_info);
	bp->recovery_info = NULL;
	bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
}

static int
bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
{
	int rc;

	bnxt_free_int(bp);
	bnxt_free_mem(bp, reconfig_dev);

	bnxt_hwrm_func_buf_unrgtr(bp);
	if (bp->pf != NULL) {
		rte_free(bp->pf->vf_req_buf);
		bp->pf->vf_req_buf = NULL;
	}

	rc = bnxt_hwrm_func_driver_unregister(bp);
	bp->flags &= ~BNXT_FLAG_REGISTERED;
	bnxt_free_ctx_mem(bp);
	if (!reconfig_dev) {
		bnxt_free_hwrm_resources(bp);
		bnxt_free_error_recovery_info(bp);
		rte_free(bp->mcast_addr_list);
		bp->mcast_addr_list = NULL;
		rte_free(bp->rss_conf.rss_key);
		bp->rss_conf.rss_key = NULL;
	}

	bnxt_uninit_ctx_mem(bp);

	bnxt_free_flow_stats_info(bp);
	bnxt_free_switch_domain(bp);
	rte_free(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	return rc;
}

static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		bnxt_dev_close_op(eth_dev);

	return 0;
}

static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_dev *vf_rep_eth_dev;
	int ret = 0, i;

	if (!bp)
		return -EINVAL;

	for (i = 0; i < bp->num_reps; i++) {
		vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev;
		if (!vf_rep_eth_dev)
			continue;
		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n",
			    vf_rep_eth_dev->data->port_id);
		rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit);
	}
	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n",
		    eth_dev->data->port_id);
	ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit);

	return ret;
}

static void bnxt_free_rep_info(struct bnxt *bp)
{
	rte_free(bp->rep_info);
	bp->rep_info = NULL;
	rte_free(bp->cfa_code_map);
	bp->cfa_code_map = NULL;
}

static int bnxt_init_rep_info(struct bnxt *bp)
{
	int i = 0, rc;

	if (bp->rep_info)
		return 0;

	bp->rep_info = rte_zmalloc("bnxt_rep_info",
				   sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS(bp),
				   0);
	if (!bp->rep_info) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
		return -ENOMEM;
	}
	bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
				       sizeof(*bp->cfa_code_map) *
				       BNXT_MAX_CFA_CODE, 0);
	if (!bp->cfa_code_map) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
		bnxt_free_rep_info(bp);
		return -ENOMEM;
	}

	for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
		bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;

	rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
		bnxt_free_rep_info(bp);
		return rc;
	}

	rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
		bnxt_free_rep_info(bp);
		return rc;
	}

	return rc;
}

static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
			       struct rte_eth_devargs *eth_da,
			       struct rte_eth_dev *backing_eth_dev,
			       const char *dev_args)
{
	struct rte_eth_dev *vf_rep_eth_dev;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct bnxt *backing_bp = backing_eth_dev->data->dev_private;
	uint16_t max_vf_reps = BNXT_MAX_VF_REPS(backing_bp);

	uint16_t num_rep;
	int i, ret = 0;
	struct rte_kvargs *kvlist = NULL;

	if (eth_da->type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (eth_da->type != RTE_ETH_REPRESENTOR_VF) {
		PMD_DRV_LOG(ERR, "unsupported representor type %d\n",
			    eth_da->type);
		return -ENOTSUP;
	}
	num_rep = eth_da->nb_representor_ports;
	if (num_rep > max_vf_reps) {
		PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
			    num_rep, max_vf_reps);
		return -EINVAL;
	}

	if (num_rep >= RTE_MAX_ETHPORTS) {
		PMD_DRV_LOG(ERR,
			    "nb_representor_ports = %d > %d MAX ETHPORTS\n",
			    num_rep, RTE_MAX_ETHPORTS);
		return -EINVAL;
	}

	if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
		PMD_DRV_LOG(ERR,
			    "Not a PF or trusted VF. No Representor support\n");
		/* Returning an error is not an option.
		 * Applications are not handling this correctly
		 */
		return 0;
	}

	if (bnxt_init_rep_info(backing_bp))
		return 0;

	for (i = 0; i < num_rep; i++) {
		struct bnxt_representor representor = {
			.vf_id = eth_da->representor_ports[i],
			.switch_domain_id = backing_bp->switch_domain_id,
			.parent_dev = backing_eth_dev
		};

		if (representor.vf_id >= max_vf_reps) {
			PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
				    representor.vf_id, max_vf_reps);
			continue;
		}

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			 pci_dev->device.name, eth_da->representor_ports[i]);

		kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args);
		if (kvlist) {
			/*
			 * Handler for "rep_is_pf" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF,
						 bnxt_parse_devarg_rep_is_pf,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep_based_pf" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist,
						 BNXT_DEVARG_REP_BASED_PF,
						 bnxt_parse_devarg_rep_based_pf,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep_q_r2f" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index> rep-q-r2f=<COS queue index>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F,
						 bnxt_parse_devarg_rep_q_r2f,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep_q_f2r" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index> rep-q-f2r=<COS queue index>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R,
						 bnxt_parse_devarg_rep_q_f2r,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep_fc_r2f" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index> rep-fc-r2f=<0|1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F,
						 bnxt_parse_devarg_rep_fc_r2f,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep_fc_f2r" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index> rep-fc-f2r=<0|1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R,
						 bnxt_parse_devarg_rep_fc_f2r,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
		}

		ret = rte_eth_dev_create(&pci_dev->device, name,
					 sizeof(struct bnxt_representor),
					 NULL, NULL,
					 bnxt_representor_init,
					 &representor);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to create bnxt vf "
				    "representor %s.", name);
			goto err;
		}

		vf_rep_eth_dev = rte_eth_dev_allocated(name);
		if (!vf_rep_eth_dev) {
			PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
				    " for VF-Rep: %s.", name);
			ret = -ENODEV;
			goto err;
		}

		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n",
			    backing_eth_dev->data->port_id);
		backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
			vf_rep_eth_dev;
		backing_bp->num_reps++;
	}

	rte_kvargs_free(kvlist);
	return 0;

err:
	/* If num_rep > 1, roll back the already created representor
	 * ports, since we'll be failing the probe anyway.
	 */
	if (num_rep > 1)
		bnxt_pci_remove_dev_with_reps(backing_eth_dev);
	rte_errno = -ret;
	rte_kvargs_free(kvlist);

	return ret;
}

static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			  struct rte_pci_device *pci_dev)
{
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	struct rte_eth_dev *backing_eth_dev;
	uint16_t num_rep;
	int ret = 0;

	if (pci_dev->device.devargs) {
		ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					    &eth_da);
		if (ret)
			return ret;
	}

	num_rep = eth_da.nb_representor_ports;
	PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
		    num_rep);

	/* We could come here after the first level of probe has already been
	 * invoked as part of an application bringup (OVS-DPDK vswitchd), so
	 * first check for an already allocated eth_dev for the backing device
	 * (PF/Trusted VF).
	 */
	backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (backing_eth_dev == NULL) {
		ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
					 sizeof(struct bnxt),
					 eth_dev_pci_specific_init, pci_dev,
					 bnxt_dev_init, NULL);

		if (ret || !num_rep)
			return ret;

		backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	}
	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n",
		    backing_eth_dev->data->port_id);

	if (!num_rep)
		return ret;

	/* probe representor ports now */
	ret = bnxt_rep_port_probe(pci_dev, &eth_da, backing_eth_dev,
				  pci_dev->device.devargs->args);

	return ret;
}

static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return 0; /* Invoked typically only by OVS-DPDK; by the
			   * time it comes here the eth_dev has already been
			   * deleted by rte_eth_dev_close(), so returning 0
			   * will at least allow proper cleanup to proceed.
			   */

	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_representor_uninit);
		else
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_dev_uninit);
	} else {
		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
	}
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_INTR_RMV |
		     RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
					       * and OVS-DPDK
					       */
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

struct tf *bnxt_get_tfp_session(struct bnxt *bp, enum bnxt_session_type type)
{
	return (type >= BNXT_SESSION_TYPE_LAST) ?
		&bp->tfp[BNXT_SESSION_TYPE_REGULAR] : &bp->tfp[type];
}

RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");