1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2014-2021 Broadcom 3 * All rights reserved. 4 */ 5 6 #include <inttypes.h> 7 #include <stdbool.h> 8 9 #include <rte_dev.h> 10 #include <ethdev_driver.h> 11 #include <ethdev_pci.h> 12 #include <rte_malloc.h> 13 #include <rte_cycles.h> 14 #include <rte_alarm.h> 15 #include <rte_kvargs.h> 16 #include <rte_vect.h> 17 18 #include "bnxt.h" 19 #include "bnxt_filter.h" 20 #include "bnxt_hwrm.h" 21 #include "bnxt_irq.h" 22 #include "bnxt_reps.h" 23 #include "bnxt_ring.h" 24 #include "bnxt_rxq.h" 25 #include "bnxt_rxr.h" 26 #include "bnxt_stats.h" 27 #include "bnxt_txq.h" 28 #include "bnxt_txr.h" 29 #include "bnxt_vnic.h" 30 #include "hsi_struct_def_dpdk.h" 31 #include "bnxt_nvm_defs.h" 32 #include "bnxt_tf_common.h" 33 #include "ulp_flow_db.h" 34 #include "rte_pmd_bnxt.h" 35 36 #define DRV_MODULE_NAME "bnxt" 37 static const char bnxt_version[] = 38 "Broadcom NetXtreme driver " DRV_MODULE_NAME; 39 40 /* 41 * The set of PCI devices this driver supports 42 */ 43 static const struct rte_pci_id bnxt_pci_id_map[] = { 44 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 45 BROADCOM_DEV_ID_STRATUS_NIC_VF1) }, 46 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 47 BROADCOM_DEV_ID_STRATUS_NIC_VF2) }, 48 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) }, 49 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) }, 50 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) }, 51 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) }, 52 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) }, 53 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) }, 54 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) }, 55 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) }, 56 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) }, 57 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) }, 58 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) }, 59 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) }, 60 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) }, 61 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) }, 62 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) }, 63 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) }, 64 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) }, 65 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) }, 66 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) }, 67 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) }, 68 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) }, 69 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) }, 70 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) }, 71 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) }, 72 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) }, 73 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) }, 74 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) }, 75 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) }, 76 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) }, 77 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) }, 78 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) }, 79 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) }, 80 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) }, 81 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) }, 82 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) }, 83 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58812) }, 84 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58814) }, 85 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818) }, 86 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818_VF) }, 87 { .vendor_id = 0, /* sentinel */ }, 88 }; 89 90 #define BNXT_DEVARG_ACCUM_STATS "accum-stats" 91 #define BNXT_DEVARG_FLOW_XSTAT "flow-xstat" 92 #define BNXT_DEVARG_MAX_NUM_KFLOWS "max-num-kflows" 93 #define BNXT_DEVARG_REPRESENTOR "representor" 94 #define BNXT_DEVARG_REP_BASED_PF "rep-based-pf" 95 #define BNXT_DEVARG_REP_IS_PF "rep-is-pf" 96 #define BNXT_DEVARG_REP_Q_R2F "rep-q-r2f" 97 #define BNXT_DEVARG_REP_Q_F2R "rep-q-f2r" 98 #define BNXT_DEVARG_REP_FC_R2F "rep-fc-r2f" 99 #define BNXT_DEVARG_REP_FC_F2R "rep-fc-f2r" 100 #define BNXT_DEVARG_APP_ID "app-id" 101 102 static const char *const bnxt_dev_args[] = { 103 BNXT_DEVARG_REPRESENTOR, 104 BNXT_DEVARG_ACCUM_STATS, 105 BNXT_DEVARG_FLOW_XSTAT, 106 BNXT_DEVARG_MAX_NUM_KFLOWS, 107 BNXT_DEVARG_REP_BASED_PF, 108 BNXT_DEVARG_REP_IS_PF, 109 BNXT_DEVARG_REP_Q_R2F, 110 BNXT_DEVARG_REP_Q_F2R, 111 BNXT_DEVARG_REP_FC_R2F, 112 BNXT_DEVARG_REP_FC_F2R, 113 BNXT_DEVARG_APP_ID, 114 NULL 115 }; 116 117 /* 118 * accum-stats == false to disable flow counter accumulation 119 * accum-stats == true to enable flow counter accumulation 120 */ 121 #define BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats) ((accum_stats) > 1) 122 123 /* 124 * app-id = an non-negative 8-bit number 125 */ 126 #define BNXT_DEVARG_APP_ID_INVALID(val) ((val) > 255) 127 128 /* 129 * flow_xstat == false to disable the feature 130 * flow_xstat == true to enable the feature 131 */ 132 #define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat) ((flow_xstat) > 1) 133 134 /* 135 * rep_is_pf == false to indicate VF representor 136 * rep_is_pf == true to indicate PF representor 137 */ 138 #define BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf) ((rep_is_pf) > 1) 139 140 /* 141 * rep_based_pf == Physical index of the PF 142 */ 143 #define BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf) ((rep_based_pf) > 15) 144 /* 145 * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction 146 */ 147 #define BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f) ((rep_q_r2f) > 3) 148 149 /* 150 * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction 151 */ 152 #define BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r) ((rep_q_f2r) > 3) 153 154 /* 155 * rep_fc_r2f == Flow control for the representor to endpoint direction 156 */ 157 #define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f) ((rep_fc_r2f) > 1) 158 159 /* 160 * rep_fc_f2r == Flow control for the endpoint to representor direction 161 */ 162 #define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r) ((rep_fc_f2r) > 1) 163 164 int bnxt_cfa_code_dynfield_offset = -1; 165 166 /* 167 * max_num_kflows must be >= 32 168 * and must be a power-of-2 supported value 169 * return: 1 -> invalid 170 * 0 -> valid 171 */ 172 static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows) 173 { 174 if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows)) 175 return 1; 176 return 0; 177 } 178 179 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); 180 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); 181 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev); 182 static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev); 183 static void bnxt_cancel_fw_health_check(struct bnxt *bp); 184 static int bnxt_restore_vlan_filters(struct bnxt *bp); 185 static void bnxt_dev_recover(void *arg); 186 static void bnxt_free_error_recovery_info(struct bnxt *bp); 187 static void bnxt_free_rep_info(struct bnxt *bp); 188 189 int is_bnxt_in_error(struct bnxt *bp) 190 { 191 if (bp->flags & BNXT_FLAG_FATAL_ERROR) 192 return -EIO; 193 if (bp->flags & BNXT_FLAG_FW_RESET) 194 return -EBUSY; 195 196 return 0; 197 } 198 199 /***********************/ 200 201 /* 202 * High level utility functions 203 */ 204 205 static uint16_t bnxt_rss_ctxts(const struct bnxt *bp) 206 { 207 unsigned int num_rss_rings = RTE_MIN(bp->rx_nr_rings, 208 BNXT_RSS_TBL_SIZE_P5); 209 210 if (!BNXT_CHIP_P5(bp)) 211 return 1; 212 213 return RTE_ALIGN_MUL_CEIL(num_rss_rings, 214 BNXT_RSS_ENTRIES_PER_CTX_P5) / 215 BNXT_RSS_ENTRIES_PER_CTX_P5; 216 } 217 218 uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp) 219 { 220 if (!BNXT_CHIP_P5(bp)) 221 return HW_HASH_INDEX_SIZE; 222 223 return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_P5; 224 } 225 226 static void bnxt_free_parent_info(struct bnxt *bp) 227 { 228 rte_free(bp->parent); 229 bp->parent = NULL; 230 } 231 232 static void bnxt_free_pf_info(struct bnxt *bp) 233 { 234 rte_free(bp->pf); 235 bp->pf = NULL; 236 } 237 238 static void bnxt_free_link_info(struct bnxt *bp) 239 { 240 rte_free(bp->link_info); 241 bp->link_info = NULL; 242 } 243 244 static void bnxt_free_leds_info(struct bnxt *bp) 245 { 246 if (BNXT_VF(bp)) 247 return; 248 249 rte_free(bp->leds); 250 bp->leds = NULL; 251 } 252 253 static void bnxt_free_flow_stats_info(struct bnxt *bp) 254 { 255 rte_free(bp->flow_stat); 256 bp->flow_stat = NULL; 257 } 258 259 static void bnxt_free_cos_queues(struct bnxt *bp) 260 { 261 rte_free(bp->rx_cos_queue); 262 bp->rx_cos_queue = NULL; 263 rte_free(bp->tx_cos_queue); 264 bp->tx_cos_queue = NULL; 265 } 266 267 static void bnxt_free_mem(struct bnxt *bp, bool reconfig) 268 { 269 bnxt_free_filter_mem(bp); 270 bnxt_free_vnic_attributes(bp); 271 bnxt_free_vnic_mem(bp); 272 273 /* tx/rx rings are configured as part of *_queue_setup callbacks. 274 * If the number of rings change across fw update, 275 * we don't have much choice except to warn the user. 276 */ 277 if (!reconfig) { 278 bnxt_free_stats(bp); 279 bnxt_free_tx_rings(bp); 280 bnxt_free_rx_rings(bp); 281 } 282 bnxt_free_async_cp_ring(bp); 283 bnxt_free_rxtx_nq_ring(bp); 284 285 rte_free(bp->grp_info); 286 bp->grp_info = NULL; 287 } 288 289 static int bnxt_alloc_parent_info(struct bnxt *bp) 290 { 291 bp->parent = rte_zmalloc("bnxt_parent_info", 292 sizeof(struct bnxt_parent_info), 0); 293 if (bp->parent == NULL) 294 return -ENOMEM; 295 296 return 0; 297 } 298 299 static int bnxt_alloc_pf_info(struct bnxt *bp) 300 { 301 bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0); 302 if (bp->pf == NULL) 303 return -ENOMEM; 304 305 return 0; 306 } 307 308 static int bnxt_alloc_link_info(struct bnxt *bp) 309 { 310 bp->link_info = 311 rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0); 312 if (bp->link_info == NULL) 313 return -ENOMEM; 314 315 return 0; 316 } 317 318 static int bnxt_alloc_leds_info(struct bnxt *bp) 319 { 320 if (BNXT_VF(bp)) 321 return 0; 322 323 bp->leds = rte_zmalloc("bnxt_leds", 324 BNXT_MAX_LED * sizeof(struct bnxt_led_info), 325 0); 326 if (bp->leds == NULL) 327 return -ENOMEM; 328 329 return 0; 330 } 331 332 static int bnxt_alloc_cos_queues(struct bnxt *bp) 333 { 334 bp->rx_cos_queue = 335 rte_zmalloc("bnxt_rx_cosq", 336 BNXT_COS_QUEUE_COUNT * 337 sizeof(struct bnxt_cos_queue_info), 338 0); 339 if (bp->rx_cos_queue == NULL) 340 return -ENOMEM; 341 342 bp->tx_cos_queue = 343 rte_zmalloc("bnxt_tx_cosq", 344 BNXT_COS_QUEUE_COUNT * 345 sizeof(struct bnxt_cos_queue_info), 346 0); 347 if (bp->tx_cos_queue == NULL) 348 return -ENOMEM; 349 350 return 0; 351 } 352 353 static int bnxt_alloc_flow_stats_info(struct bnxt *bp) 354 { 355 bp->flow_stat = rte_zmalloc("bnxt_flow_xstat", 356 sizeof(struct bnxt_flow_stat_info), 0); 357 if (bp->flow_stat == NULL) 358 return -ENOMEM; 359 360 return 0; 361 } 362 363 static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) 364 { 365 int rc; 366 367 rc = bnxt_alloc_ring_grps(bp); 368 if (rc) 369 goto alloc_mem_err; 370 371 rc = bnxt_alloc_async_ring_struct(bp); 372 if (rc) 373 goto alloc_mem_err; 374 375 rc = bnxt_alloc_vnic_mem(bp); 376 if (rc) 377 goto alloc_mem_err; 378 379 rc = bnxt_alloc_vnic_attributes(bp); 380 if (rc) 381 goto alloc_mem_err; 382 383 rc = bnxt_alloc_filter_mem(bp); 384 if (rc) 385 goto alloc_mem_err; 386 387 rc = bnxt_alloc_async_cp_ring(bp); 388 if (rc) 389 goto alloc_mem_err; 390 391 rc = bnxt_alloc_rxtx_nq_ring(bp); 392 if (rc) 393 goto alloc_mem_err; 394 395 if (BNXT_FLOW_XSTATS_EN(bp)) { 396 rc = bnxt_alloc_flow_stats_info(bp); 397 if (rc) 398 goto alloc_mem_err; 399 } 400 401 return 0; 402 403 alloc_mem_err: 404 bnxt_free_mem(bp, reconfig); 405 return rc; 406 } 407 408 static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id) 409 { 410 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 411 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 412 uint64_t rx_offloads = dev_conf->rxmode.offloads; 413 struct bnxt_rx_queue *rxq; 414 unsigned int j; 415 int rc; 416 417 rc = bnxt_vnic_grp_alloc(bp, vnic); 418 if (rc) 419 goto err_out; 420 421 PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 422 vnic_id, vnic, vnic->fw_grp_ids); 423 424 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 425 if (rc) 426 goto err_out; 427 428 /* Alloc RSS context only if RSS mode is enabled */ 429 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) { 430 int j, nr_ctxs = bnxt_rss_ctxts(bp); 431 432 /* RSS table size in Thor is 512. 433 * Cap max Rx rings to same value 434 */ 435 if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_P5) { 436 PMD_DRV_LOG(ERR, "RxQ cnt %d > reta_size %d\n", 437 bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_P5); 438 goto err_out; 439 } 440 441 rc = 0; 442 for (j = 0; j < nr_ctxs; j++) { 443 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j); 444 if (rc) 445 break; 446 } 447 if (rc) { 448 PMD_DRV_LOG(ERR, 449 "HWRM vnic %d ctx %d alloc failure rc: %x\n", 450 vnic_id, j, rc); 451 goto err_out; 452 } 453 vnic->num_lb_ctxts = nr_ctxs; 454 } 455 456 /* 457 * Firmware sets pf pair in default vnic cfg. If the VLAN strip 458 * setting is not available at this time, it will not be 459 * configured correctly in the CFA. 460 */ 461 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 462 vnic->vlan_strip = true; 463 else 464 vnic->vlan_strip = false; 465 466 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 467 if (rc) 468 goto err_out; 469 470 rc = bnxt_set_hwrm_vnic_filters(bp, vnic); 471 if (rc) 472 goto err_out; 473 474 for (j = 0; j < bp->rx_num_qs_per_vnic; j++) { 475 rxq = bp->eth_dev->data->rx_queues[j]; 476 477 PMD_DRV_LOG(DEBUG, 478 "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n", 479 j, rxq->vnic, rxq->vnic->fw_grp_ids); 480 481 if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start) 482 rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; 483 else 484 vnic->rx_queue_cnt++; 485 486 if (!rxq->rx_deferred_start) { 487 bp->eth_dev->data->rx_queue_state[j] = 488 RTE_ETH_QUEUE_STATE_STARTED; 489 rxq->rx_started = true; 490 } 491 } 492 493 PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt); 494 495 rc = bnxt_vnic_rss_configure(bp, vnic); 496 if (rc) 497 goto err_out; 498 499 bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 500 501 rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 502 (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) ? 503 true : false); 504 if (rc) 505 goto err_out; 506 507 return 0; 508 err_out: 509 PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n", 510 vnic_id, rc); 511 return rc; 512 } 513 514 static int bnxt_register_fc_ctx_mem(struct bnxt *bp) 515 { 516 int rc = 0; 517 518 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma, 519 &bp->flow_stat->rx_fc_in_tbl.ctx_id); 520 if (rc) 521 return rc; 522 523 PMD_DRV_LOG(DEBUG, 524 "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p" 525 " rx_fc_in_tbl.ctx_id = %d\n", 526 bp->flow_stat->rx_fc_in_tbl.va, 527 (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma), 528 bp->flow_stat->rx_fc_in_tbl.ctx_id); 529 530 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma, 531 &bp->flow_stat->rx_fc_out_tbl.ctx_id); 532 if (rc) 533 return rc; 534 535 PMD_DRV_LOG(DEBUG, 536 "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p" 537 " rx_fc_out_tbl.ctx_id = %d\n", 538 bp->flow_stat->rx_fc_out_tbl.va, 539 (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma), 540 bp->flow_stat->rx_fc_out_tbl.ctx_id); 541 542 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma, 543 &bp->flow_stat->tx_fc_in_tbl.ctx_id); 544 if (rc) 545 return rc; 546 547 PMD_DRV_LOG(DEBUG, 548 "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p" 549 " tx_fc_in_tbl.ctx_id = %d\n", 550 bp->flow_stat->tx_fc_in_tbl.va, 551 (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma), 552 bp->flow_stat->tx_fc_in_tbl.ctx_id); 553 554 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma, 555 &bp->flow_stat->tx_fc_out_tbl.ctx_id); 556 if (rc) 557 return rc; 558 559 PMD_DRV_LOG(DEBUG, 560 "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p" 561 " tx_fc_out_tbl.ctx_id = %d\n", 562 bp->flow_stat->tx_fc_out_tbl.va, 563 (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma), 564 bp->flow_stat->tx_fc_out_tbl.ctx_id); 565 566 memset(bp->flow_stat->rx_fc_out_tbl.va, 567 0, 568 bp->flow_stat->rx_fc_out_tbl.size); 569 rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 570 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 571 bp->flow_stat->rx_fc_out_tbl.ctx_id, 572 bp->flow_stat->max_fc, 573 true); 574 if (rc) 575 return rc; 576 577 memset(bp->flow_stat->tx_fc_out_tbl.va, 578 0, 579 bp->flow_stat->tx_fc_out_tbl.size); 580 rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 581 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 582 bp->flow_stat->tx_fc_out_tbl.ctx_id, 583 bp->flow_stat->max_fc, 584 true); 585 586 return rc; 587 } 588 589 static int bnxt_alloc_ctx_mem_buf(struct bnxt *bp, char *type, size_t size, 590 struct bnxt_ctx_mem_buf_info *ctx) 591 { 592 if (!ctx) 593 return -EINVAL; 594 595 ctx->va = rte_zmalloc_socket(type, size, 0, 596 bp->eth_dev->device->numa_node); 597 if (ctx->va == NULL) 598 return -ENOMEM; 599 rte_mem_lock_page(ctx->va); 600 ctx->size = size; 601 ctx->dma = rte_mem_virt2iova(ctx->va); 602 if (ctx->dma == RTE_BAD_IOVA) 603 return -ENOMEM; 604 605 return 0; 606 } 607 608 static int bnxt_init_fc_ctx_mem(struct bnxt *bp) 609 { 610 struct rte_pci_device *pdev = bp->pdev; 611 char type[RTE_MEMZONE_NAMESIZE]; 612 uint16_t max_fc; 613 int rc = 0; 614 615 max_fc = bp->flow_stat->max_fc; 616 617 sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 618 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 619 /* 4 bytes for each counter-id */ 620 rc = bnxt_alloc_ctx_mem_buf(bp, type, 621 max_fc * 4, 622 &bp->flow_stat->rx_fc_in_tbl); 623 if (rc) 624 return rc; 625 626 sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 627 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 628 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 629 rc = bnxt_alloc_ctx_mem_buf(bp, type, 630 max_fc * 16, 631 &bp->flow_stat->rx_fc_out_tbl); 632 if (rc) 633 return rc; 634 635 sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 636 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 637 /* 4 bytes for each counter-id */ 638 rc = bnxt_alloc_ctx_mem_buf(bp, type, 639 max_fc * 4, 640 &bp->flow_stat->tx_fc_in_tbl); 641 if (rc) 642 return rc; 643 644 sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 645 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 646 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 647 rc = bnxt_alloc_ctx_mem_buf(bp, type, 648 max_fc * 16, 649 &bp->flow_stat->tx_fc_out_tbl); 650 if (rc) 651 return rc; 652 653 rc = bnxt_register_fc_ctx_mem(bp); 654 655 return rc; 656 } 657 658 static int bnxt_init_ctx_mem(struct bnxt *bp) 659 { 660 int rc = 0; 661 662 if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) || 663 !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) || 664 !BNXT_FLOW_XSTATS_EN(bp)) 665 return 0; 666 667 rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc); 668 if (rc) 669 return rc; 670 671 rc = bnxt_init_fc_ctx_mem(bp); 672 673 return rc; 674 } 675 676 static int bnxt_update_phy_setting(struct bnxt *bp) 677 { 678 struct rte_eth_link new; 679 int rc; 680 681 rc = bnxt_get_hwrm_link_config(bp, &new); 682 if (rc) { 683 PMD_DRV_LOG(ERR, "Failed to get link settings\n"); 684 return rc; 685 } 686 687 /* 688 * On BCM957508-N2100 adapters, FW will not allow any user other 689 * than BMC to shutdown the port. bnxt_get_hwrm_link_config() call 690 * always returns link up. Force phy update always in that case. 691 */ 692 if (!new.link_status || IS_BNXT_DEV_957508_N2100(bp)) { 693 rc = bnxt_set_hwrm_link_config(bp, true); 694 if (rc) { 695 PMD_DRV_LOG(ERR, "Failed to update PHY settings\n"); 696 return rc; 697 } 698 } 699 700 return rc; 701 } 702 703 static void bnxt_free_prev_ring_stats(struct bnxt *bp) 704 { 705 rte_free(bp->prev_rx_ring_stats); 706 rte_free(bp->prev_tx_ring_stats); 707 708 bp->prev_rx_ring_stats = NULL; 709 bp->prev_tx_ring_stats = NULL; 710 } 711 712 static int bnxt_alloc_prev_ring_stats(struct bnxt *bp) 713 { 714 bp->prev_rx_ring_stats = rte_zmalloc("bnxt_prev_rx_ring_stats", 715 sizeof(struct bnxt_ring_stats) * 716 bp->rx_cp_nr_rings, 717 0); 718 if (bp->prev_rx_ring_stats == NULL) 719 return -ENOMEM; 720 721 bp->prev_tx_ring_stats = rte_zmalloc("bnxt_prev_tx_ring_stats", 722 sizeof(struct bnxt_ring_stats) * 723 bp->tx_cp_nr_rings, 724 0); 725 if (bp->prev_tx_ring_stats == NULL) 726 goto error; 727 728 return 0; 729 730 error: 731 bnxt_free_prev_ring_stats(bp); 732 return -ENOMEM; 733 } 734 735 static int bnxt_start_nic(struct bnxt *bp) 736 { 737 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); 738 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 739 uint32_t intr_vector = 0; 740 uint32_t queue_id, base = BNXT_MISC_VEC_ID; 741 uint32_t vec = BNXT_MISC_VEC_ID; 742 unsigned int i, j; 743 int rc; 744 745 if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) { 746 bp->eth_dev->data->dev_conf.rxmode.offloads |= 747 DEV_RX_OFFLOAD_JUMBO_FRAME; 748 bp->flags |= BNXT_FLAG_JUMBO; 749 } else { 750 bp->eth_dev->data->dev_conf.rxmode.offloads &= 751 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 752 bp->flags &= ~BNXT_FLAG_JUMBO; 753 } 754 755 /* THOR does not support ring groups. 756 * But we will use the array to save RSS context IDs. 757 */ 758 if (BNXT_CHIP_P5(bp)) 759 bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5; 760 761 rc = bnxt_alloc_all_hwrm_stat_ctxs(bp); 762 if (rc) { 763 PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc); 764 goto err_out; 765 } 766 767 rc = bnxt_alloc_hwrm_rings(bp); 768 if (rc) { 769 PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc); 770 goto err_out; 771 } 772 773 rc = bnxt_alloc_all_hwrm_ring_grps(bp); 774 if (rc) { 775 PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc); 776 goto err_out; 777 } 778 779 if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)) 780 goto skip_cosq_cfg; 781 782 for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) { 783 if (bp->rx_cos_queue[i].id != 0xff) { 784 struct bnxt_vnic_info *vnic = &bp->vnic_info[j++]; 785 786 if (!vnic) { 787 PMD_DRV_LOG(ERR, 788 "Num pools more than FW profile\n"); 789 rc = -EINVAL; 790 goto err_out; 791 } 792 vnic->cos_queue_id = bp->rx_cos_queue[i].id; 793 bp->rx_cosq_cnt++; 794 } 795 } 796 797 skip_cosq_cfg: 798 rc = bnxt_mq_rx_configure(bp); 799 if (rc) { 800 PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc); 801 goto err_out; 802 } 803 804 /* default vnic 0 */ 805 rc = bnxt_setup_one_vnic(bp, 0); 806 if (rc) 807 goto err_out; 808 /* VNIC configuration */ 809 if (BNXT_RFS_NEEDS_VNIC(bp)) { 810 for (i = 1; i < bp->nr_vnics; i++) { 811 rc = bnxt_setup_one_vnic(bp, i); 812 if (rc) 813 goto err_out; 814 } 815 } 816 817 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); 818 if (rc) { 819 PMD_DRV_LOG(ERR, 820 "HWRM cfa l2 rx mask failure rc: %x\n", rc); 821 goto err_out; 822 } 823 824 /* check and configure queue intr-vector mapping */ 825 if ((rte_intr_cap_multiple(intr_handle) || 826 !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && 827 bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { 828 intr_vector = bp->eth_dev->data->nb_rx_queues; 829 PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector); 830 if (intr_vector > bp->rx_cp_nr_rings) { 831 PMD_DRV_LOG(ERR, "At most %d intr queues supported", 832 bp->rx_cp_nr_rings); 833 return -ENOTSUP; 834 } 835 rc = rte_intr_efd_enable(intr_handle, intr_vector); 836 if (rc) 837 return rc; 838 } 839 840 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 841 intr_handle->intr_vec = 842 rte_zmalloc("intr_vec", 843 bp->eth_dev->data->nb_rx_queues * 844 sizeof(int), 0); 845 if (intr_handle->intr_vec == NULL) { 846 PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" 847 " intr_vec", bp->eth_dev->data->nb_rx_queues); 848 rc = -ENOMEM; 849 goto err_out; 850 } 851 PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p " 852 "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n", 853 intr_handle->intr_vec, intr_handle->nb_efd, 854 intr_handle->max_intr); 855 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; 856 queue_id++) { 857 intr_handle->intr_vec[queue_id] = 858 vec + BNXT_RX_VEC_START; 859 if (vec < base + intr_handle->nb_efd - 1) 860 vec++; 861 } 862 } 863 864 /* enable uio/vfio intr/eventfd mapping */ 865 rc = rte_intr_enable(intr_handle); 866 #ifndef RTE_EXEC_ENV_FREEBSD 867 /* In FreeBSD OS, nic_uio driver does not support interrupts */ 868 if (rc) 869 goto err_out; 870 #endif 871 872 rc = bnxt_update_phy_setting(bp); 873 if (rc) 874 goto err_out; 875 876 bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0); 877 if (!bp->mark_table) 878 PMD_DRV_LOG(ERR, "Allocation of mark table failed\n"); 879 880 return 0; 881 882 err_out: 883 /* Some of the error status returned by FW may not be from errno.h */ 884 if (rc > 0) 885 rc = -EIO; 886 887 return rc; 888 } 889 890 static int bnxt_shutdown_nic(struct bnxt *bp) 891 { 892 bnxt_free_all_hwrm_resources(bp); 893 bnxt_free_all_filters(bp); 894 bnxt_free_all_vnics(bp); 895 return 0; 896 } 897 898 /* 899 * Device configuration and status function 900 */ 901 902 uint32_t bnxt_get_speed_capabilities(struct bnxt *bp) 903 { 904 uint32_t link_speed = 0; 905 uint32_t speed_capa = 0; 906 907 if (bp->link_info == NULL) 908 return 0; 909 910 link_speed = bp->link_info->support_speeds; 911 912 /* If PAM4 is configured, use PAM4 supported speed */ 913 if (link_speed == 0 && bp->link_info->support_pam4_speeds > 0) 914 link_speed = bp->link_info->support_pam4_speeds; 915 916 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB) 917 speed_capa |= ETH_LINK_SPEED_100M; 918 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD) 919 speed_capa |= ETH_LINK_SPEED_100M_HD; 920 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) 921 speed_capa |= ETH_LINK_SPEED_1G; 922 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB) 923 speed_capa |= ETH_LINK_SPEED_2_5G; 924 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB) 925 speed_capa |= ETH_LINK_SPEED_10G; 926 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB) 927 speed_capa |= ETH_LINK_SPEED_20G; 928 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB) 929 speed_capa |= ETH_LINK_SPEED_25G; 930 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB) 931 speed_capa |= ETH_LINK_SPEED_40G; 932 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) 933 speed_capa |= ETH_LINK_SPEED_50G; 934 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) 935 speed_capa |= ETH_LINK_SPEED_100G; 936 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) 937 speed_capa |= ETH_LINK_SPEED_50G; 938 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) 939 speed_capa |= ETH_LINK_SPEED_100G; 940 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G) 941 speed_capa |= ETH_LINK_SPEED_200G; 942 943 if (bp->link_info->auto_mode == 944 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE) 945 speed_capa |= ETH_LINK_SPEED_FIXED; 946 947 return speed_capa; 948 } 949 950 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, 951 struct rte_eth_dev_info *dev_info) 952 { 953 struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device); 954 struct bnxt *bp = eth_dev->data->dev_private; 955 uint16_t max_vnics, i, j, vpool, vrxq; 956 unsigned int max_rx_rings; 957 int rc; 958 959 rc = is_bnxt_in_error(bp); 960 if (rc) 961 return rc; 962 963 /* MAC Specifics */ 964 dev_info->max_mac_addrs = bp->max_l2_ctx; 965 dev_info->max_hash_mac_addrs = 0; 966 967 /* PF/VF specifics */ 968 if (BNXT_PF(bp)) 969 dev_info->max_vfs = pdev->max_vfs; 970 971 max_rx_rings = bnxt_max_rings(bp); 972 /* For the sake of symmetry, max_rx_queues = max_tx_queues */ 973 dev_info->max_rx_queues = max_rx_rings; 974 dev_info->max_tx_queues = max_rx_rings; 975 dev_info->reta_size = bnxt_rss_hash_tbl_size(bp); 976 dev_info->hash_key_size = HW_HASH_KEY_SIZE; 977 max_vnics = bp->max_vnics; 978 979 /* MTU specifics */ 980 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 981 dev_info->max_mtu = BNXT_MAX_MTU; 982 983 /* Fast path specifics */ 984 dev_info->min_rx_bufsize = 1; 985 dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN; 986 987 dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT; 988 if (bp->flags & BNXT_FLAG_PTP_SUPPORTED) 989 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; 990 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP) 991 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_STRIP; 992 dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; 993 dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT | 994 dev_info->tx_queue_offload_capa; 995 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) 996 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT; 997 dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT; 998 999 dev_info->speed_capa = bnxt_get_speed_capabilities(bp); 1000 dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 1001 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; 1002 1003 dev_info->default_rxconf = (struct rte_eth_rxconf) { 1004 .rx_thresh = { 1005 .pthresh = 8, 1006 .hthresh = 8, 1007 .wthresh = 0, 1008 }, 1009 .rx_free_thresh = 32, 1010 .rx_drop_en = BNXT_DEFAULT_RX_DROP_EN, 1011 }; 1012 1013 dev_info->default_txconf = (struct rte_eth_txconf) { 1014 .tx_thresh = { 1015 .pthresh = 32, 1016 .hthresh = 0, 1017 .wthresh = 0, 1018 }, 1019 .tx_free_thresh = 32, 1020 .tx_rs_thresh = 32, 1021 }; 1022 eth_dev->data->dev_conf.intr_conf.lsc = 1; 1023 1024 dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 1025 dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; 1026 dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 1027 dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC; 1028 1029 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 1030 dev_info->switch_info.name = eth_dev->device->name; 1031 dev_info->switch_info.domain_id = bp->switch_domain_id; 1032 dev_info->switch_info.port_id = 1033 BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF : 1034 BNXT_SWITCH_PORT_ID_TRUSTED_VF; 1035 } 1036 1037 /* 1038 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim 1039 * need further investigation. 1040 */ 1041 1042 /* VMDq resources */ 1043 vpool = 64; /* ETH_64_POOLS */ 1044 vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */ 1045 for (i = 0; i < 4; vpool >>= 1, i++) { 1046 if (max_vnics > vpool) { 1047 for (j = 0; j < 5; vrxq >>= 1, j++) { 1048 if (dev_info->max_rx_queues > vrxq) { 1049 if (vpool > vrxq) 1050 vpool = vrxq; 1051 goto found; 1052 } 1053 } 1054 /* Not enough resources to support VMDq */ 1055 break; 1056 } 1057 } 1058 /* Not enough resources to support VMDq */ 1059 vpool = 0; 1060 vrxq = 0; 1061 found: 1062 dev_info->max_vmdq_pools = vpool; 1063 dev_info->vmdq_queue_num = vrxq; 1064 1065 dev_info->vmdq_pool_base = 0; 1066 dev_info->vmdq_queue_base = 0; 1067 1068 return 0; 1069 } 1070 1071 /* Configure the device based on the configuration provided */ 1072 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) 1073 { 1074 struct bnxt *bp = eth_dev->data->dev_private; 1075 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1076 int rc; 1077 1078 bp->rx_queues = (void *)eth_dev->data->rx_queues; 1079 bp->tx_queues = (void *)eth_dev->data->tx_queues; 1080 bp->tx_nr_rings = eth_dev->data->nb_tx_queues; 1081 bp->rx_nr_rings = eth_dev->data->nb_rx_queues; 1082 1083 rc = is_bnxt_in_error(bp); 1084 if (rc) 1085 return rc; 1086 1087 if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) { 1088 rc = bnxt_hwrm_check_vf_rings(bp); 1089 if (rc) { 1090 PMD_DRV_LOG(ERR, "HWRM insufficient resources\n"); 1091 return -ENOSPC; 1092 } 1093 1094 /* If a resource has already been allocated - in this case 1095 * it is the async completion ring, free it. Reallocate it after 1096 * resource reservation. This will ensure the resource counts 1097 * are calculated correctly. 1098 */ 1099 1100 pthread_mutex_lock(&bp->def_cp_lock); 1101 1102 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1103 bnxt_disable_int(bp); 1104 bnxt_free_cp_ring(bp, bp->async_cp_ring); 1105 } 1106 1107 rc = bnxt_hwrm_func_reserve_vf_resc(bp, false); 1108 if (rc) { 1109 PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc); 1110 pthread_mutex_unlock(&bp->def_cp_lock); 1111 return -ENOSPC; 1112 } 1113 1114 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1115 rc = bnxt_alloc_async_cp_ring(bp); 1116 if (rc) { 1117 pthread_mutex_unlock(&bp->def_cp_lock); 1118 return rc; 1119 } 1120 bnxt_enable_int(bp); 1121 } 1122 1123 pthread_mutex_unlock(&bp->def_cp_lock); 1124 } 1125 1126 /* Inherit new configurations */ 1127 if (eth_dev->data->nb_rx_queues > bp->max_rx_rings || 1128 eth_dev->data->nb_tx_queues > bp->max_tx_rings || 1129 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues 1130 + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings || 1131 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues > 1132 bp->max_stat_ctx) 1133 goto resource_error; 1134 1135 if (BNXT_HAS_RING_GRPS(bp) && 1136 (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps) 1137 goto resource_error; 1138 1139 if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) && 1140 bp->max_vnics < eth_dev->data->nb_rx_queues) 1141 goto resource_error; 1142 1143 bp->rx_cp_nr_rings = bp->rx_nr_rings; 1144 bp->tx_cp_nr_rings = bp->tx_nr_rings; 1145 1146 if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 1147 rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH; 1148 eth_dev->data->dev_conf.rxmode.offloads = rx_offloads; 1149 1150 if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 1151 eth_dev->data->mtu = 1152 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - 1153 RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE * 1154 BNXT_NUM_VLANS; 1155 bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu); 1156 } 1157 return 0; 1158 1159 resource_error: 1160 PMD_DRV_LOG(ERR, 1161 "Insufficient resources to support requested config\n"); 1162 PMD_DRV_LOG(ERR, 1163 "Num Queues Requested: Tx %d, Rx %d\n", 1164 eth_dev->data->nb_tx_queues, 1165 eth_dev->data->nb_rx_queues); 1166 PMD_DRV_LOG(ERR, 1167 "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n", 1168 bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings, 1169 bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics); 1170 return -ENOSPC; 1171 } 1172 1173 void bnxt_print_link_info(struct rte_eth_dev *eth_dev) 1174 { 1175 struct rte_eth_link *link = ð_dev->data->dev_link; 1176 1177 if (link->link_status) 1178 PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n", 1179 eth_dev->data->port_id, 1180 (uint32_t)link->link_speed, 1181 (link->link_duplex == ETH_LINK_FULL_DUPLEX) ? 1182 ("full-duplex") : ("half-duplex\n")); 1183 else 1184 PMD_DRV_LOG(INFO, "Port %d Link Down\n", 1185 eth_dev->data->port_id); 1186 } 1187 1188 /* 1189 * Determine whether the current configuration requires support for scattered 1190 * receive; return 1 if scattered receive is required and 0 if not. 1191 */ 1192 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev) 1193 { 1194 uint16_t buf_size; 1195 int i; 1196 1197 if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) 1198 return 1; 1199 1200 if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) 1201 return 1; 1202 1203 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 1204 struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i]; 1205 1206 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - 1207 RTE_PKTMBUF_HEADROOM); 1208 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) 1209 return 1; 1210 } 1211 return 0; 1212 } 1213 1214 static eth_rx_burst_t 1215 bnxt_receive_function(struct rte_eth_dev *eth_dev) 1216 { 1217 struct bnxt *bp = eth_dev->data->dev_private; 1218 1219 /* Disable vector mode RX for Stingray2 for now */ 1220 if (BNXT_CHIP_SR2(bp)) { 1221 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1222 return bnxt_recv_pkts; 1223 } 1224 1225 #if (defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)) && \ 1226 !defined(RTE_LIBRTE_IEEE1588) 1227 1228 /* Vector mode receive cannot be enabled if scattered rx is in use. */ 1229 if (eth_dev->data->scattered_rx) 1230 goto use_scalar_rx; 1231 1232 /* 1233 * Vector mode receive cannot be enabled if Truflow is enabled or if 1234 * asynchronous completions and receive completions can be placed in 1235 * the same completion ring. 1236 */ 1237 if (BNXT_TRUFLOW_EN(bp) || !BNXT_NUM_ASYNC_CPR(bp)) 1238 goto use_scalar_rx; 1239 1240 /* 1241 * Vector mode receive cannot be enabled if any receive offloads outside 1242 * a limited subset have been enabled. 1243 */ 1244 if (eth_dev->data->dev_conf.rxmode.offloads & 1245 ~(DEV_RX_OFFLOAD_VLAN_STRIP | 1246 DEV_RX_OFFLOAD_KEEP_CRC | 1247 DEV_RX_OFFLOAD_JUMBO_FRAME | 1248 DEV_RX_OFFLOAD_IPV4_CKSUM | 1249 DEV_RX_OFFLOAD_UDP_CKSUM | 1250 DEV_RX_OFFLOAD_TCP_CKSUM | 1251 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1252 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | 1253 DEV_RX_OFFLOAD_RSS_HASH | 1254 DEV_RX_OFFLOAD_VLAN_FILTER)) 1255 goto use_scalar_rx; 1256 1257 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 1258 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && 1259 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { 1260 PMD_DRV_LOG(INFO, 1261 "Using AVX2 vector mode receive for port %d\n", 1262 eth_dev->data->port_id); 1263 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1264 return bnxt_recv_pkts_vec_avx2; 1265 } 1266 #endif 1267 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1268 PMD_DRV_LOG(INFO, 1269 "Using SSE vector mode receive for port %d\n", 1270 eth_dev->data->port_id); 1271 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1272 return bnxt_recv_pkts_vec; 1273 } 1274 1275 use_scalar_rx: 1276 PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n", 1277 eth_dev->data->port_id); 1278 PMD_DRV_LOG(INFO, 1279 "Port %d scatter: %d rx offload: %" PRIX64 "\n", 1280 eth_dev->data->port_id, 1281 eth_dev->data->scattered_rx, 1282 eth_dev->data->dev_conf.rxmode.offloads); 1283 #endif 1284 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1285 return bnxt_recv_pkts; 1286 } 1287 1288 static eth_tx_burst_t 1289 bnxt_transmit_function(struct rte_eth_dev *eth_dev) 1290 { 1291 struct bnxt *bp = eth_dev->data->dev_private; 1292 1293 /* Disable vector mode TX for Stingray2 for now */ 1294 if (BNXT_CHIP_SR2(bp)) 1295 return bnxt_xmit_pkts; 1296 1297 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) && \ 1298 !defined(RTE_LIBRTE_IEEE1588) 1299 uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads; 1300 1301 /* 1302 * Vector mode transmit can be enabled only if not using scatter rx 1303 * or tx offloads. 1304 */ 1305 if (eth_dev->data->scattered_rx || 1306 (offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) || 1307 BNXT_TRUFLOW_EN(bp)) 1308 goto use_scalar_tx; 1309 1310 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 1311 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && 1312 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { 1313 PMD_DRV_LOG(INFO, 1314 "Using AVX2 vector mode transmit for port %d\n", 1315 eth_dev->data->port_id); 1316 return bnxt_xmit_pkts_vec_avx2; 1317 } 1318 #endif 1319 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1320 PMD_DRV_LOG(INFO, 1321 "Using SSE vector mode transmit for port %d\n", 1322 eth_dev->data->port_id); 1323 return bnxt_xmit_pkts_vec; 1324 } 1325 1326 use_scalar_tx: 1327 PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n", 1328 eth_dev->data->port_id); 1329 PMD_DRV_LOG(INFO, 1330 "Port %d scatter: %d tx offload: %" PRIX64 "\n", 1331 eth_dev->data->port_id, 1332 eth_dev->data->scattered_rx, 1333 offloads); 1334 #endif 1335 return bnxt_xmit_pkts; 1336 } 1337 1338 static int bnxt_handle_if_change_status(struct bnxt *bp) 1339 { 1340 int rc; 1341 1342 /* Since fw has undergone a reset and lost all contexts, 1343 * set fatal flag to not issue hwrm during cleanup 1344 */ 1345 bp->flags |= BNXT_FLAG_FATAL_ERROR; 1346 bnxt_uninit_resources(bp, true); 1347 1348 /* clear fatal flag so that re-init happens */ 1349 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 1350 rc = bnxt_init_resources(bp, true); 1351 1352 bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE; 1353 1354 return rc; 1355 } 1356 1357 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) 1358 { 1359 struct bnxt *bp = eth_dev->data->dev_private; 1360 int rc = 0; 1361 1362 if (!BNXT_SINGLE_PF(bp)) 1363 return -ENOTSUP; 1364 1365 if (!bp->link_info->link_up) 1366 rc = bnxt_set_hwrm_link_config(bp, true); 1367 if (!rc) 1368 eth_dev->data->dev_link.link_status = 1; 1369 1370 bnxt_print_link_info(eth_dev); 1371 return rc; 1372 } 1373 1374 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) 1375 { 1376 struct bnxt *bp = eth_dev->data->dev_private; 1377 1378 if (!BNXT_SINGLE_PF(bp)) 1379 return -ENOTSUP; 1380 1381 eth_dev->data->dev_link.link_status = 0; 1382 bnxt_set_hwrm_link_config(bp, false); 1383 bp->link_info->link_up = 0; 1384 1385 return 0; 1386 } 1387 1388 static void bnxt_free_switch_domain(struct bnxt *bp) 1389 { 1390 int rc = 0; 1391 1392 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) 1393 return; 1394 1395 rc = rte_eth_switch_domain_free(bp->switch_domain_id); 1396 if (rc) 1397 PMD_DRV_LOG(ERR, "free switch domain:%d fail: %d\n", 1398 bp->switch_domain_id, rc); 1399 } 1400 1401 static void bnxt_ptp_get_current_time(void *arg) 1402 { 1403 struct bnxt *bp = arg; 1404 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 1405 int rc; 1406 1407 rc = is_bnxt_in_error(bp); 1408 if (rc) 1409 return; 1410 1411 if (!ptp) 1412 return; 1413 1414 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 1415 &ptp->current_time); 1416 1417 rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp); 1418 if (rc != 0) { 1419 PMD_DRV_LOG(ERR, "Failed to re-schedule PTP alarm\n"); 1420 bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1421 } 1422 } 1423 1424 static int bnxt_schedule_ptp_alarm(struct bnxt *bp) 1425 { 1426 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 1427 int rc; 1428 1429 if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) 1430 return 0; 1431 1432 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 1433 &ptp->current_time); 1434 1435 rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp); 1436 return rc; 1437 } 1438 1439 static void bnxt_cancel_ptp_alarm(struct bnxt *bp) 1440 { 1441 if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) { 1442 rte_eal_alarm_cancel(bnxt_ptp_get_current_time, (void *)bp); 1443 bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1444 } 1445 } 1446 1447 static void bnxt_ptp_stop(struct bnxt *bp) 1448 { 1449 bnxt_cancel_ptp_alarm(bp); 1450 bp->flags2 &= ~BNXT_FLAGS2_PTP_TIMESYNC_ENABLED; 1451 } 1452 1453 static int bnxt_ptp_start(struct bnxt *bp) 1454 { 1455 int rc; 1456 1457 rc = bnxt_schedule_ptp_alarm(bp); 1458 if (rc != 0) { 1459 PMD_DRV_LOG(ERR, "Failed to schedule PTP alarm\n"); 1460 } else { 1461 bp->flags2 |= BNXT_FLAGS2_PTP_TIMESYNC_ENABLED; 1462 bp->flags2 |= BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1463 } 1464 1465 return rc; 1466 } 1467 1468 static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) 1469 { 1470 struct bnxt *bp = eth_dev->data->dev_private; 1471 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1472 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1473 struct rte_eth_link link; 1474 int ret; 1475 1476 eth_dev->data->dev_started = 0; 1477 eth_dev->data->scattered_rx = 0; 1478 1479 /* Prevent crashes when queues are still in use */ 1480 eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts; 1481 eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts; 1482 1483 bnxt_disable_int(bp); 1484 1485 /* disable uio/vfio intr/eventfd mapping */ 1486 rte_intr_disable(intr_handle); 1487 1488 /* Stop the child representors for this device */ 1489 ret = bnxt_rep_stop_all(bp); 1490 if (ret != 0) 1491 return ret; 1492 1493 /* delete the bnxt ULP port details */ 1494 bnxt_ulp_port_deinit(bp); 1495 1496 bnxt_cancel_fw_health_check(bp); 1497 1498 if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp)) 1499 bnxt_cancel_ptp_alarm(bp); 1500 1501 /* Do not bring link down during reset recovery */ 1502 if (!is_bnxt_in_error(bp)) { 1503 bnxt_dev_set_link_down_op(eth_dev); 1504 /* Wait for link to be reset */ 1505 if (BNXT_SINGLE_PF(bp)) 1506 rte_delay_ms(500); 1507 /* clear the recorded link status */ 1508 memset(&link, 0, sizeof(link)); 1509 rte_eth_linkstatus_set(eth_dev, &link); 1510 } 1511 1512 /* Clean queue intr-vector mapping */ 1513 rte_intr_efd_disable(intr_handle); 1514 if (intr_handle->intr_vec != NULL) { 1515 rte_free(intr_handle->intr_vec); 1516 intr_handle->intr_vec = NULL; 1517 } 1518 1519 bnxt_hwrm_port_clr_stats(bp); 1520 bnxt_free_tx_mbufs(bp); 1521 bnxt_free_rx_mbufs(bp); 1522 /* Process any remaining notifications in default completion queue */ 1523 bnxt_int_handler(eth_dev); 1524 bnxt_shutdown_nic(bp); 1525 bnxt_hwrm_if_change(bp, false); 1526 1527 bnxt_free_prev_ring_stats(bp); 1528 rte_free(bp->mark_table); 1529 bp->mark_table = NULL; 1530 1531 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1532 bp->rx_cosq_cnt = 0; 1533 /* All filters are deleted on a port stop. */ 1534 if (BNXT_FLOW_XSTATS_EN(bp)) 1535 bp->flow_stat->flow_count = 0; 1536 1537 return 0; 1538 } 1539 1540 /* Unload the driver, release resources */ 1541 static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) 1542 { 1543 struct bnxt *bp = eth_dev->data->dev_private; 1544 1545 pthread_mutex_lock(&bp->err_recovery_lock); 1546 if (bp->flags & BNXT_FLAG_FW_RESET) { 1547 PMD_DRV_LOG(ERR, 1548 "Adapter recovering from error..Please retry\n"); 1549 pthread_mutex_unlock(&bp->err_recovery_lock); 1550 return -EAGAIN; 1551 } 1552 pthread_mutex_unlock(&bp->err_recovery_lock); 1553 1554 return bnxt_dev_stop(eth_dev); 1555 } 1556 1557 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) 1558 { 1559 struct bnxt *bp = eth_dev->data->dev_private; 1560 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1561 int vlan_mask = 0; 1562 int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT; 1563 1564 if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) { 1565 PMD_DRV_LOG(ERR, "Queues are not configured yet!\n"); 1566 return -EINVAL; 1567 } 1568 1569 if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) 1570 PMD_DRV_LOG(ERR, 1571 "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", 1572 bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); 1573 1574 do { 1575 rc = bnxt_hwrm_if_change(bp, true); 1576 if (rc == 0 || rc != -EAGAIN) 1577 break; 1578 1579 rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL); 1580 } while (retry_cnt--); 1581 1582 if (rc) 1583 return rc; 1584 1585 if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) { 1586 rc = bnxt_handle_if_change_status(bp); 1587 if (rc) 1588 return rc; 1589 } 1590 1591 bnxt_enable_int(bp); 1592 1593 eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev); 1594 1595 rc = bnxt_start_nic(bp); 1596 if (rc) 1597 goto error; 1598 1599 rc = bnxt_alloc_prev_ring_stats(bp); 1600 if (rc) 1601 goto error; 1602 1603 eth_dev->data->dev_started = 1; 1604 1605 bnxt_link_update_op(eth_dev, 1); 1606 1607 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 1608 vlan_mask |= ETH_VLAN_FILTER_MASK; 1609 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 1610 vlan_mask |= ETH_VLAN_STRIP_MASK; 1611 rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); 1612 if (rc) 1613 goto error; 1614 1615 /* Initialize bnxt ULP port details */ 1616 rc = bnxt_ulp_port_init(bp); 1617 if (rc) 1618 goto error; 1619 1620 eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev); 1621 eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev); 1622 1623 bnxt_schedule_fw_health_check(bp); 1624 1625 if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp)) 1626 bnxt_schedule_ptp_alarm(bp); 1627 1628 return 0; 1629 1630 error: 1631 bnxt_dev_stop(eth_dev); 1632 return rc; 1633 } 1634 1635 static void 1636 bnxt_uninit_locks(struct bnxt *bp) 1637 { 1638 pthread_mutex_destroy(&bp->flow_lock); 1639 pthread_mutex_destroy(&bp->def_cp_lock); 1640 pthread_mutex_destroy(&bp->health_check_lock); 1641 pthread_mutex_destroy(&bp->err_recovery_lock); 1642 if (bp->rep_info) { 1643 pthread_mutex_destroy(&bp->rep_info->vfr_lock); 1644 pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); 1645 } 1646 } 1647 1648 static void bnxt_drv_uninit(struct bnxt *bp) 1649 { 1650 bnxt_free_leds_info(bp); 1651 bnxt_free_cos_queues(bp); 1652 bnxt_free_link_info(bp); 1653 bnxt_free_parent_info(bp); 1654 bnxt_uninit_locks(bp); 1655 1656 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); 1657 bp->tx_mem_zone = NULL; 1658 rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); 1659 bp->rx_mem_zone = NULL; 1660 1661 bnxt_free_vf_info(bp); 1662 bnxt_free_pf_info(bp); 1663 1664 rte_free(bp->grp_info); 1665 bp->grp_info = NULL; 1666 } 1667 1668 static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) 1669 { 1670 struct bnxt *bp = eth_dev->data->dev_private; 1671 int ret = 0; 1672 1673 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1674 return 0; 1675 1676 pthread_mutex_lock(&bp->err_recovery_lock); 1677 if (bp->flags & BNXT_FLAG_FW_RESET) { 1678 PMD_DRV_LOG(ERR, 1679 "Adapter recovering from error...Please retry\n"); 1680 pthread_mutex_unlock(&bp->err_recovery_lock); 1681 return -EAGAIN; 1682 } 1683 pthread_mutex_unlock(&bp->err_recovery_lock); 1684 1685 /* cancel the recovery handler before remove dev */ 1686 rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); 1687 rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); 1688 bnxt_cancel_fc_thread(bp); 1689 1690 if (eth_dev->data->dev_started) 1691 ret = bnxt_dev_stop(eth_dev); 1692 1693 bnxt_uninit_resources(bp, false); 1694 1695 bnxt_drv_uninit(bp); 1696 1697 return ret; 1698 } 1699 1700 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, 1701 uint32_t index) 1702 { 1703 struct bnxt *bp = eth_dev->data->dev_private; 1704 uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; 1705 struct bnxt_vnic_info *vnic; 1706 struct bnxt_filter_info *filter, *temp_filter; 1707 uint32_t i; 1708 1709 if (is_bnxt_in_error(bp)) 1710 return; 1711 1712 /* 1713 * Loop through all VNICs from the specified filter flow pools to 1714 * remove the corresponding MAC addr filter 1715 */ 1716 for (i = 0; i < bp->nr_vnics; i++) { 1717 if (!(pool_mask & (1ULL << i))) 1718 continue; 1719 1720 vnic = &bp->vnic_info[i]; 1721 filter = STAILQ_FIRST(&vnic->filter); 1722 while (filter) { 1723 temp_filter = STAILQ_NEXT(filter, next); 1724 if (filter->mac_index == index) { 1725 STAILQ_REMOVE(&vnic->filter, filter, 1726 bnxt_filter_info, next); 1727 bnxt_hwrm_clear_l2_filter(bp, filter); 1728 bnxt_free_filter(bp, filter); 1729 } 1730 filter = temp_filter; 1731 } 1732 } 1733 } 1734 1735 static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic, 1736 struct rte_ether_addr *mac_addr, uint32_t index, 1737 uint32_t pool) 1738 { 1739 struct bnxt_filter_info *filter; 1740 int rc = 0; 1741 1742 /* Attach requested MAC address to the new l2_filter */ 1743 STAILQ_FOREACH(filter, &vnic->filter, next) { 1744 if (filter->mac_index == index) { 1745 PMD_DRV_LOG(DEBUG, 1746 "MAC addr already existed for pool %d\n", 1747 pool); 1748 return 0; 1749 } 1750 } 1751 1752 filter = bnxt_alloc_filter(bp); 1753 if (!filter) { 1754 PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); 1755 return -ENODEV; 1756 } 1757 1758 /* bnxt_alloc_filter copies default MAC to filter->l2_addr. So, 1759 * if the MAC that's been programmed now is a different one, then, 1760 * copy that addr to filter->l2_addr 1761 */ 1762 if (mac_addr) 1763 memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN); 1764 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 1765 1766 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 1767 if (!rc) { 1768 filter->mac_index = index; 1769 if (filter->mac_index == 0) 1770 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 1771 else 1772 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 1773 } else { 1774 bnxt_free_filter(bp, filter); 1775 } 1776 1777 return rc; 1778 } 1779 1780 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, 1781 struct rte_ether_addr *mac_addr, 1782 uint32_t index, uint32_t pool) 1783 { 1784 struct bnxt *bp = eth_dev->data->dev_private; 1785 struct bnxt_vnic_info *vnic = &bp->vnic_info[pool]; 1786 int rc = 0; 1787 1788 rc = is_bnxt_in_error(bp); 1789 if (rc) 1790 return rc; 1791 1792 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { 1793 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n"); 1794 return -ENOTSUP; 1795 } 1796 1797 if (!vnic) { 1798 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool); 1799 return -EINVAL; 1800 } 1801 1802 /* Filter settings will get applied when port is started */ 1803 if (!eth_dev->data->dev_started) 1804 return 0; 1805 1806 rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool); 1807 1808 return rc; 1809 } 1810 1811 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) 1812 { 1813 int rc = 0; 1814 struct bnxt *bp = eth_dev->data->dev_private; 1815 struct rte_eth_link new; 1816 int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT : 1817 BNXT_MIN_LINK_WAIT_CNT; 1818 1819 rc = is_bnxt_in_error(bp); 1820 if (rc) 1821 return rc; 1822 1823 memset(&new, 0, sizeof(new)); 1824 1825 if (bp->link_info == NULL) 1826 goto out; 1827 1828 do { 1829 /* Retrieve link info from hardware */ 1830 rc = bnxt_get_hwrm_link_config(bp, &new); 1831 if (rc) { 1832 new.link_speed = ETH_LINK_SPEED_100M; 1833 new.link_duplex = ETH_LINK_FULL_DUPLEX; 1834 PMD_DRV_LOG(ERR, 1835 "Failed to retrieve link rc = 0x%x!\n", rc); 1836 goto out; 1837 } 1838 1839 if (!wait_to_complete || new.link_status) 1840 break; 1841 1842 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); 1843 } while (cnt--); 1844 1845 /* Only single function PF can bring phy down. 1846 * When port is stopped, report link down for VF/MH/NPAR functions. 1847 */ 1848 if (!BNXT_SINGLE_PF(bp) && !eth_dev->data->dev_started) 1849 memset(&new, 0, sizeof(new)); 1850 1851 out: 1852 /* Timed out or success */ 1853 if (new.link_status != eth_dev->data->dev_link.link_status || 1854 new.link_speed != eth_dev->data->dev_link.link_speed) { 1855 rte_eth_linkstatus_set(eth_dev, &new); 1856 bnxt_print_link_info(eth_dev); 1857 } 1858 1859 return rc; 1860 } 1861 1862 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) 1863 { 1864 struct bnxt *bp = eth_dev->data->dev_private; 1865 struct bnxt_vnic_info *vnic; 1866 uint32_t old_flags; 1867 int rc; 1868 1869 rc = is_bnxt_in_error(bp); 1870 if (rc) 1871 return rc; 1872 1873 /* Filter settings will get applied when port is started */ 1874 if (!eth_dev->data->dev_started) 1875 return 0; 1876 1877 if (bp->vnic_info == NULL) 1878 return 0; 1879 1880 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1881 1882 old_flags = vnic->flags; 1883 vnic->flags |= BNXT_VNIC_INFO_PROMISC; 1884 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1885 if (rc != 0) 1886 vnic->flags = old_flags; 1887 1888 return rc; 1889 } 1890 1891 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) 1892 { 1893 struct bnxt *bp = eth_dev->data->dev_private; 1894 struct bnxt_vnic_info *vnic; 1895 uint32_t old_flags; 1896 int rc; 1897 1898 rc = is_bnxt_in_error(bp); 1899 if (rc) 1900 return rc; 1901 1902 /* Filter settings will get applied when port is started */ 1903 if (!eth_dev->data->dev_started) 1904 return 0; 1905 1906 if (bp->vnic_info == NULL) 1907 return 0; 1908 1909 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1910 1911 old_flags = vnic->flags; 1912 vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; 1913 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1914 if (rc != 0) 1915 vnic->flags = old_flags; 1916 1917 return rc; 1918 } 1919 1920 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) 1921 { 1922 struct bnxt *bp = eth_dev->data->dev_private; 1923 struct bnxt_vnic_info *vnic; 1924 uint32_t old_flags; 1925 int rc; 1926 1927 rc = is_bnxt_in_error(bp); 1928 if (rc) 1929 return rc; 1930 1931 /* Filter settings will get applied when port is started */ 1932 if (!eth_dev->data->dev_started) 1933 return 0; 1934 1935 if (bp->vnic_info == NULL) 1936 return 0; 1937 1938 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1939 1940 old_flags = vnic->flags; 1941 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 1942 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1943 if (rc != 0) 1944 vnic->flags = old_flags; 1945 1946 return rc; 1947 } 1948 1949 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) 1950 { 1951 struct bnxt *bp = eth_dev->data->dev_private; 1952 struct bnxt_vnic_info *vnic; 1953 uint32_t old_flags; 1954 int rc; 1955 1956 rc = is_bnxt_in_error(bp); 1957 if (rc) 1958 return rc; 1959 1960 /* Filter settings will get applied when port is started */ 1961 if (!eth_dev->data->dev_started) 1962 return 0; 1963 1964 if (bp->vnic_info == NULL) 1965 return 0; 1966 1967 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1968 1969 old_flags = vnic->flags; 1970 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 1971 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1972 if (rc != 0) 1973 vnic->flags = old_flags; 1974 1975 return rc; 1976 } 1977 1978 /* Return bnxt_rx_queue pointer corresponding to a given rxq. */ 1979 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) 1980 { 1981 if (qid >= bp->rx_nr_rings) 1982 return NULL; 1983 1984 return bp->eth_dev->data->rx_queues[qid]; 1985 } 1986 1987 /* Return rxq corresponding to a given rss table ring/group ID. */ 1988 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) 1989 { 1990 struct bnxt_rx_queue *rxq; 1991 unsigned int i; 1992 1993 if (!BNXT_HAS_RING_GRPS(bp)) { 1994 for (i = 0; i < bp->rx_nr_rings; i++) { 1995 rxq = bp->eth_dev->data->rx_queues[i]; 1996 if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr) 1997 return rxq->index; 1998 } 1999 } else { 2000 for (i = 0; i < bp->rx_nr_rings; i++) { 2001 if (bp->grp_info[i].fw_grp_id == fwr) 2002 return i; 2003 } 2004 } 2005 2006 return INVALID_HW_RING_ID; 2007 } 2008 2009 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, 2010 struct rte_eth_rss_reta_entry64 *reta_conf, 2011 uint16_t reta_size) 2012 { 2013 struct bnxt *bp = eth_dev->data->dev_private; 2014 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2015 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2016 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2017 uint16_t idx, sft; 2018 int i, rc; 2019 2020 rc = is_bnxt_in_error(bp); 2021 if (rc) 2022 return rc; 2023 2024 if (!vnic->rss_table) 2025 return -EINVAL; 2026 2027 if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) 2028 return -EINVAL; 2029 2030 if (reta_size != tbl_size) { 2031 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2032 "(%d) must equal the size supported by the hardware " 2033 "(%d)\n", reta_size, tbl_size); 2034 return -EINVAL; 2035 } 2036 2037 for (i = 0; i < reta_size; i++) { 2038 struct bnxt_rx_queue *rxq; 2039 2040 idx = i / RTE_RETA_GROUP_SIZE; 2041 sft = i % RTE_RETA_GROUP_SIZE; 2042 2043 if (!(reta_conf[idx].mask & (1ULL << sft))) 2044 continue; 2045 2046 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); 2047 if (!rxq) { 2048 PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n"); 2049 return -EINVAL; 2050 } 2051 2052 if (BNXT_CHIP_P5(bp)) { 2053 vnic->rss_table[i * 2] = 2054 rxq->rx_ring->rx_ring_struct->fw_ring_id; 2055 vnic->rss_table[i * 2 + 1] = 2056 rxq->cp_ring->cp_ring_struct->fw_ring_id; 2057 } else { 2058 vnic->rss_table[i] = 2059 vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; 2060 } 2061 } 2062 2063 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2064 return rc; 2065 } 2066 2067 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, 2068 struct rte_eth_rss_reta_entry64 *reta_conf, 2069 uint16_t reta_size) 2070 { 2071 struct bnxt *bp = eth_dev->data->dev_private; 2072 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2073 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2074 uint16_t idx, sft, i; 2075 int rc; 2076 2077 rc = is_bnxt_in_error(bp); 2078 if (rc) 2079 return rc; 2080 2081 if (!vnic) 2082 return -EINVAL; 2083 if (!vnic->rss_table) 2084 return -EINVAL; 2085 2086 if (reta_size != tbl_size) { 2087 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2088 "(%d) must equal the size supported by the hardware " 2089 "(%d)\n", reta_size, tbl_size); 2090 return -EINVAL; 2091 } 2092 2093 for (idx = 0, i = 0; i < reta_size; i++) { 2094 idx = i / RTE_RETA_GROUP_SIZE; 2095 sft = i % RTE_RETA_GROUP_SIZE; 2096 2097 if (reta_conf[idx].mask & (1ULL << sft)) { 2098 uint16_t qid; 2099 2100 if (BNXT_CHIP_P5(bp)) 2101 qid = bnxt_rss_to_qid(bp, 2102 vnic->rss_table[i * 2]); 2103 else 2104 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); 2105 2106 if (qid == INVALID_HW_RING_ID) { 2107 PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n"); 2108 return -EINVAL; 2109 } 2110 reta_conf[idx].reta[sft] = qid; 2111 } 2112 } 2113 2114 return 0; 2115 } 2116 2117 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, 2118 struct rte_eth_rss_conf *rss_conf) 2119 { 2120 struct bnxt *bp = eth_dev->data->dev_private; 2121 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2122 struct bnxt_vnic_info *vnic; 2123 int rc; 2124 2125 rc = is_bnxt_in_error(bp); 2126 if (rc) 2127 return rc; 2128 2129 /* 2130 * If RSS enablement were different than dev_configure, 2131 * then return -EINVAL 2132 */ 2133 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { 2134 if (!rss_conf->rss_hf) 2135 PMD_DRV_LOG(ERR, "Hash type NONE\n"); 2136 } else { 2137 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) 2138 return -EINVAL; 2139 } 2140 2141 bp->flags |= BNXT_FLAG_UPDATE_HASH; 2142 memcpy(ð_dev->data->dev_conf.rx_adv_conf.rss_conf, 2143 rss_conf, 2144 sizeof(*rss_conf)); 2145 2146 /* Update the default RSS VNIC(s) */ 2147 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2148 vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf); 2149 vnic->hash_mode = 2150 bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf, 2151 ETH_RSS_LEVEL(rss_conf->rss_hf)); 2152 2153 /* 2154 * If hashkey is not specified, use the previously configured 2155 * hashkey 2156 */ 2157 if (!rss_conf->rss_key) 2158 goto rss_config; 2159 2160 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) { 2161 PMD_DRV_LOG(ERR, 2162 "Invalid hashkey length, should be %d bytes\n", 2163 HW_HASH_KEY_SIZE); 2164 return -EINVAL; 2165 } 2166 memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); 2167 2168 rss_config: 2169 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2170 return rc; 2171 } 2172 2173 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, 2174 struct rte_eth_rss_conf *rss_conf) 2175 { 2176 struct bnxt *bp = eth_dev->data->dev_private; 2177 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2178 int len, rc; 2179 uint32_t hash_types; 2180 2181 rc = is_bnxt_in_error(bp); 2182 if (rc) 2183 return rc; 2184 2185 /* RSS configuration is the same for all VNICs */ 2186 if (vnic && vnic->rss_hash_key) { 2187 if (rss_conf->rss_key) { 2188 len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ? 2189 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 2190 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 2191 } 2192 2193 hash_types = vnic->hash_type; 2194 rss_conf->rss_hf = 0; 2195 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { 2196 rss_conf->rss_hf |= ETH_RSS_IPV4; 2197 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 2198 } 2199 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { 2200 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; 2201 hash_types &= 2202 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 2203 } 2204 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { 2205 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; 2206 hash_types &= 2207 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 2208 } 2209 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { 2210 rss_conf->rss_hf |= ETH_RSS_IPV6; 2211 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 2212 } 2213 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { 2214 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; 2215 hash_types &= 2216 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 2217 } 2218 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { 2219 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; 2220 hash_types &= 2221 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 2222 } 2223 2224 rss_conf->rss_hf |= 2225 bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode); 2226 2227 if (hash_types) { 2228 PMD_DRV_LOG(ERR, 2229 "Unknown RSS config from firmware (%08x), RSS disabled", 2230 vnic->hash_type); 2231 return -ENOTSUP; 2232 } 2233 } else { 2234 rss_conf->rss_hf = 0; 2235 } 2236 return 0; 2237 } 2238 2239 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 2240 struct rte_eth_fc_conf *fc_conf) 2241 { 2242 struct bnxt *bp = dev->data->dev_private; 2243 struct rte_eth_link link_info; 2244 int rc; 2245 2246 rc = is_bnxt_in_error(bp); 2247 if (rc) 2248 return rc; 2249 2250 rc = bnxt_get_hwrm_link_config(bp, &link_info); 2251 if (rc) 2252 return rc; 2253 2254 memset(fc_conf, 0, sizeof(*fc_conf)); 2255 if (bp->link_info->auto_pause) 2256 fc_conf->autoneg = 1; 2257 switch (bp->link_info->pause) { 2258 case 0: 2259 fc_conf->mode = RTE_FC_NONE; 2260 break; 2261 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 2262 fc_conf->mode = RTE_FC_TX_PAUSE; 2263 break; 2264 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 2265 fc_conf->mode = RTE_FC_RX_PAUSE; 2266 break; 2267 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 2268 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 2269 fc_conf->mode = RTE_FC_FULL; 2270 break; 2271 } 2272 return 0; 2273 } 2274 2275 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 2276 struct rte_eth_fc_conf *fc_conf) 2277 { 2278 struct bnxt *bp = dev->data->dev_private; 2279 int rc; 2280 2281 rc = is_bnxt_in_error(bp); 2282 if (rc) 2283 return rc; 2284 2285 if (!BNXT_SINGLE_PF(bp)) { 2286 PMD_DRV_LOG(ERR, 2287 "Flow Control Settings cannot be modified on VF or on shared PF\n"); 2288 return -ENOTSUP; 2289 } 2290 2291 switch (fc_conf->mode) { 2292 case RTE_FC_NONE: 2293 bp->link_info->auto_pause = 0; 2294 bp->link_info->force_pause = 0; 2295 break; 2296 case RTE_FC_RX_PAUSE: 2297 if (fc_conf->autoneg) { 2298 bp->link_info->auto_pause = 2299 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2300 bp->link_info->force_pause = 0; 2301 } else { 2302 bp->link_info->auto_pause = 0; 2303 bp->link_info->force_pause = 2304 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2305 } 2306 break; 2307 case RTE_FC_TX_PAUSE: 2308 if (fc_conf->autoneg) { 2309 bp->link_info->auto_pause = 2310 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 2311 bp->link_info->force_pause = 0; 2312 } else { 2313 bp->link_info->auto_pause = 0; 2314 bp->link_info->force_pause = 2315 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 2316 } 2317 break; 2318 case RTE_FC_FULL: 2319 if (fc_conf->autoneg) { 2320 bp->link_info->auto_pause = 2321 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 2322 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2323 bp->link_info->force_pause = 0; 2324 } else { 2325 bp->link_info->auto_pause = 0; 2326 bp->link_info->force_pause = 2327 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 2328 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2329 } 2330 break; 2331 } 2332 return bnxt_set_hwrm_link_config(bp, true); 2333 } 2334 2335 /* Add UDP tunneling port */ 2336 static int 2337 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 2338 struct rte_eth_udp_tunnel *udp_tunnel) 2339 { 2340 struct bnxt *bp = eth_dev->data->dev_private; 2341 uint16_t tunnel_type = 0; 2342 int rc = 0; 2343 2344 rc = is_bnxt_in_error(bp); 2345 if (rc) 2346 return rc; 2347 2348 switch (udp_tunnel->prot_type) { 2349 case RTE_TUNNEL_TYPE_VXLAN: 2350 if (bp->vxlan_port_cnt) { 2351 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2352 udp_tunnel->udp_port); 2353 if (bp->vxlan_port != udp_tunnel->udp_port) { 2354 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2355 return -ENOSPC; 2356 } 2357 bp->vxlan_port_cnt++; 2358 return 0; 2359 } 2360 tunnel_type = 2361 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 2362 bp->vxlan_port_cnt++; 2363 break; 2364 case RTE_TUNNEL_TYPE_GENEVE: 2365 if (bp->geneve_port_cnt) { 2366 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2367 udp_tunnel->udp_port); 2368 if (bp->geneve_port != udp_tunnel->udp_port) { 2369 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2370 return -ENOSPC; 2371 } 2372 bp->geneve_port_cnt++; 2373 return 0; 2374 } 2375 tunnel_type = 2376 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 2377 bp->geneve_port_cnt++; 2378 break; 2379 default: 2380 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2381 return -ENOTSUP; 2382 } 2383 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 2384 tunnel_type); 2385 return rc; 2386 } 2387 2388 static int 2389 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 2390 struct rte_eth_udp_tunnel *udp_tunnel) 2391 { 2392 struct bnxt *bp = eth_dev->data->dev_private; 2393 uint16_t tunnel_type = 0; 2394 uint16_t port = 0; 2395 int rc = 0; 2396 2397 rc = is_bnxt_in_error(bp); 2398 if (rc) 2399 return rc; 2400 2401 switch (udp_tunnel->prot_type) { 2402 case RTE_TUNNEL_TYPE_VXLAN: 2403 if (!bp->vxlan_port_cnt) { 2404 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2405 return -EINVAL; 2406 } 2407 if (bp->vxlan_port != udp_tunnel->udp_port) { 2408 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2409 udp_tunnel->udp_port, bp->vxlan_port); 2410 return -EINVAL; 2411 } 2412 if (--bp->vxlan_port_cnt) 2413 return 0; 2414 2415 tunnel_type = 2416 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 2417 port = bp->vxlan_fw_dst_port_id; 2418 break; 2419 case RTE_TUNNEL_TYPE_GENEVE: 2420 if (!bp->geneve_port_cnt) { 2421 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2422 return -EINVAL; 2423 } 2424 if (bp->geneve_port != udp_tunnel->udp_port) { 2425 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2426 udp_tunnel->udp_port, bp->geneve_port); 2427 return -EINVAL; 2428 } 2429 if (--bp->geneve_port_cnt) 2430 return 0; 2431 2432 tunnel_type = 2433 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 2434 port = bp->geneve_fw_dst_port_id; 2435 break; 2436 default: 2437 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2438 return -ENOTSUP; 2439 } 2440 2441 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 2442 return rc; 2443 } 2444 2445 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2446 { 2447 struct bnxt_filter_info *filter; 2448 struct bnxt_vnic_info *vnic; 2449 int rc = 0; 2450 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2451 2452 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2453 filter = STAILQ_FIRST(&vnic->filter); 2454 while (filter) { 2455 /* Search for this matching MAC+VLAN filter */ 2456 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) { 2457 /* Delete the filter */ 2458 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2459 if (rc) 2460 return rc; 2461 STAILQ_REMOVE(&vnic->filter, filter, 2462 bnxt_filter_info, next); 2463 bnxt_free_filter(bp, filter); 2464 PMD_DRV_LOG(INFO, 2465 "Deleted vlan filter for %d\n", 2466 vlan_id); 2467 return 0; 2468 } 2469 filter = STAILQ_NEXT(filter, next); 2470 } 2471 return -ENOENT; 2472 } 2473 2474 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2475 { 2476 struct bnxt_filter_info *filter; 2477 struct bnxt_vnic_info *vnic; 2478 int rc = 0; 2479 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 2480 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 2481 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2482 2483 /* Implementation notes on the use of VNIC in this command: 2484 * 2485 * By default, these filters belong to default vnic for the function. 2486 * Once these filters are set up, only destination VNIC can be modified. 2487 * If the destination VNIC is not specified in this command, 2488 * then the HWRM shall only create an l2 context id. 2489 */ 2490 2491 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2492 filter = STAILQ_FIRST(&vnic->filter); 2493 /* Check if the VLAN has already been added */ 2494 while (filter) { 2495 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) 2496 return -EEXIST; 2497 2498 filter = STAILQ_NEXT(filter, next); 2499 } 2500 2501 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 2502 * command to create MAC+VLAN filter with the right flags, enables set. 2503 */ 2504 filter = bnxt_alloc_filter(bp); 2505 if (!filter) { 2506 PMD_DRV_LOG(ERR, 2507 "MAC/VLAN filter alloc failed\n"); 2508 return -ENOMEM; 2509 } 2510 /* MAC + VLAN ID filter */ 2511 /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only 2512 * untagged packets are received 2513 * 2514 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged 2515 * packets and only the programmed vlan's packets are received 2516 */ 2517 filter->l2_ivlan = vlan_id; 2518 filter->l2_ivlan_mask = 0x0FFF; 2519 filter->enables |= en; 2520 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 2521 2522 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 2523 if (rc) { 2524 /* Free the newly allocated filter as we were 2525 * not able to create the filter in hardware. 2526 */ 2527 bnxt_free_filter(bp, filter); 2528 return rc; 2529 } 2530 2531 filter->mac_index = 0; 2532 /* Add this new filter to the list */ 2533 if (vlan_id == 0) 2534 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 2535 else 2536 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2537 2538 PMD_DRV_LOG(INFO, 2539 "Added Vlan filter for %d\n", vlan_id); 2540 return rc; 2541 } 2542 2543 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 2544 uint16_t vlan_id, int on) 2545 { 2546 struct bnxt *bp = eth_dev->data->dev_private; 2547 int rc; 2548 2549 rc = is_bnxt_in_error(bp); 2550 if (rc) 2551 return rc; 2552 2553 if (!eth_dev->data->dev_started) { 2554 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); 2555 return -EINVAL; 2556 } 2557 2558 /* These operations apply to ALL existing MAC/VLAN filters */ 2559 if (on) 2560 return bnxt_add_vlan_filter(bp, vlan_id); 2561 else 2562 return bnxt_del_vlan_filter(bp, vlan_id); 2563 } 2564 2565 static int bnxt_del_dflt_mac_filter(struct bnxt *bp, 2566 struct bnxt_vnic_info *vnic) 2567 { 2568 struct bnxt_filter_info *filter; 2569 int rc; 2570 2571 filter = STAILQ_FIRST(&vnic->filter); 2572 while (filter) { 2573 if (filter->mac_index == 0 && 2574 !memcmp(filter->l2_addr, bp->mac_addr, 2575 RTE_ETHER_ADDR_LEN)) { 2576 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2577 if (!rc) { 2578 STAILQ_REMOVE(&vnic->filter, filter, 2579 bnxt_filter_info, next); 2580 bnxt_free_filter(bp, filter); 2581 } 2582 return rc; 2583 } 2584 filter = STAILQ_NEXT(filter, next); 2585 } 2586 return 0; 2587 } 2588 2589 static int 2590 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) 2591 { 2592 struct bnxt_vnic_info *vnic; 2593 unsigned int i; 2594 int rc; 2595 2596 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2597 if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { 2598 /* Remove any VLAN filters programmed */ 2599 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2600 bnxt_del_vlan_filter(bp, i); 2601 2602 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2603 if (rc) 2604 return rc; 2605 } else { 2606 /* Default filter will allow packets that match the 2607 * dest mac. So, it has to be deleted, otherwise, we 2608 * will endup receiving vlan packets for which the 2609 * filter is not programmed, when hw-vlan-filter 2610 * configuration is ON 2611 */ 2612 bnxt_del_dflt_mac_filter(bp, vnic); 2613 /* This filter will allow only untagged packets */ 2614 bnxt_add_vlan_filter(bp, 0); 2615 } 2616 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", 2617 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)); 2618 2619 return 0; 2620 } 2621 2622 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) 2623 { 2624 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 2625 unsigned int i; 2626 int rc; 2627 2628 /* Destroy vnic filters and vnic */ 2629 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2630 DEV_RX_OFFLOAD_VLAN_FILTER) { 2631 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2632 bnxt_del_vlan_filter(bp, i); 2633 } 2634 bnxt_del_dflt_mac_filter(bp, vnic); 2635 2636 rc = bnxt_hwrm_vnic_ctx_free(bp, vnic); 2637 if (rc) 2638 return rc; 2639 2640 rc = bnxt_hwrm_vnic_free(bp, vnic); 2641 if (rc) 2642 return rc; 2643 2644 rte_free(vnic->fw_grp_ids); 2645 vnic->fw_grp_ids = NULL; 2646 2647 vnic->rx_queue_cnt = 0; 2648 2649 return 0; 2650 } 2651 2652 static int 2653 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads) 2654 { 2655 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2656 int rc; 2657 2658 /* Destroy, recreate and reconfigure the default vnic */ 2659 rc = bnxt_free_one_vnic(bp, 0); 2660 if (rc) 2661 return rc; 2662 2663 /* default vnic 0 */ 2664 rc = bnxt_setup_one_vnic(bp, 0); 2665 if (rc) 2666 return rc; 2667 2668 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2669 DEV_RX_OFFLOAD_VLAN_FILTER) { 2670 rc = bnxt_add_vlan_filter(bp, 0); 2671 if (rc) 2672 return rc; 2673 rc = bnxt_restore_vlan_filters(bp); 2674 if (rc) 2675 return rc; 2676 } else { 2677 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2678 if (rc) 2679 return rc; 2680 } 2681 2682 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2683 if (rc) 2684 return rc; 2685 2686 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", 2687 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)); 2688 2689 return rc; 2690 } 2691 2692 static int 2693 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 2694 { 2695 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 2696 struct bnxt *bp = dev->data->dev_private; 2697 int rc; 2698 2699 rc = is_bnxt_in_error(bp); 2700 if (rc) 2701 return rc; 2702 2703 /* Filter settings will get applied when port is started */ 2704 if (!dev->data->dev_started) 2705 return 0; 2706 2707 if (mask & ETH_VLAN_FILTER_MASK) { 2708 /* Enable or disable VLAN filtering */ 2709 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads); 2710 if (rc) 2711 return rc; 2712 } 2713 2714 if (mask & ETH_VLAN_STRIP_MASK) { 2715 /* Enable or disable VLAN stripping */ 2716 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads); 2717 if (rc) 2718 return rc; 2719 } 2720 2721 if (mask & ETH_VLAN_EXTEND_MASK) { 2722 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2723 PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); 2724 else 2725 PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); 2726 } 2727 2728 return 0; 2729 } 2730 2731 static int 2732 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, 2733 uint16_t tpid) 2734 { 2735 struct bnxt *bp = dev->data->dev_private; 2736 int qinq = dev->data->dev_conf.rxmode.offloads & 2737 DEV_RX_OFFLOAD_VLAN_EXTEND; 2738 2739 if (vlan_type != ETH_VLAN_TYPE_INNER && 2740 vlan_type != ETH_VLAN_TYPE_OUTER) { 2741 PMD_DRV_LOG(ERR, 2742 "Unsupported vlan type."); 2743 return -EINVAL; 2744 } 2745 if (!qinq) { 2746 PMD_DRV_LOG(ERR, 2747 "QinQ not enabled. Needs to be ON as we can " 2748 "accelerate only outer vlan\n"); 2749 return -EINVAL; 2750 } 2751 2752 if (vlan_type == ETH_VLAN_TYPE_OUTER) { 2753 switch (tpid) { 2754 case RTE_ETHER_TYPE_QINQ: 2755 bp->outer_tpid_bd = 2756 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; 2757 break; 2758 case RTE_ETHER_TYPE_VLAN: 2759 bp->outer_tpid_bd = 2760 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; 2761 break; 2762 case RTE_ETHER_TYPE_QINQ1: 2763 bp->outer_tpid_bd = 2764 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; 2765 break; 2766 case RTE_ETHER_TYPE_QINQ2: 2767 bp->outer_tpid_bd = 2768 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; 2769 break; 2770 case RTE_ETHER_TYPE_QINQ3: 2771 bp->outer_tpid_bd = 2772 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; 2773 break; 2774 default: 2775 PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); 2776 return -EINVAL; 2777 } 2778 bp->outer_tpid_bd |= tpid; 2779 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); 2780 } else if (vlan_type == ETH_VLAN_TYPE_INNER) { 2781 PMD_DRV_LOG(ERR, 2782 "Can accelerate only outer vlan in QinQ\n"); 2783 return -EINVAL; 2784 } 2785 2786 return 0; 2787 } 2788 2789 static int 2790 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 2791 struct rte_ether_addr *addr) 2792 { 2793 struct bnxt *bp = dev->data->dev_private; 2794 /* Default Filter is tied to VNIC 0 */ 2795 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2796 int rc; 2797 2798 rc = is_bnxt_in_error(bp); 2799 if (rc) 2800 return rc; 2801 2802 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 2803 return -EPERM; 2804 2805 if (rte_is_zero_ether_addr(addr)) 2806 return -EINVAL; 2807 2808 /* Filter settings will get applied when port is started */ 2809 if (!dev->data->dev_started) 2810 return 0; 2811 2812 /* Check if the requested MAC is already added */ 2813 if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0) 2814 return 0; 2815 2816 /* Destroy filter and re-create it */ 2817 bnxt_del_dflt_mac_filter(bp, vnic); 2818 2819 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 2820 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 2821 /* This filter will allow only untagged packets */ 2822 rc = bnxt_add_vlan_filter(bp, 0); 2823 } else { 2824 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0); 2825 } 2826 2827 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 2828 return rc; 2829 } 2830 2831 static int 2832 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 2833 struct rte_ether_addr *mc_addr_set, 2834 uint32_t nb_mc_addr) 2835 { 2836 struct bnxt *bp = eth_dev->data->dev_private; 2837 char *mc_addr_list = (char *)mc_addr_set; 2838 struct bnxt_vnic_info *vnic; 2839 uint32_t off = 0, i = 0; 2840 int rc; 2841 2842 rc = is_bnxt_in_error(bp); 2843 if (rc) 2844 return rc; 2845 2846 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2847 2848 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 2849 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 2850 goto allmulti; 2851 } 2852 2853 /* TODO Check for Duplicate mcast addresses */ 2854 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 2855 for (i = 0; i < nb_mc_addr; i++) { 2856 memcpy(vnic->mc_list + off, &mc_addr_list[i], 2857 RTE_ETHER_ADDR_LEN); 2858 off += RTE_ETHER_ADDR_LEN; 2859 } 2860 2861 vnic->mc_addr_cnt = i; 2862 if (vnic->mc_addr_cnt) 2863 vnic->flags |= BNXT_VNIC_INFO_MCAST; 2864 else 2865 vnic->flags &= ~BNXT_VNIC_INFO_MCAST; 2866 2867 allmulti: 2868 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2869 } 2870 2871 static int 2872 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 2873 { 2874 struct bnxt *bp = dev->data->dev_private; 2875 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 2876 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 2877 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 2878 uint8_t fw_rsvd = bp->fw_ver & 0xff; 2879 int ret; 2880 2881 ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", 2882 fw_major, fw_minor, fw_updt, fw_rsvd); 2883 if (ret < 0) 2884 return -EINVAL; 2885 2886 ret += 1; /* add the size of '\0' */ 2887 if (fw_size < (size_t)ret) 2888 return ret; 2889 else 2890 return 0; 2891 } 2892 2893 static void 2894 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2895 struct rte_eth_rxq_info *qinfo) 2896 { 2897 struct bnxt *bp = dev->data->dev_private; 2898 struct bnxt_rx_queue *rxq; 2899 2900 if (is_bnxt_in_error(bp)) 2901 return; 2902 2903 rxq = dev->data->rx_queues[queue_id]; 2904 2905 qinfo->mp = rxq->mb_pool; 2906 qinfo->scattered_rx = dev->data->scattered_rx; 2907 qinfo->nb_desc = rxq->nb_rx_desc; 2908 2909 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 2910 qinfo->conf.rx_drop_en = rxq->drop_en; 2911 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 2912 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 2913 } 2914 2915 static void 2916 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2917 struct rte_eth_txq_info *qinfo) 2918 { 2919 struct bnxt *bp = dev->data->dev_private; 2920 struct bnxt_tx_queue *txq; 2921 2922 if (is_bnxt_in_error(bp)) 2923 return; 2924 2925 txq = dev->data->tx_queues[queue_id]; 2926 2927 qinfo->nb_desc = txq->nb_tx_desc; 2928 2929 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 2930 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 2931 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 2932 2933 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 2934 qinfo->conf.tx_rs_thresh = 0; 2935 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 2936 qinfo->conf.offloads = txq->offloads; 2937 } 2938 2939 static const struct { 2940 eth_rx_burst_t pkt_burst; 2941 const char *info; 2942 } bnxt_rx_burst_info[] = { 2943 {bnxt_recv_pkts, "Scalar"}, 2944 #if defined(RTE_ARCH_X86) 2945 {bnxt_recv_pkts_vec, "Vector SSE"}, 2946 #endif 2947 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 2948 {bnxt_recv_pkts_vec_avx2, "Vector AVX2"}, 2949 #endif 2950 #if defined(RTE_ARCH_ARM64) 2951 {bnxt_recv_pkts_vec, "Vector Neon"}, 2952 #endif 2953 }; 2954 2955 static int 2956 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2957 struct rte_eth_burst_mode *mode) 2958 { 2959 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 2960 size_t i; 2961 2962 for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) { 2963 if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) { 2964 snprintf(mode->info, sizeof(mode->info), "%s", 2965 bnxt_rx_burst_info[i].info); 2966 return 0; 2967 } 2968 } 2969 2970 return -EINVAL; 2971 } 2972 2973 static const struct { 2974 eth_tx_burst_t pkt_burst; 2975 const char *info; 2976 } bnxt_tx_burst_info[] = { 2977 {bnxt_xmit_pkts, "Scalar"}, 2978 #if defined(RTE_ARCH_X86) 2979 {bnxt_xmit_pkts_vec, "Vector SSE"}, 2980 #endif 2981 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 2982 {bnxt_xmit_pkts_vec_avx2, "Vector AVX2"}, 2983 #endif 2984 #if defined(RTE_ARCH_ARM64) 2985 {bnxt_xmit_pkts_vec, "Vector Neon"}, 2986 #endif 2987 }; 2988 2989 static int 2990 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2991 struct rte_eth_burst_mode *mode) 2992 { 2993 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 2994 size_t i; 2995 2996 for (i = 0; i < RTE_DIM(bnxt_tx_burst_info); i++) { 2997 if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) { 2998 snprintf(mode->info, sizeof(mode->info), "%s", 2999 bnxt_tx_burst_info[i].info); 3000 return 0; 3001 } 3002 } 3003 3004 return -EINVAL; 3005 } 3006 3007 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 3008 { 3009 struct bnxt *bp = eth_dev->data->dev_private; 3010 uint32_t new_pkt_size; 3011 uint32_t rc = 0; 3012 uint32_t i; 3013 3014 rc = is_bnxt_in_error(bp); 3015 if (rc) 3016 return rc; 3017 3018 /* Exit if receive queues are not configured yet */ 3019 if (!eth_dev->data->nb_rx_queues) 3020 return rc; 3021 3022 new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 3023 VLAN_TAG_SIZE * BNXT_NUM_VLANS; 3024 3025 /* 3026 * Disallow any MTU change that would require scattered receive support 3027 * if it is not already enabled. 3028 */ 3029 if (eth_dev->data->dev_started && 3030 !eth_dev->data->scattered_rx && 3031 (new_pkt_size > 3032 eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 3033 PMD_DRV_LOG(ERR, 3034 "MTU change would require scattered rx support. "); 3035 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n"); 3036 return -EINVAL; 3037 } 3038 3039 if (new_mtu > RTE_ETHER_MTU) { 3040 bp->flags |= BNXT_FLAG_JUMBO; 3041 bp->eth_dev->data->dev_conf.rxmode.offloads |= 3042 DEV_RX_OFFLOAD_JUMBO_FRAME; 3043 } else { 3044 bp->eth_dev->data->dev_conf.rxmode.offloads &= 3045 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 3046 bp->flags &= ~BNXT_FLAG_JUMBO; 3047 } 3048 3049 /* Is there a change in mtu setting? */ 3050 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size) 3051 return rc; 3052 3053 for (i = 0; i < bp->nr_vnics; i++) { 3054 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3055 uint16_t size = 0; 3056 3057 vnic->mru = BNXT_VNIC_MRU(new_mtu); 3058 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 3059 if (rc) 3060 break; 3061 3062 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); 3063 size -= RTE_PKTMBUF_HEADROOM; 3064 3065 if (size < new_mtu) { 3066 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 3067 if (rc) 3068 return rc; 3069 } 3070 } 3071 3072 if (!rc) 3073 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size; 3074 3075 if (bnxt_hwrm_config_host_mtu(bp)) 3076 PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n"); 3077 3078 PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); 3079 3080 return rc; 3081 } 3082 3083 static int 3084 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 3085 { 3086 struct bnxt *bp = dev->data->dev_private; 3087 uint16_t vlan = bp->vlan; 3088 int rc; 3089 3090 rc = is_bnxt_in_error(bp); 3091 if (rc) 3092 return rc; 3093 3094 if (!BNXT_SINGLE_PF(bp)) { 3095 PMD_DRV_LOG(ERR, "PVID cannot be modified on VF or on shared PF\n"); 3096 return -ENOTSUP; 3097 } 3098 bp->vlan = on ? pvid : 0; 3099 3100 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 3101 if (rc) 3102 bp->vlan = vlan; 3103 return rc; 3104 } 3105 3106 static int 3107 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 3108 { 3109 struct bnxt *bp = dev->data->dev_private; 3110 int rc; 3111 3112 rc = is_bnxt_in_error(bp); 3113 if (rc) 3114 return rc; 3115 3116 return bnxt_hwrm_port_led_cfg(bp, true); 3117 } 3118 3119 static int 3120 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 3121 { 3122 struct bnxt *bp = dev->data->dev_private; 3123 int rc; 3124 3125 rc = is_bnxt_in_error(bp); 3126 if (rc) 3127 return rc; 3128 3129 return bnxt_hwrm_port_led_cfg(bp, false); 3130 } 3131 3132 static uint32_t 3133 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id) 3134 { 3135 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 3136 struct bnxt_cp_ring_info *cpr; 3137 uint32_t desc = 0, raw_cons, cp_ring_size; 3138 struct bnxt_rx_queue *rxq; 3139 struct rx_pkt_cmpl *rxcmp; 3140 int rc; 3141 3142 rc = is_bnxt_in_error(bp); 3143 if (rc) 3144 return rc; 3145 3146 rxq = dev->data->rx_queues[rx_queue_id]; 3147 cpr = rxq->cp_ring; 3148 raw_cons = cpr->cp_raw_cons; 3149 cp_ring_size = cpr->cp_ring_struct->ring_size; 3150 3151 while (1) { 3152 uint32_t agg_cnt, cons, cmpl_type; 3153 3154 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3155 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3156 3157 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3158 break; 3159 3160 cmpl_type = CMP_TYPE(rxcmp); 3161 3162 switch (cmpl_type) { 3163 case CMPL_BASE_TYPE_RX_L2: 3164 case CMPL_BASE_TYPE_RX_L2_V2: 3165 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3166 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3167 desc++; 3168 break; 3169 3170 case CMPL_BASE_TYPE_RX_TPA_END: 3171 if (BNXT_CHIP_P5(rxq->bp)) { 3172 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3173 3174 p5_tpa_end = (void *)rxcmp; 3175 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3176 } else { 3177 struct rx_tpa_end_cmpl *tpa_end; 3178 3179 tpa_end = (void *)rxcmp; 3180 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3181 } 3182 3183 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3184 desc++; 3185 break; 3186 3187 default: 3188 raw_cons += CMP_LEN(cmpl_type); 3189 } 3190 } 3191 3192 return desc; 3193 } 3194 3195 static int 3196 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 3197 { 3198 struct bnxt_rx_queue *rxq = rx_queue; 3199 struct bnxt_cp_ring_info *cpr; 3200 struct bnxt_rx_ring_info *rxr; 3201 uint32_t desc, raw_cons, cp_ring_size; 3202 struct bnxt *bp = rxq->bp; 3203 struct rx_pkt_cmpl *rxcmp; 3204 int rc; 3205 3206 rc = is_bnxt_in_error(bp); 3207 if (rc) 3208 return rc; 3209 3210 if (offset >= rxq->nb_rx_desc) 3211 return -EINVAL; 3212 3213 rxr = rxq->rx_ring; 3214 cpr = rxq->cp_ring; 3215 cp_ring_size = cpr->cp_ring_struct->ring_size; 3216 3217 /* 3218 * For the vector receive case, the completion at the requested 3219 * offset can be indexed directly. 3220 */ 3221 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 3222 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { 3223 struct rx_pkt_cmpl *rxcmp; 3224 uint32_t cons; 3225 3226 /* Check status of completion descriptor. */ 3227 raw_cons = cpr->cp_raw_cons + 3228 offset * CMP_LEN(CMPL_BASE_TYPE_RX_L2); 3229 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3230 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3231 3232 if (bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3233 return RTE_ETH_RX_DESC_DONE; 3234 3235 /* Check whether rx desc has an mbuf attached. */ 3236 cons = RING_CMP(rxr->rx_ring_struct, raw_cons / 2); 3237 if (cons >= rxq->rxrearm_start && 3238 cons < rxq->rxrearm_start + rxq->rxrearm_nb) { 3239 return RTE_ETH_RX_DESC_UNAVAIL; 3240 } 3241 3242 return RTE_ETH_RX_DESC_AVAIL; 3243 } 3244 #endif 3245 3246 /* 3247 * For the non-vector receive case, scan the completion ring to 3248 * locate the completion descriptor for the requested offset. 3249 */ 3250 raw_cons = cpr->cp_raw_cons; 3251 desc = 0; 3252 while (1) { 3253 uint32_t agg_cnt, cons, cmpl_type; 3254 3255 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3256 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3257 3258 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3259 break; 3260 3261 cmpl_type = CMP_TYPE(rxcmp); 3262 3263 switch (cmpl_type) { 3264 case CMPL_BASE_TYPE_RX_L2: 3265 case CMPL_BASE_TYPE_RX_L2_V2: 3266 if (desc == offset) { 3267 cons = rxcmp->opaque; 3268 if (rxr->rx_buf_ring[cons]) 3269 return RTE_ETH_RX_DESC_DONE; 3270 else 3271 return RTE_ETH_RX_DESC_UNAVAIL; 3272 } 3273 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3274 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3275 desc++; 3276 break; 3277 3278 case CMPL_BASE_TYPE_RX_TPA_END: 3279 if (desc == offset) 3280 return RTE_ETH_RX_DESC_DONE; 3281 3282 if (BNXT_CHIP_P5(rxq->bp)) { 3283 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3284 3285 p5_tpa_end = (void *)rxcmp; 3286 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3287 } else { 3288 struct rx_tpa_end_cmpl *tpa_end; 3289 3290 tpa_end = (void *)rxcmp; 3291 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3292 } 3293 3294 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3295 desc++; 3296 break; 3297 3298 default: 3299 raw_cons += CMP_LEN(cmpl_type); 3300 } 3301 } 3302 3303 return RTE_ETH_RX_DESC_AVAIL; 3304 } 3305 3306 static int 3307 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 3308 { 3309 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 3310 struct bnxt_cp_ring_info *cpr = txq->cp_ring; 3311 uint32_t ring_mask, raw_cons, nb_tx_pkts = 0; 3312 struct cmpl_base *cp_desc_ring; 3313 int rc; 3314 3315 rc = is_bnxt_in_error(txq->bp); 3316 if (rc) 3317 return rc; 3318 3319 if (offset >= txq->nb_tx_desc) 3320 return -EINVAL; 3321 3322 /* Return "desc done" if descriptor is available for use. */ 3323 if (bnxt_tx_bds_in_hw(txq) <= offset) 3324 return RTE_ETH_TX_DESC_DONE; 3325 3326 raw_cons = cpr->cp_raw_cons; 3327 cp_desc_ring = cpr->cp_desc_ring; 3328 ring_mask = cpr->cp_ring_struct->ring_mask; 3329 3330 /* Check to see if hw has posted a completion for the descriptor. */ 3331 while (1) { 3332 struct tx_cmpl *txcmp; 3333 uint32_t cons; 3334 3335 cons = RING_CMPL(ring_mask, raw_cons); 3336 txcmp = (struct tx_cmpl *)&cp_desc_ring[cons]; 3337 3338 if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1)) 3339 break; 3340 3341 if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) 3342 nb_tx_pkts += rte_le_to_cpu_32(txcmp->opaque); 3343 3344 if (nb_tx_pkts > offset) 3345 return RTE_ETH_TX_DESC_DONE; 3346 3347 raw_cons = NEXT_RAW_CMP(raw_cons); 3348 } 3349 3350 /* Descriptor is pending transmit, not yet completed by hardware. */ 3351 return RTE_ETH_TX_DESC_FULL; 3352 } 3353 3354 int 3355 bnxt_flow_ops_get_op(struct rte_eth_dev *dev, 3356 const struct rte_flow_ops **ops) 3357 { 3358 struct bnxt *bp = dev->data->dev_private; 3359 int ret = 0; 3360 3361 if (!bp) 3362 return -EIO; 3363 3364 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) { 3365 struct bnxt_representor *vfr = dev->data->dev_private; 3366 bp = vfr->parent_dev->data->dev_private; 3367 /* parent is deleted while children are still valid */ 3368 if (!bp) { 3369 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error\n", 3370 dev->data->port_id); 3371 return -EIO; 3372 } 3373 } 3374 3375 ret = is_bnxt_in_error(bp); 3376 if (ret) 3377 return ret; 3378 3379 /* PMD supports thread-safe flow operations. rte_flow API 3380 * functions can avoid mutex for multi-thread safety. 3381 */ 3382 dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; 3383 3384 if (BNXT_TRUFLOW_EN(bp)) 3385 *ops = &bnxt_ulp_rte_flow_ops; 3386 else 3387 *ops = &bnxt_flow_ops; 3388 3389 return ret; 3390 } 3391 3392 static const uint32_t * 3393 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 3394 { 3395 static const uint32_t ptypes[] = { 3396 RTE_PTYPE_L2_ETHER_VLAN, 3397 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 3398 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 3399 RTE_PTYPE_L4_ICMP, 3400 RTE_PTYPE_L4_TCP, 3401 RTE_PTYPE_L4_UDP, 3402 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 3403 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 3404 RTE_PTYPE_INNER_L4_ICMP, 3405 RTE_PTYPE_INNER_L4_TCP, 3406 RTE_PTYPE_INNER_L4_UDP, 3407 RTE_PTYPE_UNKNOWN 3408 }; 3409 3410 if (!dev->rx_pkt_burst) 3411 return NULL; 3412 3413 return ptypes; 3414 } 3415 3416 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 3417 int reg_win) 3418 { 3419 uint32_t reg_base = *reg_arr & 0xfffff000; 3420 uint32_t win_off; 3421 int i; 3422 3423 for (i = 0; i < count; i++) { 3424 if ((reg_arr[i] & 0xfffff000) != reg_base) 3425 return -ERANGE; 3426 } 3427 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 3428 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 3429 return 0; 3430 } 3431 3432 static int bnxt_map_ptp_regs(struct bnxt *bp) 3433 { 3434 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3435 uint32_t *reg_arr; 3436 int rc, i; 3437 3438 reg_arr = ptp->rx_regs; 3439 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 3440 if (rc) 3441 return rc; 3442 3443 reg_arr = ptp->tx_regs; 3444 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 3445 if (rc) 3446 return rc; 3447 3448 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 3449 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 3450 3451 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 3452 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 3453 3454 return 0; 3455 } 3456 3457 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 3458 { 3459 rte_write32(0, (uint8_t *)bp->bar0 + 3460 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 3461 rte_write32(0, (uint8_t *)bp->bar0 + 3462 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 3463 } 3464 3465 static uint64_t bnxt_cc_read(struct bnxt *bp) 3466 { 3467 uint64_t ns; 3468 3469 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3470 BNXT_GRCPF_REG_SYNC_TIME)); 3471 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3472 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 3473 return ns; 3474 } 3475 3476 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 3477 { 3478 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3479 uint32_t fifo; 3480 3481 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3482 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3483 if (fifo & BNXT_PTP_TX_FIFO_EMPTY) 3484 return -EAGAIN; 3485 3486 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3487 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3488 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3489 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 3490 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3491 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 3492 rte_read32((uint8_t *)bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_SEQ]); 3493 3494 return 0; 3495 } 3496 3497 static int bnxt_clr_rx_ts(struct bnxt *bp, uint64_t *last_ts) 3498 { 3499 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3500 struct bnxt_pf_info *pf = bp->pf; 3501 uint16_t port_id; 3502 int i = 0; 3503 uint32_t fifo; 3504 3505 if (!ptp || (bp->flags & BNXT_FLAG_CHIP_P5)) 3506 return -EINVAL; 3507 3508 port_id = pf->port_id; 3509 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3510 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3511 while ((fifo & BNXT_PTP_RX_FIFO_PENDING) && (i < BNXT_PTP_RX_PND_CNT)) { 3512 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3513 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3514 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3515 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3516 *last_ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3517 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3518 *last_ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3519 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3520 i++; 3521 } 3522 3523 if (i >= BNXT_PTP_RX_PND_CNT) 3524 return -EBUSY; 3525 3526 return 0; 3527 } 3528 3529 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 3530 { 3531 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3532 struct bnxt_pf_info *pf = bp->pf; 3533 uint16_t port_id; 3534 uint32_t fifo; 3535 3536 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3537 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3538 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 3539 return -EAGAIN; 3540 3541 port_id = pf->port_id; 3542 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3543 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3544 3545 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3546 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3547 if (fifo & BNXT_PTP_RX_FIFO_PENDING) 3548 return bnxt_clr_rx_ts(bp, ts); 3549 3550 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3551 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3552 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3553 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3554 3555 return 0; 3556 } 3557 3558 static int 3559 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 3560 { 3561 uint64_t ns; 3562 struct bnxt *bp = dev->data->dev_private; 3563 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3564 3565 if (!ptp) 3566 return -ENOTSUP; 3567 3568 ns = rte_timespec_to_ns(ts); 3569 /* Set the timecounters to a new value. */ 3570 ptp->tc.nsec = ns; 3571 ptp->tx_tstamp_tc.nsec = ns; 3572 ptp->rx_tstamp_tc.nsec = ns; 3573 3574 return 0; 3575 } 3576 3577 static int 3578 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 3579 { 3580 struct bnxt *bp = dev->data->dev_private; 3581 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3582 uint64_t ns, systime_cycles = 0; 3583 int rc = 0; 3584 3585 if (!ptp) 3586 return -ENOTSUP; 3587 3588 if (BNXT_CHIP_P5(bp)) 3589 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 3590 &systime_cycles); 3591 else 3592 systime_cycles = bnxt_cc_read(bp); 3593 3594 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3595 *ts = rte_ns_to_timespec(ns); 3596 3597 return rc; 3598 } 3599 static int 3600 bnxt_timesync_enable(struct rte_eth_dev *dev) 3601 { 3602 struct bnxt *bp = dev->data->dev_private; 3603 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3604 uint32_t shift = 0; 3605 int rc; 3606 3607 if (!ptp) 3608 return -ENOTSUP; 3609 3610 ptp->rx_filter = 1; 3611 ptp->tx_tstamp_en = 1; 3612 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3613 3614 rc = bnxt_hwrm_ptp_cfg(bp); 3615 if (rc) 3616 return rc; 3617 3618 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 3619 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3620 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3621 3622 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3623 ptp->tc.cc_shift = shift; 3624 ptp->tc.nsec_mask = (1ULL << shift) - 1; 3625 3626 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3627 ptp->rx_tstamp_tc.cc_shift = shift; 3628 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3629 3630 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3631 ptp->tx_tstamp_tc.cc_shift = shift; 3632 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3633 3634 if (!BNXT_CHIP_P5(bp)) 3635 bnxt_map_ptp_regs(bp); 3636 else 3637 rc = bnxt_ptp_start(bp); 3638 3639 return rc; 3640 } 3641 3642 static int 3643 bnxt_timesync_disable(struct rte_eth_dev *dev) 3644 { 3645 struct bnxt *bp = dev->data->dev_private; 3646 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3647 3648 if (!ptp) 3649 return -ENOTSUP; 3650 3651 ptp->rx_filter = 0; 3652 ptp->tx_tstamp_en = 0; 3653 ptp->rxctl = 0; 3654 3655 bnxt_hwrm_ptp_cfg(bp); 3656 3657 if (!BNXT_CHIP_P5(bp)) 3658 bnxt_unmap_ptp_regs(bp); 3659 else 3660 bnxt_ptp_stop(bp); 3661 3662 return 0; 3663 } 3664 3665 static int 3666 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3667 struct timespec *timestamp, 3668 uint32_t flags __rte_unused) 3669 { 3670 struct bnxt *bp = dev->data->dev_private; 3671 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3672 uint64_t rx_tstamp_cycles = 0; 3673 uint64_t ns; 3674 3675 if (!ptp) 3676 return -ENOTSUP; 3677 3678 if (BNXT_CHIP_P5(bp)) 3679 rx_tstamp_cycles = ptp->rx_timestamp; 3680 else 3681 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 3682 3683 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 3684 *timestamp = rte_ns_to_timespec(ns); 3685 return 0; 3686 } 3687 3688 static int 3689 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3690 struct timespec *timestamp) 3691 { 3692 struct bnxt *bp = dev->data->dev_private; 3693 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3694 uint64_t tx_tstamp_cycles = 0; 3695 uint64_t ns; 3696 int rc = 0; 3697 3698 if (!ptp) 3699 return -ENOTSUP; 3700 3701 if (BNXT_CHIP_P5(bp)) 3702 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, 3703 &tx_tstamp_cycles); 3704 else 3705 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 3706 3707 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 3708 *timestamp = rte_ns_to_timespec(ns); 3709 3710 return rc; 3711 } 3712 3713 static int 3714 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 3715 { 3716 struct bnxt *bp = dev->data->dev_private; 3717 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3718 3719 if (!ptp) 3720 return -ENOTSUP; 3721 3722 ptp->tc.nsec += delta; 3723 ptp->tx_tstamp_tc.nsec += delta; 3724 ptp->rx_tstamp_tc.nsec += delta; 3725 3726 return 0; 3727 } 3728 3729 static int 3730 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 3731 { 3732 struct bnxt *bp = dev->data->dev_private; 3733 int rc; 3734 uint32_t dir_entries; 3735 uint32_t entry_length; 3736 3737 rc = is_bnxt_in_error(bp); 3738 if (rc) 3739 return rc; 3740 3741 PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n", 3742 bp->pdev->addr.domain, bp->pdev->addr.bus, 3743 bp->pdev->addr.devid, bp->pdev->addr.function); 3744 3745 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 3746 if (rc != 0) 3747 return rc; 3748 3749 return dir_entries * entry_length; 3750 } 3751 3752 static int 3753 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 3754 struct rte_dev_eeprom_info *in_eeprom) 3755 { 3756 struct bnxt *bp = dev->data->dev_private; 3757 uint32_t index; 3758 uint32_t offset; 3759 int rc; 3760 3761 rc = is_bnxt_in_error(bp); 3762 if (rc) 3763 return rc; 3764 3765 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3766 bp->pdev->addr.domain, bp->pdev->addr.bus, 3767 bp->pdev->addr.devid, bp->pdev->addr.function, 3768 in_eeprom->offset, in_eeprom->length); 3769 3770 if (in_eeprom->offset == 0) /* special offset value to get directory */ 3771 return bnxt_get_nvram_directory(bp, in_eeprom->length, 3772 in_eeprom->data); 3773 3774 index = in_eeprom->offset >> 24; 3775 offset = in_eeprom->offset & 0xffffff; 3776 3777 if (index != 0) 3778 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 3779 in_eeprom->length, in_eeprom->data); 3780 3781 return 0; 3782 } 3783 3784 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 3785 { 3786 switch (dir_type) { 3787 case BNX_DIR_TYPE_CHIMP_PATCH: 3788 case BNX_DIR_TYPE_BOOTCODE: 3789 case BNX_DIR_TYPE_BOOTCODE_2: 3790 case BNX_DIR_TYPE_APE_FW: 3791 case BNX_DIR_TYPE_APE_PATCH: 3792 case BNX_DIR_TYPE_KONG_FW: 3793 case BNX_DIR_TYPE_KONG_PATCH: 3794 case BNX_DIR_TYPE_BONO_FW: 3795 case BNX_DIR_TYPE_BONO_PATCH: 3796 /* FALLTHROUGH */ 3797 return true; 3798 } 3799 3800 return false; 3801 } 3802 3803 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 3804 { 3805 switch (dir_type) { 3806 case BNX_DIR_TYPE_AVS: 3807 case BNX_DIR_TYPE_EXP_ROM_MBA: 3808 case BNX_DIR_TYPE_PCIE: 3809 case BNX_DIR_TYPE_TSCF_UCODE: 3810 case BNX_DIR_TYPE_EXT_PHY: 3811 case BNX_DIR_TYPE_CCM: 3812 case BNX_DIR_TYPE_ISCSI_BOOT: 3813 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3814 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3815 /* FALLTHROUGH */ 3816 return true; 3817 } 3818 3819 return false; 3820 } 3821 3822 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 3823 { 3824 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3825 bnxt_dir_type_is_other_exec_format(dir_type); 3826 } 3827 3828 static int 3829 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 3830 struct rte_dev_eeprom_info *in_eeprom) 3831 { 3832 struct bnxt *bp = dev->data->dev_private; 3833 uint8_t index, dir_op; 3834 uint16_t type, ext, ordinal, attr; 3835 int rc; 3836 3837 rc = is_bnxt_in_error(bp); 3838 if (rc) 3839 return rc; 3840 3841 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3842 bp->pdev->addr.domain, bp->pdev->addr.bus, 3843 bp->pdev->addr.devid, bp->pdev->addr.function, 3844 in_eeprom->offset, in_eeprom->length); 3845 3846 if (!BNXT_PF(bp)) { 3847 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 3848 return -EINVAL; 3849 } 3850 3851 type = in_eeprom->magic >> 16; 3852 3853 if (type == 0xffff) { /* special value for directory operations */ 3854 index = in_eeprom->magic & 0xff; 3855 dir_op = in_eeprom->magic >> 8; 3856 if (index == 0) 3857 return -EINVAL; 3858 switch (dir_op) { 3859 case 0x0e: /* erase */ 3860 if (in_eeprom->offset != ~in_eeprom->magic) 3861 return -EINVAL; 3862 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 3863 default: 3864 return -EINVAL; 3865 } 3866 } 3867 3868 /* Create or re-write an NVM item: */ 3869 if (bnxt_dir_type_is_executable(type) == true) 3870 return -EOPNOTSUPP; 3871 ext = in_eeprom->magic & 0xffff; 3872 ordinal = in_eeprom->offset >> 16; 3873 attr = in_eeprom->offset & 0xffff; 3874 3875 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 3876 in_eeprom->data, in_eeprom->length); 3877 } 3878 3879 static int bnxt_get_module_info(struct rte_eth_dev *dev, 3880 struct rte_eth_dev_module_info *modinfo) 3881 { 3882 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3883 struct bnxt *bp = dev->data->dev_private; 3884 int rc; 3885 3886 /* No point in going further if phy status indicates 3887 * module is not inserted or if it is powered down or 3888 * if it is of type 10GBase-T 3889 */ 3890 if (bp->link_info->module_status > 3891 HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG) { 3892 PMD_DRV_LOG(NOTICE, "Port %u : Module is not inserted or is powered down\n", 3893 dev->data->port_id); 3894 return -ENOTSUP; 3895 } 3896 3897 /* This feature is not supported in older firmware versions */ 3898 if (bp->hwrm_spec_code < 0x10202) { 3899 PMD_DRV_LOG(NOTICE, "Port %u : Feature is not supported in older firmware\n", 3900 dev->data->port_id); 3901 return -ENOTSUP; 3902 } 3903 3904 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3905 SFF_DIAG_SUPPORT_OFFSET + 1, 3906 module_info); 3907 3908 if (rc) 3909 return rc; 3910 3911 switch (module_info[0]) { 3912 case SFF_MODULE_ID_SFP: 3913 modinfo->type = RTE_ETH_MODULE_SFF_8472; 3914 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 3915 if (module_info[SFF_DIAG_SUPPORT_OFFSET] == 0) 3916 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3917 break; 3918 case SFF_MODULE_ID_QSFP: 3919 case SFF_MODULE_ID_QSFP_PLUS: 3920 modinfo->type = RTE_ETH_MODULE_SFF_8436; 3921 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3922 break; 3923 case SFF_MODULE_ID_QSFP28: 3924 modinfo->type = RTE_ETH_MODULE_SFF_8636; 3925 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 3926 if (module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) 3927 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_LEN; 3928 break; 3929 default: 3930 PMD_DRV_LOG(NOTICE, "Port %u : Unsupported module\n", dev->data->port_id); 3931 return -ENOTSUP; 3932 } 3933 3934 PMD_DRV_LOG(INFO, "Port %u : modinfo->type = %d modinfo->eeprom_len = %d\n", 3935 dev->data->port_id, modinfo->type, modinfo->eeprom_len); 3936 3937 return 0; 3938 } 3939 3940 static int bnxt_get_module_eeprom(struct rte_eth_dev *dev, 3941 struct rte_dev_eeprom_info *info) 3942 { 3943 uint8_t pg_addr[5] = { I2C_DEV_ADDR_A0, I2C_DEV_ADDR_A0 }; 3944 uint32_t offset = info->offset, length = info->length; 3945 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3946 struct bnxt *bp = dev->data->dev_private; 3947 uint8_t *data = info->data; 3948 uint8_t page = offset >> 7; 3949 uint8_t max_pages = 2; 3950 uint8_t opt_pages; 3951 int rc; 3952 3953 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3954 SFF_DIAG_SUPPORT_OFFSET + 1, 3955 module_info); 3956 if (rc) 3957 return rc; 3958 3959 switch (module_info[0]) { 3960 case SFF_MODULE_ID_SFP: 3961 module_info[SFF_DIAG_SUPPORT_OFFSET] = 0; 3962 if (module_info[SFF_DIAG_SUPPORT_OFFSET]) { 3963 pg_addr[2] = I2C_DEV_ADDR_A2; 3964 pg_addr[3] = I2C_DEV_ADDR_A2; 3965 max_pages = 4; 3966 } 3967 break; 3968 case SFF_MODULE_ID_QSFP28: 3969 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 3970 SFF8636_OPT_PAGES_OFFSET, 3971 1, &opt_pages); 3972 if (rc) 3973 return rc; 3974 3975 if (opt_pages & SFF8636_PAGE1_MASK) { 3976 pg_addr[2] = I2C_DEV_ADDR_A0; 3977 max_pages = 3; 3978 } 3979 if (opt_pages & SFF8636_PAGE2_MASK) { 3980 pg_addr[3] = I2C_DEV_ADDR_A0; 3981 max_pages = 4; 3982 } 3983 if (~module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) { 3984 pg_addr[4] = I2C_DEV_ADDR_A0; 3985 max_pages = 5; 3986 } 3987 break; 3988 default: 3989 break; 3990 } 3991 3992 memset(data, 0, length); 3993 3994 offset &= 0xff; 3995 while (length && page < max_pages) { 3996 uint8_t raw_page = page ? page - 1 : 0; 3997 uint16_t chunk; 3998 3999 if (pg_addr[page] == I2C_DEV_ADDR_A2) 4000 raw_page = 0; 4001 else if (page) 4002 offset |= 0x80; 4003 chunk = RTE_MIN(length, 256 - offset); 4004 4005 if (pg_addr[page]) { 4006 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, pg_addr[page], 4007 raw_page, offset, 4008 chunk, data); 4009 if (rc) 4010 return rc; 4011 } 4012 4013 data += chunk; 4014 length -= chunk; 4015 offset = 0; 4016 page += 1 + (chunk > 128); 4017 } 4018 4019 return length ? -EINVAL : 0; 4020 } 4021 4022 /* 4023 * Initialization 4024 */ 4025 4026 static const struct eth_dev_ops bnxt_dev_ops = { 4027 .dev_infos_get = bnxt_dev_info_get_op, 4028 .dev_close = bnxt_dev_close_op, 4029 .dev_configure = bnxt_dev_configure_op, 4030 .dev_start = bnxt_dev_start_op, 4031 .dev_stop = bnxt_dev_stop_op, 4032 .dev_set_link_up = bnxt_dev_set_link_up_op, 4033 .dev_set_link_down = bnxt_dev_set_link_down_op, 4034 .stats_get = bnxt_stats_get_op, 4035 .stats_reset = bnxt_stats_reset_op, 4036 .rx_queue_setup = bnxt_rx_queue_setup_op, 4037 .rx_queue_release = bnxt_rx_queue_release_op, 4038 .tx_queue_setup = bnxt_tx_queue_setup_op, 4039 .tx_queue_release = bnxt_tx_queue_release_op, 4040 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 4041 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 4042 .reta_update = bnxt_reta_update_op, 4043 .reta_query = bnxt_reta_query_op, 4044 .rss_hash_update = bnxt_rss_hash_update_op, 4045 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 4046 .link_update = bnxt_link_update_op, 4047 .promiscuous_enable = bnxt_promiscuous_enable_op, 4048 .promiscuous_disable = bnxt_promiscuous_disable_op, 4049 .allmulticast_enable = bnxt_allmulticast_enable_op, 4050 .allmulticast_disable = bnxt_allmulticast_disable_op, 4051 .mac_addr_add = bnxt_mac_addr_add_op, 4052 .mac_addr_remove = bnxt_mac_addr_remove_op, 4053 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 4054 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 4055 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 4056 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 4057 .vlan_filter_set = bnxt_vlan_filter_set_op, 4058 .vlan_offload_set = bnxt_vlan_offload_set_op, 4059 .vlan_tpid_set = bnxt_vlan_tpid_set_op, 4060 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 4061 .mtu_set = bnxt_mtu_set_op, 4062 .mac_addr_set = bnxt_set_default_mac_addr_op, 4063 .xstats_get = bnxt_dev_xstats_get_op, 4064 .xstats_get_names = bnxt_dev_xstats_get_names_op, 4065 .xstats_reset = bnxt_dev_xstats_reset_op, 4066 .fw_version_get = bnxt_fw_version_get, 4067 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 4068 .rxq_info_get = bnxt_rxq_info_get_op, 4069 .txq_info_get = bnxt_txq_info_get_op, 4070 .rx_burst_mode_get = bnxt_rx_burst_mode_get, 4071 .tx_burst_mode_get = bnxt_tx_burst_mode_get, 4072 .dev_led_on = bnxt_dev_led_on_op, 4073 .dev_led_off = bnxt_dev_led_off_op, 4074 .rx_queue_start = bnxt_rx_queue_start, 4075 .rx_queue_stop = bnxt_rx_queue_stop, 4076 .tx_queue_start = bnxt_tx_queue_start, 4077 .tx_queue_stop = bnxt_tx_queue_stop, 4078 .flow_ops_get = bnxt_flow_ops_get_op, 4079 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 4080 .get_eeprom_length = bnxt_get_eeprom_length_op, 4081 .get_eeprom = bnxt_get_eeprom_op, 4082 .set_eeprom = bnxt_set_eeprom_op, 4083 .get_module_info = bnxt_get_module_info, 4084 .get_module_eeprom = bnxt_get_module_eeprom, 4085 .timesync_enable = bnxt_timesync_enable, 4086 .timesync_disable = bnxt_timesync_disable, 4087 .timesync_read_time = bnxt_timesync_read_time, 4088 .timesync_write_time = bnxt_timesync_write_time, 4089 .timesync_adjust_time = bnxt_timesync_adjust_time, 4090 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 4091 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 4092 }; 4093 4094 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) 4095 { 4096 uint32_t offset; 4097 4098 /* Only pre-map the reset GRC registers using window 3 */ 4099 rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + 4100 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); 4101 4102 offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); 4103 4104 return offset; 4105 } 4106 4107 int bnxt_map_fw_health_status_regs(struct bnxt *bp) 4108 { 4109 struct bnxt_error_recovery_info *info = bp->recovery_info; 4110 uint32_t reg_base = 0xffffffff; 4111 int i; 4112 4113 /* Only pre-map the monitoring GRC registers using window 2 */ 4114 for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { 4115 uint32_t reg = info->status_regs[i]; 4116 4117 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) 4118 continue; 4119 4120 if (reg_base == 0xffffffff) 4121 reg_base = reg & 0xfffff000; 4122 if ((reg & 0xfffff000) != reg_base) 4123 return -ERANGE; 4124 4125 /* Use mask 0xffc as the Lower 2 bits indicates 4126 * address space location 4127 */ 4128 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + 4129 (reg & 0xffc); 4130 } 4131 4132 if (reg_base == 0xffffffff) 4133 return 0; 4134 4135 rte_write32(reg_base, (uint8_t *)bp->bar0 + 4136 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 4137 4138 return 0; 4139 } 4140 4141 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) 4142 { 4143 struct bnxt_error_recovery_info *info = bp->recovery_info; 4144 uint32_t delay = info->delay_after_reset[index]; 4145 uint32_t val = info->reset_reg_val[index]; 4146 uint32_t reg = info->reset_reg[index]; 4147 uint32_t type, offset; 4148 int ret; 4149 4150 type = BNXT_FW_STATUS_REG_TYPE(reg); 4151 offset = BNXT_FW_STATUS_REG_OFF(reg); 4152 4153 switch (type) { 4154 case BNXT_FW_STATUS_REG_TYPE_CFG: 4155 ret = rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); 4156 if (ret < 0) { 4157 PMD_DRV_LOG(ERR, "Failed to write %#x at PCI offset %#x", 4158 val, offset); 4159 return; 4160 } 4161 break; 4162 case BNXT_FW_STATUS_REG_TYPE_GRC: 4163 offset = bnxt_map_reset_regs(bp, offset); 4164 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4165 break; 4166 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4167 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4168 break; 4169 } 4170 /* wait on a specific interval of time until core reset is complete */ 4171 if (delay) 4172 rte_delay_ms(delay); 4173 } 4174 4175 static void bnxt_dev_cleanup(struct bnxt *bp) 4176 { 4177 bp->eth_dev->data->dev_link.link_status = 0; 4178 bp->link_info->link_up = 0; 4179 if (bp->eth_dev->data->dev_started) 4180 bnxt_dev_stop(bp->eth_dev); 4181 4182 bnxt_uninit_resources(bp, true); 4183 } 4184 4185 static int 4186 bnxt_check_fw_reset_done(struct bnxt *bp) 4187 { 4188 int timeout = bp->fw_reset_max_msecs; 4189 uint16_t val = 0; 4190 int rc; 4191 4192 do { 4193 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4194 if (rc < 0) { 4195 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4196 return rc; 4197 } 4198 if (val != 0xffff) 4199 break; 4200 rte_delay_ms(1); 4201 } while (timeout--); 4202 4203 if (val == 0xffff) { 4204 PMD_DRV_LOG(ERR, "Firmware reset aborted, PCI config space invalid\n"); 4205 return -1; 4206 } 4207 4208 return 0; 4209 } 4210 4211 static int bnxt_restore_vlan_filters(struct bnxt *bp) 4212 { 4213 struct rte_eth_dev *dev = bp->eth_dev; 4214 struct rte_vlan_filter_conf *vfc; 4215 int vidx, vbit, rc; 4216 uint16_t vlan_id; 4217 4218 for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) { 4219 vfc = &dev->data->vlan_filter_conf; 4220 vidx = vlan_id / 64; 4221 vbit = vlan_id % 64; 4222 4223 /* Each bit corresponds to a VLAN id */ 4224 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) { 4225 rc = bnxt_add_vlan_filter(bp, vlan_id); 4226 if (rc) 4227 return rc; 4228 } 4229 } 4230 4231 return 0; 4232 } 4233 4234 static int bnxt_restore_mac_filters(struct bnxt *bp) 4235 { 4236 struct rte_eth_dev *dev = bp->eth_dev; 4237 struct rte_eth_dev_info dev_info; 4238 struct rte_ether_addr *addr; 4239 uint64_t pool_mask; 4240 uint32_t pool = 0; 4241 uint32_t i; 4242 int rc; 4243 4244 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4245 return 0; 4246 4247 rc = bnxt_dev_info_get_op(dev, &dev_info); 4248 if (rc) 4249 return rc; 4250 4251 /* replay MAC address configuration */ 4252 for (i = 1; i < dev_info.max_mac_addrs; i++) { 4253 addr = &dev->data->mac_addrs[i]; 4254 4255 /* skip zero address */ 4256 if (rte_is_zero_ether_addr(addr)) 4257 continue; 4258 4259 pool = 0; 4260 pool_mask = dev->data->mac_pool_sel[i]; 4261 4262 do { 4263 if (pool_mask & 1ULL) { 4264 rc = bnxt_mac_addr_add_op(dev, addr, i, pool); 4265 if (rc) 4266 return rc; 4267 } 4268 pool_mask >>= 1; 4269 pool++; 4270 } while (pool_mask); 4271 } 4272 4273 return 0; 4274 } 4275 4276 static int bnxt_restore_filters(struct bnxt *bp) 4277 { 4278 struct rte_eth_dev *dev = bp->eth_dev; 4279 int ret = 0; 4280 4281 if (dev->data->all_multicast) { 4282 ret = bnxt_allmulticast_enable_op(dev); 4283 if (ret) 4284 return ret; 4285 } 4286 if (dev->data->promiscuous) { 4287 ret = bnxt_promiscuous_enable_op(dev); 4288 if (ret) 4289 return ret; 4290 } 4291 4292 ret = bnxt_restore_mac_filters(bp); 4293 if (ret) 4294 return ret; 4295 4296 ret = bnxt_restore_vlan_filters(bp); 4297 /* TODO restore other filters as well */ 4298 return ret; 4299 } 4300 4301 static int bnxt_check_fw_ready(struct bnxt *bp) 4302 { 4303 int timeout = bp->fw_reset_max_msecs; 4304 int rc = 0; 4305 4306 do { 4307 rc = bnxt_hwrm_poll_ver_get(bp); 4308 if (rc == 0) 4309 break; 4310 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); 4311 timeout -= BNXT_FW_READY_WAIT_INTERVAL; 4312 } while (rc && timeout > 0); 4313 4314 if (rc) 4315 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); 4316 4317 return rc; 4318 } 4319 4320 static void bnxt_dev_recover(void *arg) 4321 { 4322 struct bnxt *bp = arg; 4323 int rc = 0; 4324 4325 pthread_mutex_lock(&bp->err_recovery_lock); 4326 4327 if (!bp->fw_reset_min_msecs) { 4328 rc = bnxt_check_fw_reset_done(bp); 4329 if (rc) 4330 goto err; 4331 } 4332 4333 /* Clear Error flag so that device re-init should happen */ 4334 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 4335 4336 rc = bnxt_check_fw_ready(bp); 4337 if (rc) 4338 goto err; 4339 4340 rc = bnxt_init_resources(bp, true); 4341 if (rc) { 4342 PMD_DRV_LOG(ERR, 4343 "Failed to initialize resources after reset\n"); 4344 goto err; 4345 } 4346 /* clear reset flag as the device is initialized now */ 4347 bp->flags &= ~BNXT_FLAG_FW_RESET; 4348 4349 rc = bnxt_dev_start_op(bp->eth_dev); 4350 if (rc) { 4351 PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); 4352 goto err_start; 4353 } 4354 4355 rc = bnxt_restore_filters(bp); 4356 if (rc) 4357 goto err_start; 4358 4359 PMD_DRV_LOG(INFO, "Recovered from FW reset\n"); 4360 pthread_mutex_unlock(&bp->err_recovery_lock); 4361 4362 return; 4363 err_start: 4364 bnxt_dev_stop(bp->eth_dev); 4365 err: 4366 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4367 bnxt_uninit_resources(bp, false); 4368 if (bp->eth_dev->data->dev_conf.intr_conf.rmv) 4369 rte_eth_dev_callback_process(bp->eth_dev, 4370 RTE_ETH_EVENT_INTR_RMV, 4371 NULL); 4372 pthread_mutex_unlock(&bp->err_recovery_lock); 4373 PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n"); 4374 } 4375 4376 void bnxt_dev_reset_and_resume(void *arg) 4377 { 4378 struct bnxt *bp = arg; 4379 uint32_t us = US_PER_MS * bp->fw_reset_min_msecs; 4380 uint16_t val = 0; 4381 int rc; 4382 4383 bnxt_dev_cleanup(bp); 4384 4385 bnxt_wait_for_device_shutdown(bp); 4386 4387 /* During some fatal firmware error conditions, the PCI config space 4388 * register 0x2e which normally contains the subsystem ID will become 4389 * 0xffff. This register will revert back to the normal value after 4390 * the chip has completed core reset. If we detect this condition, 4391 * we can poll this config register immediately for the value to revert. 4392 */ 4393 if (bp->flags & BNXT_FLAG_FATAL_ERROR) { 4394 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4395 if (rc < 0) { 4396 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4397 return; 4398 } 4399 if (val == 0xffff) { 4400 bp->fw_reset_min_msecs = 0; 4401 us = 1; 4402 } 4403 } 4404 4405 rc = rte_eal_alarm_set(us, bnxt_dev_recover, (void *)bp); 4406 if (rc) 4407 PMD_DRV_LOG(ERR, "Error setting recovery alarm"); 4408 } 4409 4410 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) 4411 { 4412 struct bnxt_error_recovery_info *info = bp->recovery_info; 4413 uint32_t reg = info->status_regs[index]; 4414 uint32_t type, offset, val = 0; 4415 int ret = 0; 4416 4417 type = BNXT_FW_STATUS_REG_TYPE(reg); 4418 offset = BNXT_FW_STATUS_REG_OFF(reg); 4419 4420 switch (type) { 4421 case BNXT_FW_STATUS_REG_TYPE_CFG: 4422 ret = rte_pci_read_config(bp->pdev, &val, sizeof(val), offset); 4423 if (ret < 0) 4424 PMD_DRV_LOG(ERR, "Failed to read PCI offset %#x", 4425 offset); 4426 break; 4427 case BNXT_FW_STATUS_REG_TYPE_GRC: 4428 offset = info->mapped_status_regs[index]; 4429 /* FALLTHROUGH */ 4430 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4431 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 4432 offset)); 4433 break; 4434 } 4435 4436 return val; 4437 } 4438 4439 static int bnxt_fw_reset_all(struct bnxt *bp) 4440 { 4441 struct bnxt_error_recovery_info *info = bp->recovery_info; 4442 uint32_t i; 4443 int rc = 0; 4444 4445 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4446 /* Reset through master function driver */ 4447 for (i = 0; i < info->reg_array_cnt; i++) 4448 bnxt_write_fw_reset_reg(bp, i); 4449 /* Wait for time specified by FW after triggering reset */ 4450 rte_delay_ms(info->master_func_wait_period_after_reset); 4451 } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) { 4452 /* Reset with the help of Kong processor */ 4453 rc = bnxt_hwrm_fw_reset(bp); 4454 if (rc) 4455 PMD_DRV_LOG(ERR, "Failed to reset FW\n"); 4456 } 4457 4458 return rc; 4459 } 4460 4461 static void bnxt_fw_reset_cb(void *arg) 4462 { 4463 struct bnxt *bp = arg; 4464 struct bnxt_error_recovery_info *info = bp->recovery_info; 4465 int rc = 0; 4466 4467 /* Only Master function can do FW reset */ 4468 if (bnxt_is_master_func(bp) && 4469 bnxt_is_recovery_enabled(bp)) { 4470 rc = bnxt_fw_reset_all(bp); 4471 if (rc) { 4472 PMD_DRV_LOG(ERR, "Adapter recovery failed\n"); 4473 return; 4474 } 4475 } 4476 4477 /* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send 4478 * EXCEPTION_FATAL_ASYNC event to all the functions 4479 * (including MASTER FUNC). After receiving this Async, all the active 4480 * drivers should treat this case as FW initiated recovery 4481 */ 4482 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4483 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT; 4484 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT; 4485 4486 /* To recover from error */ 4487 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, 4488 (void *)bp); 4489 } 4490 } 4491 4492 /* Driver should poll FW heartbeat, reset_counter with the frequency 4493 * advertised by FW in HWRM_ERROR_RECOVERY_QCFG. 4494 * When the driver detects heartbeat stop or change in reset_counter, 4495 * it has to trigger a reset to recover from the error condition. 4496 * A “master PF” is the function who will have the privilege to 4497 * initiate the chimp reset. The master PF will be elected by the 4498 * firmware and will be notified through async message. 4499 */ 4500 static void bnxt_check_fw_health(void *arg) 4501 { 4502 struct bnxt *bp = arg; 4503 struct bnxt_error_recovery_info *info = bp->recovery_info; 4504 uint32_t val = 0, wait_msec; 4505 4506 if (!info || !bnxt_is_recovery_enabled(bp) || 4507 is_bnxt_in_error(bp)) 4508 return; 4509 4510 val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); 4511 if (val == info->last_heart_beat) 4512 goto reset; 4513 4514 info->last_heart_beat = val; 4515 4516 val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); 4517 if (val != info->last_reset_counter) 4518 goto reset; 4519 4520 info->last_reset_counter = val; 4521 4522 rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq, 4523 bnxt_check_fw_health, (void *)bp); 4524 4525 return; 4526 reset: 4527 /* Stop DMA to/from device */ 4528 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4529 bp->flags |= BNXT_FLAG_FW_RESET; 4530 4531 bnxt_stop_rxtx(bp); 4532 4533 PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); 4534 4535 if (bnxt_is_master_func(bp)) 4536 wait_msec = info->master_func_wait_period; 4537 else 4538 wait_msec = info->normal_func_wait_period; 4539 4540 rte_eal_alarm_set(US_PER_MS * wait_msec, 4541 bnxt_fw_reset_cb, (void *)bp); 4542 } 4543 4544 void bnxt_schedule_fw_health_check(struct bnxt *bp) 4545 { 4546 uint32_t polling_freq; 4547 4548 pthread_mutex_lock(&bp->health_check_lock); 4549 4550 if (!bnxt_is_recovery_enabled(bp)) 4551 goto done; 4552 4553 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) 4554 goto done; 4555 4556 polling_freq = bp->recovery_info->driver_polling_freq; 4557 4558 rte_eal_alarm_set(US_PER_MS * polling_freq, 4559 bnxt_check_fw_health, (void *)bp); 4560 bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4561 4562 done: 4563 pthread_mutex_unlock(&bp->health_check_lock); 4564 } 4565 4566 static void bnxt_cancel_fw_health_check(struct bnxt *bp) 4567 { 4568 rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp); 4569 bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4570 } 4571 4572 static bool bnxt_vf_pciid(uint16_t device_id) 4573 { 4574 switch (device_id) { 4575 case BROADCOM_DEV_ID_57304_VF: 4576 case BROADCOM_DEV_ID_57406_VF: 4577 case BROADCOM_DEV_ID_5731X_VF: 4578 case BROADCOM_DEV_ID_5741X_VF: 4579 case BROADCOM_DEV_ID_57414_VF: 4580 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4581 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4582 case BROADCOM_DEV_ID_58802_VF: 4583 case BROADCOM_DEV_ID_57500_VF1: 4584 case BROADCOM_DEV_ID_57500_VF2: 4585 case BROADCOM_DEV_ID_58818_VF: 4586 /* FALLTHROUGH */ 4587 return true; 4588 default: 4589 return false; 4590 } 4591 } 4592 4593 /* Phase 5 device */ 4594 static bool bnxt_p5_device(uint16_t device_id) 4595 { 4596 switch (device_id) { 4597 case BROADCOM_DEV_ID_57508: 4598 case BROADCOM_DEV_ID_57504: 4599 case BROADCOM_DEV_ID_57502: 4600 case BROADCOM_DEV_ID_57508_MF1: 4601 case BROADCOM_DEV_ID_57504_MF1: 4602 case BROADCOM_DEV_ID_57502_MF1: 4603 case BROADCOM_DEV_ID_57508_MF2: 4604 case BROADCOM_DEV_ID_57504_MF2: 4605 case BROADCOM_DEV_ID_57502_MF2: 4606 case BROADCOM_DEV_ID_57500_VF1: 4607 case BROADCOM_DEV_ID_57500_VF2: 4608 case BROADCOM_DEV_ID_58812: 4609 case BROADCOM_DEV_ID_58814: 4610 case BROADCOM_DEV_ID_58818: 4611 case BROADCOM_DEV_ID_58818_VF: 4612 /* FALLTHROUGH */ 4613 return true; 4614 default: 4615 return false; 4616 } 4617 } 4618 4619 bool bnxt_stratus_device(struct bnxt *bp) 4620 { 4621 uint16_t device_id = bp->pdev->id.device_id; 4622 4623 switch (device_id) { 4624 case BROADCOM_DEV_ID_STRATUS_NIC: 4625 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4626 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4627 /* FALLTHROUGH */ 4628 return true; 4629 default: 4630 return false; 4631 } 4632 } 4633 4634 static int bnxt_map_pci_bars(struct rte_eth_dev *eth_dev) 4635 { 4636 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 4637 struct bnxt *bp = eth_dev->data->dev_private; 4638 4639 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 4640 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 4641 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 4642 if (!bp->bar0 || !bp->doorbell_base) { 4643 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 4644 return -ENODEV; 4645 } 4646 4647 bp->eth_dev = eth_dev; 4648 bp->pdev = pci_dev; 4649 4650 return 0; 4651 } 4652 4653 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 4654 struct bnxt_ctx_pg_info *ctx_pg, 4655 uint32_t mem_size, 4656 const char *suffix, 4657 uint16_t idx) 4658 { 4659 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 4660 const struct rte_memzone *mz = NULL; 4661 char mz_name[RTE_MEMZONE_NAMESIZE]; 4662 rte_iova_t mz_phys_addr; 4663 uint64_t valid_bits = 0; 4664 uint32_t sz; 4665 int i; 4666 4667 if (!mem_size) 4668 return 0; 4669 4670 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 4671 BNXT_PAGE_SIZE; 4672 rmem->page_size = BNXT_PAGE_SIZE; 4673 rmem->pg_arr = ctx_pg->ctx_pg_arr; 4674 rmem->dma_arr = ctx_pg->ctx_dma_arr; 4675 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 4676 4677 valid_bits = PTU_PTE_VALID; 4678 4679 if (rmem->nr_pages > 1) { 4680 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4681 "bnxt_ctx_pg_tbl%s_%x_%d", 4682 suffix, idx, bp->eth_dev->data->port_id); 4683 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4684 mz = rte_memzone_lookup(mz_name); 4685 if (!mz) { 4686 mz = rte_memzone_reserve_aligned(mz_name, 4687 rmem->nr_pages * 8, 4688 bp->eth_dev->device->numa_node, 4689 RTE_MEMZONE_2MB | 4690 RTE_MEMZONE_SIZE_HINT_ONLY | 4691 RTE_MEMZONE_IOVA_CONTIG, 4692 BNXT_PAGE_SIZE); 4693 if (mz == NULL) 4694 return -ENOMEM; 4695 } 4696 4697 memset(mz->addr, 0, mz->len); 4698 mz_phys_addr = mz->iova; 4699 4700 rmem->pg_tbl = mz->addr; 4701 rmem->pg_tbl_map = mz_phys_addr; 4702 rmem->pg_tbl_mz = mz; 4703 } 4704 4705 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", 4706 suffix, idx, bp->eth_dev->data->port_id); 4707 mz = rte_memzone_lookup(mz_name); 4708 if (!mz) { 4709 mz = rte_memzone_reserve_aligned(mz_name, 4710 mem_size, 4711 bp->eth_dev->device->numa_node, 4712 RTE_MEMZONE_1GB | 4713 RTE_MEMZONE_SIZE_HINT_ONLY | 4714 RTE_MEMZONE_IOVA_CONTIG, 4715 BNXT_PAGE_SIZE); 4716 if (mz == NULL) 4717 return -ENOMEM; 4718 } 4719 4720 memset(mz->addr, 0, mz->len); 4721 mz_phys_addr = mz->iova; 4722 4723 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 4724 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 4725 rmem->dma_arr[i] = mz_phys_addr + sz; 4726 4727 if (rmem->nr_pages > 1) { 4728 if (i == rmem->nr_pages - 2 && 4729 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4730 valid_bits |= PTU_PTE_NEXT_TO_LAST; 4731 else if (i == rmem->nr_pages - 1 && 4732 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4733 valid_bits |= PTU_PTE_LAST; 4734 4735 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 4736 valid_bits); 4737 } 4738 } 4739 4740 rmem->mz = mz; 4741 if (rmem->vmem_size) 4742 rmem->vmem = (void **)mz->addr; 4743 rmem->dma_arr[0] = mz_phys_addr; 4744 return 0; 4745 } 4746 4747 static void bnxt_free_ctx_mem(struct bnxt *bp) 4748 { 4749 int i; 4750 4751 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 4752 return; 4753 4754 bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; 4755 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 4756 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 4757 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 4758 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 4759 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 4760 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 4761 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 4762 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 4763 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 4764 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 4765 4766 for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { 4767 if (bp->ctx->tqm_mem[i]) 4768 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 4769 } 4770 4771 rte_free(bp->ctx); 4772 bp->ctx = NULL; 4773 } 4774 4775 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 4776 4777 #define min_t(type, x, y) ({ \ 4778 type __min1 = (x); \ 4779 type __min2 = (y); \ 4780 __min1 < __min2 ? __min1 : __min2; }) 4781 4782 #define max_t(type, x, y) ({ \ 4783 type __max1 = (x); \ 4784 type __max2 = (y); \ 4785 __max1 > __max2 ? __max1 : __max2; }) 4786 4787 #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) 4788 4789 int bnxt_alloc_ctx_mem(struct bnxt *bp) 4790 { 4791 struct bnxt_ctx_pg_info *ctx_pg; 4792 struct bnxt_ctx_mem_info *ctx; 4793 uint32_t mem_size, ena, entries; 4794 uint32_t entries_sp, min; 4795 int i, rc; 4796 4797 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 4798 if (rc) { 4799 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 4800 return rc; 4801 } 4802 ctx = bp->ctx; 4803 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 4804 return 0; 4805 4806 ctx_pg = &ctx->qp_mem; 4807 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 4808 if (ctx->qp_entry_size) { 4809 mem_size = ctx->qp_entry_size * ctx_pg->entries; 4810 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 4811 if (rc) 4812 return rc; 4813 } 4814 4815 ctx_pg = &ctx->srq_mem; 4816 ctx_pg->entries = ctx->srq_max_l2_entries; 4817 if (ctx->srq_entry_size) { 4818 mem_size = ctx->srq_entry_size * ctx_pg->entries; 4819 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 4820 if (rc) 4821 return rc; 4822 } 4823 4824 ctx_pg = &ctx->cq_mem; 4825 ctx_pg->entries = ctx->cq_max_l2_entries; 4826 if (ctx->cq_entry_size) { 4827 mem_size = ctx->cq_entry_size * ctx_pg->entries; 4828 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 4829 if (rc) 4830 return rc; 4831 } 4832 4833 ctx_pg = &ctx->vnic_mem; 4834 ctx_pg->entries = ctx->vnic_max_vnic_entries + 4835 ctx->vnic_max_ring_table_entries; 4836 if (ctx->vnic_entry_size) { 4837 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 4838 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 4839 if (rc) 4840 return rc; 4841 } 4842 4843 ctx_pg = &ctx->stat_mem; 4844 ctx_pg->entries = ctx->stat_max_entries; 4845 if (ctx->stat_entry_size) { 4846 mem_size = ctx->stat_entry_size * ctx_pg->entries; 4847 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 4848 if (rc) 4849 return rc; 4850 } 4851 4852 min = ctx->tqm_min_entries_per_ring; 4853 4854 entries_sp = ctx->qp_max_l2_entries + 4855 ctx->vnic_max_vnic_entries + 4856 2 * ctx->qp_min_qp1_entries + min; 4857 entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple); 4858 4859 entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries; 4860 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 4861 entries = clamp_t(uint32_t, entries, min, 4862 ctx->tqm_max_entries_per_ring); 4863 for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 4864 /* i=0 is for TQM_SP. i=1 to i=8 applies to RING0 to RING7. 4865 * i > 8 is other ext rings. 4866 */ 4867 ctx_pg = ctx->tqm_mem[i]; 4868 ctx_pg->entries = i ? entries : entries_sp; 4869 if (ctx->tqm_entry_size) { 4870 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 4871 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, 4872 "tqm_mem", i); 4873 if (rc) 4874 return rc; 4875 } 4876 if (i < BNXT_MAX_TQM_LEGACY_RINGS) 4877 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 4878 else 4879 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8; 4880 } 4881 4882 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 4883 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 4884 if (rc) 4885 PMD_DRV_LOG(ERR, 4886 "Failed to configure context mem: rc = %d\n", rc); 4887 else 4888 ctx->flags |= BNXT_CTX_FLAG_INITED; 4889 4890 return rc; 4891 } 4892 4893 static int bnxt_alloc_stats_mem(struct bnxt *bp) 4894 { 4895 struct rte_pci_device *pci_dev = bp->pdev; 4896 char mz_name[RTE_MEMZONE_NAMESIZE]; 4897 const struct rte_memzone *mz = NULL; 4898 uint32_t total_alloc_len; 4899 rte_iova_t mz_phys_addr; 4900 4901 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 4902 return 0; 4903 4904 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4905 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4906 pci_dev->addr.bus, pci_dev->addr.devid, 4907 pci_dev->addr.function, "rx_port_stats"); 4908 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4909 mz = rte_memzone_lookup(mz_name); 4910 total_alloc_len = 4911 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 4912 sizeof(struct rx_port_stats_ext) + 512); 4913 if (!mz) { 4914 mz = rte_memzone_reserve(mz_name, total_alloc_len, 4915 SOCKET_ID_ANY, 4916 RTE_MEMZONE_2MB | 4917 RTE_MEMZONE_SIZE_HINT_ONLY | 4918 RTE_MEMZONE_IOVA_CONTIG); 4919 if (mz == NULL) 4920 return -ENOMEM; 4921 } 4922 memset(mz->addr, 0, mz->len); 4923 mz_phys_addr = mz->iova; 4924 4925 bp->rx_mem_zone = (const void *)mz; 4926 bp->hw_rx_port_stats = mz->addr; 4927 bp->hw_rx_port_stats_map = mz_phys_addr; 4928 4929 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4930 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4931 pci_dev->addr.bus, pci_dev->addr.devid, 4932 pci_dev->addr.function, "tx_port_stats"); 4933 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4934 mz = rte_memzone_lookup(mz_name); 4935 total_alloc_len = 4936 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 4937 sizeof(struct tx_port_stats_ext) + 512); 4938 if (!mz) { 4939 mz = rte_memzone_reserve(mz_name, 4940 total_alloc_len, 4941 SOCKET_ID_ANY, 4942 RTE_MEMZONE_2MB | 4943 RTE_MEMZONE_SIZE_HINT_ONLY | 4944 RTE_MEMZONE_IOVA_CONTIG); 4945 if (mz == NULL) 4946 return -ENOMEM; 4947 } 4948 memset(mz->addr, 0, mz->len); 4949 mz_phys_addr = mz->iova; 4950 4951 bp->tx_mem_zone = (const void *)mz; 4952 bp->hw_tx_port_stats = mz->addr; 4953 bp->hw_tx_port_stats_map = mz_phys_addr; 4954 bp->flags |= BNXT_FLAG_PORT_STATS; 4955 4956 /* Display extended statistics if FW supports it */ 4957 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 4958 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 4959 !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED)) 4960 return 0; 4961 4962 bp->hw_rx_port_stats_ext = (void *) 4963 ((uint8_t *)bp->hw_rx_port_stats + 4964 sizeof(struct rx_port_stats)); 4965 bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + 4966 sizeof(struct rx_port_stats); 4967 bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; 4968 4969 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 || 4970 bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) { 4971 bp->hw_tx_port_stats_ext = (void *) 4972 ((uint8_t *)bp->hw_tx_port_stats + 4973 sizeof(struct tx_port_stats)); 4974 bp->hw_tx_port_stats_ext_map = 4975 bp->hw_tx_port_stats_map + 4976 sizeof(struct tx_port_stats); 4977 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS; 4978 } 4979 4980 return 0; 4981 } 4982 4983 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) 4984 { 4985 struct bnxt *bp = eth_dev->data->dev_private; 4986 int rc = 0; 4987 4988 eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl", 4989 RTE_ETHER_ADDR_LEN * 4990 bp->max_l2_ctx, 4991 0); 4992 if (eth_dev->data->mac_addrs == NULL) { 4993 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n"); 4994 return -ENOMEM; 4995 } 4996 4997 if (!BNXT_HAS_DFLT_MAC_SET(bp)) { 4998 if (BNXT_PF(bp)) 4999 return -EINVAL; 5000 5001 /* Generate a random MAC address, if none was assigned by PF */ 5002 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n"); 5003 bnxt_eth_hw_addr_random(bp->mac_addr); 5004 PMD_DRV_LOG(INFO, 5005 "Assign random MAC:" RTE_ETHER_ADDR_PRT_FMT "\n", 5006 bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2], 5007 bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]); 5008 5009 rc = bnxt_hwrm_set_mac(bp); 5010 if (rc) 5011 return rc; 5012 } 5013 5014 /* Copy the permanent MAC from the FUNC_QCAPS response */ 5015 memcpy(ð_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN); 5016 5017 return rc; 5018 } 5019 5020 static int bnxt_restore_dflt_mac(struct bnxt *bp) 5021 { 5022 int rc = 0; 5023 5024 /* MAC is already configured in FW */ 5025 if (BNXT_HAS_DFLT_MAC_SET(bp)) 5026 return 0; 5027 5028 /* Restore the old MAC configured */ 5029 rc = bnxt_hwrm_set_mac(bp); 5030 if (rc) 5031 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n"); 5032 5033 return rc; 5034 } 5035 5036 static void bnxt_config_vf_req_fwd(struct bnxt *bp) 5037 { 5038 if (!BNXT_PF(bp)) 5039 return; 5040 5041 memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd)); 5042 5043 if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)) 5044 BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG); 5045 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG); 5046 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG); 5047 BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC); 5048 BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD); 5049 } 5050 5051 struct bnxt * 5052 bnxt_get_bp(uint16_t port) 5053 { 5054 struct bnxt *bp; 5055 struct rte_eth_dev *dev; 5056 5057 if (!rte_eth_dev_is_valid_port(port)) { 5058 PMD_DRV_LOG(ERR, "Invalid port %d\n", port); 5059 return NULL; 5060 } 5061 5062 dev = &rte_eth_devices[port]; 5063 if (!is_bnxt_supported(dev)) { 5064 PMD_DRV_LOG(ERR, "Device %d not supported\n", port); 5065 return NULL; 5066 } 5067 5068 bp = (struct bnxt *)dev->data->dev_private; 5069 if (!BNXT_TRUFLOW_EN(bp)) { 5070 PMD_DRV_LOG(ERR, "TRUFLOW not enabled\n"); 5071 return NULL; 5072 } 5073 5074 return bp; 5075 } 5076 5077 uint16_t 5078 bnxt_get_svif(uint16_t port_id, bool func_svif, 5079 enum bnxt_ulp_intf_type type) 5080 { 5081 struct rte_eth_dev *eth_dev; 5082 struct bnxt *bp; 5083 5084 eth_dev = &rte_eth_devices[port_id]; 5085 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5086 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5087 if (!vfr) 5088 return 0; 5089 5090 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5091 return vfr->svif; 5092 5093 eth_dev = vfr->parent_dev; 5094 } 5095 5096 bp = eth_dev->data->dev_private; 5097 5098 return func_svif ? bp->func_svif : bp->port_svif; 5099 } 5100 5101 void 5102 bnxt_get_iface_mac(uint16_t port, enum bnxt_ulp_intf_type type, 5103 uint8_t *mac, uint8_t *parent_mac) 5104 { 5105 struct rte_eth_dev *eth_dev; 5106 struct bnxt *bp; 5107 5108 if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF && 5109 type != BNXT_ULP_INTF_TYPE_PF) 5110 return; 5111 5112 eth_dev = &rte_eth_devices[port]; 5113 bp = eth_dev->data->dev_private; 5114 memcpy(mac, bp->mac_addr, RTE_ETHER_ADDR_LEN); 5115 5116 if (type == BNXT_ULP_INTF_TYPE_TRUSTED_VF) 5117 memcpy(parent_mac, bp->parent->mac_addr, RTE_ETHER_ADDR_LEN); 5118 } 5119 5120 uint16_t 5121 bnxt_get_parent_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type) 5122 { 5123 struct rte_eth_dev *eth_dev; 5124 struct bnxt *bp; 5125 5126 if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) 5127 return 0; 5128 5129 eth_dev = &rte_eth_devices[port]; 5130 bp = eth_dev->data->dev_private; 5131 5132 return bp->parent->vnic; 5133 } 5134 uint16_t 5135 bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type) 5136 { 5137 struct rte_eth_dev *eth_dev; 5138 struct bnxt_vnic_info *vnic; 5139 struct bnxt *bp; 5140 5141 eth_dev = &rte_eth_devices[port]; 5142 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5143 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5144 if (!vfr) 5145 return 0; 5146 5147 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5148 return vfr->dflt_vnic_id; 5149 5150 eth_dev = vfr->parent_dev; 5151 } 5152 5153 bp = eth_dev->data->dev_private; 5154 5155 vnic = BNXT_GET_DEFAULT_VNIC(bp); 5156 5157 return vnic->fw_vnic_id; 5158 } 5159 5160 uint16_t 5161 bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type) 5162 { 5163 struct rte_eth_dev *eth_dev; 5164 struct bnxt *bp; 5165 5166 eth_dev = &rte_eth_devices[port]; 5167 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5168 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5169 if (!vfr) 5170 return 0; 5171 5172 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5173 return vfr->fw_fid; 5174 5175 eth_dev = vfr->parent_dev; 5176 } 5177 5178 bp = eth_dev->data->dev_private; 5179 5180 return bp->fw_fid; 5181 } 5182 5183 enum bnxt_ulp_intf_type 5184 bnxt_get_interface_type(uint16_t port) 5185 { 5186 struct rte_eth_dev *eth_dev; 5187 struct bnxt *bp; 5188 5189 eth_dev = &rte_eth_devices[port]; 5190 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) 5191 return BNXT_ULP_INTF_TYPE_VF_REP; 5192 5193 bp = eth_dev->data->dev_private; 5194 if (BNXT_PF(bp)) 5195 return BNXT_ULP_INTF_TYPE_PF; 5196 else if (BNXT_VF_IS_TRUSTED(bp)) 5197 return BNXT_ULP_INTF_TYPE_TRUSTED_VF; 5198 else if (BNXT_VF(bp)) 5199 return BNXT_ULP_INTF_TYPE_VF; 5200 5201 return BNXT_ULP_INTF_TYPE_INVALID; 5202 } 5203 5204 uint16_t 5205 bnxt_get_phy_port_id(uint16_t port_id) 5206 { 5207 struct bnxt_representor *vfr; 5208 struct rte_eth_dev *eth_dev; 5209 struct bnxt *bp; 5210 5211 eth_dev = &rte_eth_devices[port_id]; 5212 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5213 vfr = eth_dev->data->dev_private; 5214 if (!vfr) 5215 return 0; 5216 5217 eth_dev = vfr->parent_dev; 5218 } 5219 5220 bp = eth_dev->data->dev_private; 5221 5222 return BNXT_PF(bp) ? bp->pf->port_id : bp->parent->port_id; 5223 } 5224 5225 uint16_t 5226 bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type) 5227 { 5228 struct rte_eth_dev *eth_dev; 5229 struct bnxt *bp; 5230 5231 eth_dev = &rte_eth_devices[port_id]; 5232 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5233 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5234 if (!vfr) 5235 return 0; 5236 5237 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5238 return vfr->fw_fid - 1; 5239 5240 eth_dev = vfr->parent_dev; 5241 } 5242 5243 bp = eth_dev->data->dev_private; 5244 5245 return BNXT_PF(bp) ? bp->fw_fid - 1 : bp->parent->fid - 1; 5246 } 5247 5248 uint16_t 5249 bnxt_get_vport(uint16_t port_id) 5250 { 5251 return (1 << bnxt_get_phy_port_id(port_id)); 5252 } 5253 5254 static void bnxt_alloc_error_recovery_info(struct bnxt *bp) 5255 { 5256 struct bnxt_error_recovery_info *info = bp->recovery_info; 5257 5258 if (info) { 5259 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)) 5260 memset(info, 0, sizeof(*info)); 5261 return; 5262 } 5263 5264 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5265 return; 5266 5267 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5268 sizeof(*info), 0); 5269 if (!info) 5270 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5271 5272 bp->recovery_info = info; 5273 } 5274 5275 static void bnxt_check_fw_status(struct bnxt *bp) 5276 { 5277 uint32_t fw_status; 5278 5279 if (!(bp->recovery_info && 5280 (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) 5281 return; 5282 5283 fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); 5284 if (fw_status != BNXT_FW_STATUS_HEALTHY) 5285 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", 5286 fw_status); 5287 } 5288 5289 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) 5290 { 5291 struct bnxt_error_recovery_info *info = bp->recovery_info; 5292 uint32_t status_loc; 5293 uint32_t sig_ver; 5294 5295 rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + 5296 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5297 sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5298 BNXT_GRCP_WINDOW_2_BASE + 5299 offsetof(struct hcomm_status, 5300 sig_ver))); 5301 /* If the signature is absent, then FW does not support this feature */ 5302 if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != 5303 HCOMM_STATUS_SIGNATURE_VAL) 5304 return 0; 5305 5306 if (!info) { 5307 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5308 sizeof(*info), 0); 5309 if (!info) 5310 return -ENOMEM; 5311 bp->recovery_info = info; 5312 } else { 5313 memset(info, 0, sizeof(*info)); 5314 } 5315 5316 status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5317 BNXT_GRCP_WINDOW_2_BASE + 5318 offsetof(struct hcomm_status, 5319 fw_status_loc))); 5320 5321 /* Only pre-map the FW health status GRC register */ 5322 if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) 5323 return 0; 5324 5325 info->status_regs[BNXT_FW_STATUS_REG] = status_loc; 5326 info->mapped_status_regs[BNXT_FW_STATUS_REG] = 5327 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); 5328 5329 rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + 5330 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5331 5332 bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; 5333 5334 return 0; 5335 } 5336 5337 /* This function gets the FW version along with the 5338 * capabilities(MAX and current) of the function, vnic, 5339 * error recovery, phy and other chip related info 5340 */ 5341 static int bnxt_get_config(struct bnxt *bp) 5342 { 5343 uint16_t mtu; 5344 int rc = 0; 5345 5346 bp->fw_cap = 0; 5347 5348 rc = bnxt_map_hcomm_fw_status_reg(bp); 5349 if (rc) 5350 return rc; 5351 5352 rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); 5353 if (rc) { 5354 bnxt_check_fw_status(bp); 5355 return rc; 5356 } 5357 5358 rc = bnxt_hwrm_func_reset(bp); 5359 if (rc) 5360 return -EIO; 5361 5362 rc = bnxt_hwrm_vnic_qcaps(bp); 5363 if (rc) 5364 return rc; 5365 5366 rc = bnxt_hwrm_queue_qportcfg(bp); 5367 if (rc) 5368 return rc; 5369 5370 /* Get the MAX capabilities for this function. 5371 * This function also allocates context memory for TQM rings and 5372 * informs the firmware about this allocated backing store memory. 5373 */ 5374 rc = bnxt_hwrm_func_qcaps(bp); 5375 if (rc) 5376 return rc; 5377 5378 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 5379 if (rc) 5380 return rc; 5381 5382 rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp); 5383 if (rc) 5384 return rc; 5385 5386 bnxt_hwrm_port_mac_qcfg(bp); 5387 5388 bnxt_hwrm_parent_pf_qcfg(bp); 5389 5390 bnxt_hwrm_port_phy_qcaps(bp); 5391 5392 bnxt_alloc_error_recovery_info(bp); 5393 /* Get the adapter error recovery support info */ 5394 rc = bnxt_hwrm_error_recovery_qcfg(bp); 5395 if (rc) 5396 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5397 5398 bnxt_hwrm_port_led_qcaps(bp); 5399 5400 return 0; 5401 } 5402 5403 static int 5404 bnxt_init_locks(struct bnxt *bp) 5405 { 5406 int err; 5407 5408 err = pthread_mutex_init(&bp->flow_lock, NULL); 5409 if (err) { 5410 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); 5411 return err; 5412 } 5413 5414 err = pthread_mutex_init(&bp->def_cp_lock, NULL); 5415 if (err) { 5416 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); 5417 return err; 5418 } 5419 5420 err = pthread_mutex_init(&bp->health_check_lock, NULL); 5421 if (err) { 5422 PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n"); 5423 return err; 5424 } 5425 5426 err = pthread_mutex_init(&bp->err_recovery_lock, NULL); 5427 if (err) 5428 PMD_DRV_LOG(ERR, "Unable to initialize err_recovery_lock\n"); 5429 5430 return err; 5431 } 5432 5433 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) 5434 { 5435 int rc = 0; 5436 5437 rc = bnxt_get_config(bp); 5438 if (rc) 5439 return rc; 5440 5441 if (!reconfig_dev) { 5442 rc = bnxt_setup_mac_addr(bp->eth_dev); 5443 if (rc) 5444 return rc; 5445 } else { 5446 rc = bnxt_restore_dflt_mac(bp); 5447 if (rc) 5448 return rc; 5449 } 5450 5451 bnxt_config_vf_req_fwd(bp); 5452 5453 rc = bnxt_hwrm_func_driver_register(bp); 5454 if (rc) { 5455 PMD_DRV_LOG(ERR, "Failed to register driver"); 5456 return -EBUSY; 5457 } 5458 5459 if (BNXT_PF(bp)) { 5460 if (bp->pdev->max_vfs) { 5461 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 5462 if (rc) { 5463 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 5464 return rc; 5465 } 5466 } else { 5467 rc = bnxt_hwrm_allocate_pf_only(bp); 5468 if (rc) { 5469 PMD_DRV_LOG(ERR, 5470 "Failed to allocate PF resources"); 5471 return rc; 5472 } 5473 } 5474 } 5475 5476 rc = bnxt_alloc_mem(bp, reconfig_dev); 5477 if (rc) 5478 return rc; 5479 5480 rc = bnxt_setup_int(bp); 5481 if (rc) 5482 return rc; 5483 5484 rc = bnxt_request_int(bp); 5485 if (rc) 5486 return rc; 5487 5488 rc = bnxt_init_ctx_mem(bp); 5489 if (rc) { 5490 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n"); 5491 return rc; 5492 } 5493 5494 return 0; 5495 } 5496 5497 static int 5498 bnxt_parse_devarg_accum_stats(__rte_unused const char *key, 5499 const char *value, void *opaque_arg) 5500 { 5501 struct bnxt *bp = opaque_arg; 5502 unsigned long accum_stats; 5503 char *end = NULL; 5504 5505 if (!value || !opaque_arg) { 5506 PMD_DRV_LOG(ERR, 5507 "Invalid parameter passed to accum-stats devargs.\n"); 5508 return -EINVAL; 5509 } 5510 5511 accum_stats = strtoul(value, &end, 10); 5512 if (end == NULL || *end != '\0' || 5513 (accum_stats == ULONG_MAX && errno == ERANGE)) { 5514 PMD_DRV_LOG(ERR, 5515 "Invalid parameter passed to accum-stats devargs.\n"); 5516 return -EINVAL; 5517 } 5518 5519 if (BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats)) { 5520 PMD_DRV_LOG(ERR, 5521 "Invalid value passed to accum-stats devargs.\n"); 5522 return -EINVAL; 5523 } 5524 5525 if (accum_stats) { 5526 bp->flags2 |= BNXT_FLAGS2_ACCUM_STATS_EN; 5527 PMD_DRV_LOG(INFO, "Host-based accum-stats feature enabled.\n"); 5528 } else { 5529 bp->flags2 &= ~BNXT_FLAGS2_ACCUM_STATS_EN; 5530 PMD_DRV_LOG(INFO, "Host-based accum-stats feature disabled.\n"); 5531 } 5532 5533 return 0; 5534 } 5535 5536 static int 5537 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key, 5538 const char *value, void *opaque_arg) 5539 { 5540 struct bnxt *bp = opaque_arg; 5541 unsigned long flow_xstat; 5542 char *end = NULL; 5543 5544 if (!value || !opaque_arg) { 5545 PMD_DRV_LOG(ERR, 5546 "Invalid parameter passed to flow_xstat devarg.\n"); 5547 return -EINVAL; 5548 } 5549 5550 flow_xstat = strtoul(value, &end, 10); 5551 if (end == NULL || *end != '\0' || 5552 (flow_xstat == ULONG_MAX && errno == ERANGE)) { 5553 PMD_DRV_LOG(ERR, 5554 "Invalid parameter passed to flow_xstat devarg.\n"); 5555 return -EINVAL; 5556 } 5557 5558 if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) { 5559 PMD_DRV_LOG(ERR, 5560 "Invalid value passed to flow_xstat devarg.\n"); 5561 return -EINVAL; 5562 } 5563 5564 bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; 5565 if (BNXT_FLOW_XSTATS_EN(bp)) 5566 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n"); 5567 5568 return 0; 5569 } 5570 5571 static int 5572 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key, 5573 const char *value, void *opaque_arg) 5574 { 5575 struct bnxt *bp = opaque_arg; 5576 unsigned long max_num_kflows; 5577 char *end = NULL; 5578 5579 if (!value || !opaque_arg) { 5580 PMD_DRV_LOG(ERR, 5581 "Invalid parameter passed to max_num_kflows devarg.\n"); 5582 return -EINVAL; 5583 } 5584 5585 max_num_kflows = strtoul(value, &end, 10); 5586 if (end == NULL || *end != '\0' || 5587 (max_num_kflows == ULONG_MAX && errno == ERANGE)) { 5588 PMD_DRV_LOG(ERR, 5589 "Invalid parameter passed to max_num_kflows devarg.\n"); 5590 return -EINVAL; 5591 } 5592 5593 if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) { 5594 PMD_DRV_LOG(ERR, 5595 "Invalid value passed to max_num_kflows devarg.\n"); 5596 return -EINVAL; 5597 } 5598 5599 bp->max_num_kflows = max_num_kflows; 5600 if (bp->max_num_kflows) 5601 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n", 5602 max_num_kflows); 5603 5604 return 0; 5605 } 5606 5607 static int 5608 bnxt_parse_devarg_app_id(__rte_unused const char *key, 5609 const char *value, void *opaque_arg) 5610 { 5611 struct bnxt *bp = opaque_arg; 5612 unsigned long app_id; 5613 char *end = NULL; 5614 5615 if (!value || !opaque_arg) { 5616 PMD_DRV_LOG(ERR, 5617 "Invalid parameter passed to app-id " 5618 "devargs.\n"); 5619 return -EINVAL; 5620 } 5621 5622 app_id = strtoul(value, &end, 10); 5623 if (end == NULL || *end != '\0' || 5624 (app_id == ULONG_MAX && errno == ERANGE)) { 5625 PMD_DRV_LOG(ERR, 5626 "Invalid parameter passed to app_id " 5627 "devargs.\n"); 5628 return -EINVAL; 5629 } 5630 5631 if (BNXT_DEVARG_APP_ID_INVALID(app_id)) { 5632 PMD_DRV_LOG(ERR, "Invalid app-id(%d) devargs.\n", 5633 (uint16_t)app_id); 5634 return -EINVAL; 5635 } 5636 5637 bp->app_id = app_id; 5638 PMD_DRV_LOG(INFO, "app-id=%d feature enabled.\n", (uint16_t)app_id); 5639 5640 return 0; 5641 } 5642 5643 static int 5644 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key, 5645 const char *value, void *opaque_arg) 5646 { 5647 struct bnxt_representor *vfr_bp = opaque_arg; 5648 unsigned long rep_is_pf; 5649 char *end = NULL; 5650 5651 if (!value || !opaque_arg) { 5652 PMD_DRV_LOG(ERR, 5653 "Invalid parameter passed to rep_is_pf devargs.\n"); 5654 return -EINVAL; 5655 } 5656 5657 rep_is_pf = strtoul(value, &end, 10); 5658 if (end == NULL || *end != '\0' || 5659 (rep_is_pf == ULONG_MAX && errno == ERANGE)) { 5660 PMD_DRV_LOG(ERR, 5661 "Invalid parameter passed to rep_is_pf devargs.\n"); 5662 return -EINVAL; 5663 } 5664 5665 if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) { 5666 PMD_DRV_LOG(ERR, 5667 "Invalid value passed to rep_is_pf devargs.\n"); 5668 return -EINVAL; 5669 } 5670 5671 vfr_bp->flags |= rep_is_pf; 5672 if (BNXT_REP_PF(vfr_bp)) 5673 PMD_DRV_LOG(INFO, "PF representor\n"); 5674 else 5675 PMD_DRV_LOG(INFO, "VF representor\n"); 5676 5677 return 0; 5678 } 5679 5680 static int 5681 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key, 5682 const char *value, void *opaque_arg) 5683 { 5684 struct bnxt_representor *vfr_bp = opaque_arg; 5685 unsigned long rep_based_pf; 5686 char *end = NULL; 5687 5688 if (!value || !opaque_arg) { 5689 PMD_DRV_LOG(ERR, 5690 "Invalid parameter passed to rep_based_pf " 5691 "devargs.\n"); 5692 return -EINVAL; 5693 } 5694 5695 rep_based_pf = strtoul(value, &end, 10); 5696 if (end == NULL || *end != '\0' || 5697 (rep_based_pf == ULONG_MAX && errno == ERANGE)) { 5698 PMD_DRV_LOG(ERR, 5699 "Invalid parameter passed to rep_based_pf " 5700 "devargs.\n"); 5701 return -EINVAL; 5702 } 5703 5704 if (BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) { 5705 PMD_DRV_LOG(ERR, 5706 "Invalid value passed to rep_based_pf devargs.\n"); 5707 return -EINVAL; 5708 } 5709 5710 vfr_bp->rep_based_pf = rep_based_pf; 5711 vfr_bp->flags |= BNXT_REP_BASED_PF_VALID; 5712 5713 PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf); 5714 5715 return 0; 5716 } 5717 5718 static int 5719 bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key, 5720 const char *value, void *opaque_arg) 5721 { 5722 struct bnxt_representor *vfr_bp = opaque_arg; 5723 unsigned long rep_q_r2f; 5724 char *end = NULL; 5725 5726 if (!value || !opaque_arg) { 5727 PMD_DRV_LOG(ERR, 5728 "Invalid parameter passed to rep_q_r2f " 5729 "devargs.\n"); 5730 return -EINVAL; 5731 } 5732 5733 rep_q_r2f = strtoul(value, &end, 10); 5734 if (end == NULL || *end != '\0' || 5735 (rep_q_r2f == ULONG_MAX && errno == ERANGE)) { 5736 PMD_DRV_LOG(ERR, 5737 "Invalid parameter passed to rep_q_r2f " 5738 "devargs.\n"); 5739 return -EINVAL; 5740 } 5741 5742 if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) { 5743 PMD_DRV_LOG(ERR, 5744 "Invalid value passed to rep_q_r2f devargs.\n"); 5745 return -EINVAL; 5746 } 5747 5748 vfr_bp->rep_q_r2f = rep_q_r2f; 5749 vfr_bp->flags |= BNXT_REP_Q_R2F_VALID; 5750 PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f); 5751 5752 return 0; 5753 } 5754 5755 static int 5756 bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key, 5757 const char *value, void *opaque_arg) 5758 { 5759 struct bnxt_representor *vfr_bp = opaque_arg; 5760 unsigned long rep_q_f2r; 5761 char *end = NULL; 5762 5763 if (!value || !opaque_arg) { 5764 PMD_DRV_LOG(ERR, 5765 "Invalid parameter passed to rep_q_f2r " 5766 "devargs.\n"); 5767 return -EINVAL; 5768 } 5769 5770 rep_q_f2r = strtoul(value, &end, 10); 5771 if (end == NULL || *end != '\0' || 5772 (rep_q_f2r == ULONG_MAX && errno == ERANGE)) { 5773 PMD_DRV_LOG(ERR, 5774 "Invalid parameter passed to rep_q_f2r " 5775 "devargs.\n"); 5776 return -EINVAL; 5777 } 5778 5779 if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) { 5780 PMD_DRV_LOG(ERR, 5781 "Invalid value passed to rep_q_f2r devargs.\n"); 5782 return -EINVAL; 5783 } 5784 5785 vfr_bp->rep_q_f2r = rep_q_f2r; 5786 vfr_bp->flags |= BNXT_REP_Q_F2R_VALID; 5787 PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r); 5788 5789 return 0; 5790 } 5791 5792 static int 5793 bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key, 5794 const char *value, void *opaque_arg) 5795 { 5796 struct bnxt_representor *vfr_bp = opaque_arg; 5797 unsigned long rep_fc_r2f; 5798 char *end = NULL; 5799 5800 if (!value || !opaque_arg) { 5801 PMD_DRV_LOG(ERR, 5802 "Invalid parameter passed to rep_fc_r2f " 5803 "devargs.\n"); 5804 return -EINVAL; 5805 } 5806 5807 rep_fc_r2f = strtoul(value, &end, 10); 5808 if (end == NULL || *end != '\0' || 5809 (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) { 5810 PMD_DRV_LOG(ERR, 5811 "Invalid parameter passed to rep_fc_r2f " 5812 "devargs.\n"); 5813 return -EINVAL; 5814 } 5815 5816 if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) { 5817 PMD_DRV_LOG(ERR, 5818 "Invalid value passed to rep_fc_r2f devargs.\n"); 5819 return -EINVAL; 5820 } 5821 5822 vfr_bp->flags |= BNXT_REP_FC_R2F_VALID; 5823 vfr_bp->rep_fc_r2f = rep_fc_r2f; 5824 PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f); 5825 5826 return 0; 5827 } 5828 5829 static int 5830 bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key, 5831 const char *value, void *opaque_arg) 5832 { 5833 struct bnxt_representor *vfr_bp = opaque_arg; 5834 unsigned long rep_fc_f2r; 5835 char *end = NULL; 5836 5837 if (!value || !opaque_arg) { 5838 PMD_DRV_LOG(ERR, 5839 "Invalid parameter passed to rep_fc_f2r " 5840 "devargs.\n"); 5841 return -EINVAL; 5842 } 5843 5844 rep_fc_f2r = strtoul(value, &end, 10); 5845 if (end == NULL || *end != '\0' || 5846 (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) { 5847 PMD_DRV_LOG(ERR, 5848 "Invalid parameter passed to rep_fc_f2r " 5849 "devargs.\n"); 5850 return -EINVAL; 5851 } 5852 5853 if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) { 5854 PMD_DRV_LOG(ERR, 5855 "Invalid value passed to rep_fc_f2r devargs.\n"); 5856 return -EINVAL; 5857 } 5858 5859 vfr_bp->flags |= BNXT_REP_FC_F2R_VALID; 5860 vfr_bp->rep_fc_f2r = rep_fc_f2r; 5861 PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r); 5862 5863 return 0; 5864 } 5865 5866 static int 5867 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) 5868 { 5869 struct rte_kvargs *kvlist; 5870 int ret; 5871 5872 if (devargs == NULL) 5873 return 0; 5874 5875 kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args); 5876 if (kvlist == NULL) 5877 return -EINVAL; 5878 5879 /* 5880 * Handler for "flow_xstat" devarg. 5881 * Invoked as for ex: "-a 0000:00:0d.0,flow_xstat=1" 5882 */ 5883 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT, 5884 bnxt_parse_devarg_flow_xstat, bp); 5885 if (ret) 5886 goto err; 5887 5888 /* 5889 * Handler for "accum-stats" devarg. 5890 * Invoked as for ex: "-a 0000:00:0d.0,accum-stats=1" 5891 */ 5892 rte_kvargs_process(kvlist, BNXT_DEVARG_ACCUM_STATS, 5893 bnxt_parse_devarg_accum_stats, bp); 5894 /* 5895 * Handler for "max_num_kflows" devarg. 5896 * Invoked as for ex: "-a 000:00:0d.0,max_num_kflows=32" 5897 */ 5898 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS, 5899 bnxt_parse_devarg_max_num_kflows, bp); 5900 if (ret) 5901 goto err; 5902 5903 err: 5904 /* 5905 * Handler for "app-id" devarg. 5906 * Invoked as for ex: "-a 000:00:0d.0,app-id=1" 5907 */ 5908 rte_kvargs_process(kvlist, BNXT_DEVARG_APP_ID, 5909 bnxt_parse_devarg_app_id, bp); 5910 5911 rte_kvargs_free(kvlist); 5912 return ret; 5913 } 5914 5915 static int bnxt_alloc_switch_domain(struct bnxt *bp) 5916 { 5917 int rc = 0; 5918 5919 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 5920 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id); 5921 if (rc) 5922 PMD_DRV_LOG(ERR, 5923 "Failed to alloc switch domain: %d\n", rc); 5924 else 5925 PMD_DRV_LOG(INFO, 5926 "Switch domain allocated %d\n", 5927 bp->switch_domain_id); 5928 } 5929 5930 return rc; 5931 } 5932 5933 /* Allocate and initialize various fields in bnxt struct that 5934 * need to be allocated/destroyed only once in the lifetime of the driver 5935 */ 5936 static int bnxt_drv_init(struct rte_eth_dev *eth_dev) 5937 { 5938 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5939 struct bnxt *bp = eth_dev->data->dev_private; 5940 int rc = 0; 5941 5942 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 5943 5944 if (bnxt_vf_pciid(pci_dev->id.device_id)) 5945 bp->flags |= BNXT_FLAG_VF; 5946 5947 if (bnxt_p5_device(pci_dev->id.device_id)) 5948 bp->flags |= BNXT_FLAG_CHIP_P5; 5949 5950 if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 || 5951 pci_dev->id.device_id == BROADCOM_DEV_ID_58804 || 5952 pci_dev->id.device_id == BROADCOM_DEV_ID_58808 || 5953 pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF) 5954 bp->flags |= BNXT_FLAG_STINGRAY; 5955 5956 if (BNXT_TRUFLOW_EN(bp)) { 5957 /* extra mbuf field is required to store CFA code from mark */ 5958 static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = { 5959 .name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME, 5960 .size = sizeof(bnxt_cfa_code_dynfield_t), 5961 .align = __alignof__(bnxt_cfa_code_dynfield_t), 5962 }; 5963 bnxt_cfa_code_dynfield_offset = 5964 rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc); 5965 if (bnxt_cfa_code_dynfield_offset < 0) { 5966 PMD_DRV_LOG(ERR, 5967 "Failed to register mbuf field for TruFlow mark\n"); 5968 return -rte_errno; 5969 } 5970 } 5971 5972 rc = bnxt_map_pci_bars(eth_dev); 5973 if (rc) { 5974 PMD_DRV_LOG(ERR, 5975 "Failed to initialize board rc: %x\n", rc); 5976 return rc; 5977 } 5978 5979 rc = bnxt_alloc_pf_info(bp); 5980 if (rc) 5981 return rc; 5982 5983 rc = bnxt_alloc_link_info(bp); 5984 if (rc) 5985 return rc; 5986 5987 rc = bnxt_alloc_parent_info(bp); 5988 if (rc) 5989 return rc; 5990 5991 rc = bnxt_alloc_hwrm_resources(bp); 5992 if (rc) { 5993 PMD_DRV_LOG(ERR, 5994 "Failed to allocate response buffer rc: %x\n", rc); 5995 return rc; 5996 } 5997 rc = bnxt_alloc_leds_info(bp); 5998 if (rc) 5999 return rc; 6000 6001 rc = bnxt_alloc_cos_queues(bp); 6002 if (rc) 6003 return rc; 6004 6005 rc = bnxt_init_locks(bp); 6006 if (rc) 6007 return rc; 6008 6009 rc = bnxt_alloc_switch_domain(bp); 6010 if (rc) 6011 return rc; 6012 6013 return rc; 6014 } 6015 6016 static int 6017 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) 6018 { 6019 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 6020 static int version_printed; 6021 struct bnxt *bp; 6022 int rc; 6023 6024 if (version_printed++ == 0) 6025 PMD_DRV_LOG(INFO, "%s\n", bnxt_version); 6026 6027 eth_dev->dev_ops = &bnxt_dev_ops; 6028 eth_dev->rx_queue_count = bnxt_rx_queue_count_op; 6029 eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op; 6030 eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op; 6031 eth_dev->rx_pkt_burst = &bnxt_recv_pkts; 6032 eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; 6033 6034 /* 6035 * For secondary processes, we don't initialise any further 6036 * as primary has already done this work. 6037 */ 6038 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 6039 return 0; 6040 6041 rte_eth_copy_pci_info(eth_dev, pci_dev); 6042 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 6043 6044 bp = eth_dev->data->dev_private; 6045 6046 /* Parse dev arguments passed on when starting the DPDK application. */ 6047 rc = bnxt_parse_dev_args(bp, pci_dev->device.devargs); 6048 if (rc) 6049 goto error_free; 6050 6051 rc = bnxt_drv_init(eth_dev); 6052 if (rc) 6053 goto error_free; 6054 6055 rc = bnxt_init_resources(bp, false); 6056 if (rc) 6057 goto error_free; 6058 6059 rc = bnxt_alloc_stats_mem(bp); 6060 if (rc) 6061 goto error_free; 6062 6063 PMD_DRV_LOG(INFO, 6064 "Found %s device at mem %" PRIX64 ", node addr %pM\n", 6065 DRV_MODULE_NAME, 6066 pci_dev->mem_resource[0].phys_addr, 6067 pci_dev->mem_resource[0].addr); 6068 6069 return 0; 6070 6071 error_free: 6072 bnxt_dev_uninit(eth_dev); 6073 return rc; 6074 } 6075 6076 6077 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx) 6078 { 6079 if (!ctx) 6080 return; 6081 6082 if (ctx->va) 6083 rte_free(ctx->va); 6084 6085 ctx->va = NULL; 6086 ctx->dma = RTE_BAD_IOVA; 6087 ctx->ctx_id = BNXT_CTX_VAL_INVAL; 6088 } 6089 6090 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp) 6091 { 6092 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 6093 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 6094 bp->flow_stat->rx_fc_out_tbl.ctx_id, 6095 bp->flow_stat->max_fc, 6096 false); 6097 6098 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 6099 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 6100 bp->flow_stat->tx_fc_out_tbl.ctx_id, 6101 bp->flow_stat->max_fc, 6102 false); 6103 6104 if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6105 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id); 6106 bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6107 6108 if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6109 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id); 6110 bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6111 6112 if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6113 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id); 6114 bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6115 6116 if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6117 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id); 6118 bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6119 } 6120 6121 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp) 6122 { 6123 bnxt_unregister_fc_ctx_mem(bp); 6124 6125 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl); 6126 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl); 6127 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl); 6128 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl); 6129 } 6130 6131 static void bnxt_uninit_ctx_mem(struct bnxt *bp) 6132 { 6133 if (BNXT_FLOW_XSTATS_EN(bp)) 6134 bnxt_uninit_fc_ctx_mem(bp); 6135 } 6136 6137 static void 6138 bnxt_free_error_recovery_info(struct bnxt *bp) 6139 { 6140 rte_free(bp->recovery_info); 6141 bp->recovery_info = NULL; 6142 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 6143 } 6144 6145 static int 6146 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) 6147 { 6148 int rc; 6149 6150 bnxt_free_int(bp); 6151 bnxt_free_mem(bp, reconfig_dev); 6152 6153 bnxt_hwrm_func_buf_unrgtr(bp); 6154 if (bp->pf != NULL) { 6155 rte_free(bp->pf->vf_req_buf); 6156 bp->pf->vf_req_buf = NULL; 6157 } 6158 6159 rc = bnxt_hwrm_func_driver_unregister(bp, 0); 6160 bp->flags &= ~BNXT_FLAG_REGISTERED; 6161 bnxt_free_ctx_mem(bp); 6162 if (!reconfig_dev) { 6163 bnxt_free_hwrm_resources(bp); 6164 bnxt_free_error_recovery_info(bp); 6165 } 6166 6167 bnxt_uninit_ctx_mem(bp); 6168 6169 bnxt_free_flow_stats_info(bp); 6170 if (bp->rep_info != NULL) 6171 bnxt_free_switch_domain(bp); 6172 bnxt_free_rep_info(bp); 6173 rte_free(bp->ptp_cfg); 6174 bp->ptp_cfg = NULL; 6175 return rc; 6176 } 6177 6178 static int 6179 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) 6180 { 6181 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 6182 return -EPERM; 6183 6184 PMD_DRV_LOG(DEBUG, "Calling Device uninit\n"); 6185 6186 if (eth_dev->state != RTE_ETH_DEV_UNUSED) 6187 bnxt_dev_close_op(eth_dev); 6188 6189 return 0; 6190 } 6191 6192 static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev) 6193 { 6194 struct bnxt *bp = eth_dev->data->dev_private; 6195 struct rte_eth_dev *vf_rep_eth_dev; 6196 int ret = 0, i; 6197 6198 if (!bp) 6199 return -EINVAL; 6200 6201 for (i = 0; i < bp->num_reps; i++) { 6202 vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev; 6203 if (!vf_rep_eth_dev) 6204 continue; 6205 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n", 6206 vf_rep_eth_dev->data->port_id); 6207 rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit); 6208 } 6209 PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", 6210 eth_dev->data->port_id); 6211 ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit); 6212 6213 return ret; 6214 } 6215 6216 static void bnxt_free_rep_info(struct bnxt *bp) 6217 { 6218 rte_free(bp->rep_info); 6219 bp->rep_info = NULL; 6220 rte_free(bp->cfa_code_map); 6221 bp->cfa_code_map = NULL; 6222 } 6223 6224 static int bnxt_init_rep_info(struct bnxt *bp) 6225 { 6226 int i = 0, rc; 6227 6228 if (bp->rep_info) 6229 return 0; 6230 6231 bp->rep_info = rte_zmalloc("bnxt_rep_info", 6232 sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS, 6233 0); 6234 if (!bp->rep_info) { 6235 PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n"); 6236 return -ENOMEM; 6237 } 6238 bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map", 6239 sizeof(*bp->cfa_code_map) * 6240 BNXT_MAX_CFA_CODE, 0); 6241 if (!bp->cfa_code_map) { 6242 PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n"); 6243 bnxt_free_rep_info(bp); 6244 return -ENOMEM; 6245 } 6246 6247 for (i = 0; i < BNXT_MAX_CFA_CODE; i++) 6248 bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID; 6249 6250 rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL); 6251 if (rc) { 6252 PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n"); 6253 bnxt_free_rep_info(bp); 6254 return rc; 6255 } 6256 6257 rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL); 6258 if (rc) { 6259 PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n"); 6260 bnxt_free_rep_info(bp); 6261 return rc; 6262 } 6263 6264 return rc; 6265 } 6266 6267 static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev, 6268 struct rte_eth_devargs *eth_da, 6269 struct rte_eth_dev *backing_eth_dev, 6270 const char *dev_args) 6271 { 6272 struct rte_eth_dev *vf_rep_eth_dev; 6273 char name[RTE_ETH_NAME_MAX_LEN]; 6274 struct bnxt *backing_bp; 6275 uint16_t num_rep; 6276 int i, ret = 0; 6277 struct rte_kvargs *kvlist = NULL; 6278 6279 if (eth_da->type == RTE_ETH_REPRESENTOR_NONE) 6280 return 0; 6281 if (eth_da->type != RTE_ETH_REPRESENTOR_VF) { 6282 PMD_DRV_LOG(ERR, "unsupported representor type %d\n", 6283 eth_da->type); 6284 return -ENOTSUP; 6285 } 6286 num_rep = eth_da->nb_representor_ports; 6287 if (num_rep > BNXT_MAX_VF_REPS) { 6288 PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n", 6289 num_rep, BNXT_MAX_VF_REPS); 6290 return -EINVAL; 6291 } 6292 6293 if (num_rep >= RTE_MAX_ETHPORTS) { 6294 PMD_DRV_LOG(ERR, 6295 "nb_representor_ports = %d > %d MAX ETHPORTS\n", 6296 num_rep, RTE_MAX_ETHPORTS); 6297 return -EINVAL; 6298 } 6299 6300 backing_bp = backing_eth_dev->data->dev_private; 6301 6302 if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) { 6303 PMD_DRV_LOG(ERR, 6304 "Not a PF or trusted VF. No Representor support\n"); 6305 /* Returning an error is not an option. 6306 * Applications are not handling this correctly 6307 */ 6308 return 0; 6309 } 6310 6311 if (bnxt_init_rep_info(backing_bp)) 6312 return 0; 6313 6314 for (i = 0; i < num_rep; i++) { 6315 struct bnxt_representor representor = { 6316 .vf_id = eth_da->representor_ports[i], 6317 .switch_domain_id = backing_bp->switch_domain_id, 6318 .parent_dev = backing_eth_dev 6319 }; 6320 6321 if (representor.vf_id >= BNXT_MAX_VF_REPS) { 6322 PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n", 6323 representor.vf_id, BNXT_MAX_VF_REPS); 6324 continue; 6325 } 6326 6327 /* representor port net_bdf_port */ 6328 snprintf(name, sizeof(name), "net_%s_representor_%d", 6329 pci_dev->device.name, eth_da->representor_ports[i]); 6330 6331 kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args); 6332 if (kvlist) { 6333 /* 6334 * Handler for "rep_is_pf" devarg. 6335 * Invoked as for ex: "-a 000:00:0d.0, 6336 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6337 */ 6338 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF, 6339 bnxt_parse_devarg_rep_is_pf, 6340 (void *)&representor); 6341 if (ret) { 6342 ret = -EINVAL; 6343 goto err; 6344 } 6345 /* 6346 * Handler for "rep_based_pf" devarg. 6347 * Invoked as for ex: "-a 000:00:0d.0, 6348 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6349 */ 6350 ret = rte_kvargs_process(kvlist, 6351 BNXT_DEVARG_REP_BASED_PF, 6352 bnxt_parse_devarg_rep_based_pf, 6353 (void *)&representor); 6354 if (ret) { 6355 ret = -EINVAL; 6356 goto err; 6357 } 6358 /* 6359 * Handler for "rep_based_pf" devarg. 6360 * Invoked as for ex: "-a 000:00:0d.0, 6361 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6362 */ 6363 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F, 6364 bnxt_parse_devarg_rep_q_r2f, 6365 (void *)&representor); 6366 if (ret) { 6367 ret = -EINVAL; 6368 goto err; 6369 } 6370 /* 6371 * Handler for "rep_based_pf" devarg. 6372 * Invoked as for ex: "-a 000:00:0d.0, 6373 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6374 */ 6375 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R, 6376 bnxt_parse_devarg_rep_q_f2r, 6377 (void *)&representor); 6378 if (ret) { 6379 ret = -EINVAL; 6380 goto err; 6381 } 6382 /* 6383 * Handler for "rep_based_pf" devarg. 6384 * Invoked as for ex: "-a 000:00:0d.0, 6385 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6386 */ 6387 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F, 6388 bnxt_parse_devarg_rep_fc_r2f, 6389 (void *)&representor); 6390 if (ret) { 6391 ret = -EINVAL; 6392 goto err; 6393 } 6394 /* 6395 * Handler for "rep_based_pf" devarg. 6396 * Invoked as for ex: "-a 000:00:0d.0, 6397 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6398 */ 6399 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R, 6400 bnxt_parse_devarg_rep_fc_f2r, 6401 (void *)&representor); 6402 if (ret) { 6403 ret = -EINVAL; 6404 goto err; 6405 } 6406 } 6407 6408 ret = rte_eth_dev_create(&pci_dev->device, name, 6409 sizeof(struct bnxt_representor), 6410 NULL, NULL, 6411 bnxt_representor_init, 6412 &representor); 6413 if (ret) { 6414 PMD_DRV_LOG(ERR, "failed to create bnxt vf " 6415 "representor %s.", name); 6416 goto err; 6417 } 6418 6419 vf_rep_eth_dev = rte_eth_dev_allocated(name); 6420 if (!vf_rep_eth_dev) { 6421 PMD_DRV_LOG(ERR, "Failed to find the eth_dev" 6422 " for VF-Rep: %s.", name); 6423 ret = -ENODEV; 6424 goto err; 6425 } 6426 6427 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n", 6428 backing_eth_dev->data->port_id); 6429 backing_bp->rep_info[representor.vf_id].vfr_eth_dev = 6430 vf_rep_eth_dev; 6431 backing_bp->num_reps++; 6432 6433 } 6434 6435 rte_kvargs_free(kvlist); 6436 return 0; 6437 6438 err: 6439 /* If num_rep > 1, then rollback already created 6440 * ports, since we'll be failing the probe anyway 6441 */ 6442 if (num_rep > 1) 6443 bnxt_pci_remove_dev_with_reps(backing_eth_dev); 6444 rte_errno = -ret; 6445 rte_kvargs_free(kvlist); 6446 6447 return ret; 6448 } 6449 6450 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 6451 struct rte_pci_device *pci_dev) 6452 { 6453 struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 }; 6454 struct rte_eth_dev *backing_eth_dev; 6455 uint16_t num_rep; 6456 int ret = 0; 6457 6458 if (pci_dev->device.devargs) { 6459 ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, 6460 ð_da); 6461 if (ret) 6462 return ret; 6463 } 6464 6465 num_rep = eth_da.nb_representor_ports; 6466 PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n", 6467 num_rep); 6468 6469 /* We could come here after first level of probe is already invoked 6470 * as part of an application bringup(OVS-DPDK vswitchd), so first check 6471 * for already allocated eth_dev for the backing device (PF/Trusted VF) 6472 */ 6473 backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 6474 if (backing_eth_dev == NULL) { 6475 ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, 6476 sizeof(struct bnxt), 6477 eth_dev_pci_specific_init, pci_dev, 6478 bnxt_dev_init, NULL); 6479 6480 if (ret || !num_rep) 6481 return ret; 6482 6483 backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 6484 } 6485 PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n", 6486 backing_eth_dev->data->port_id); 6487 6488 if (!num_rep) 6489 return ret; 6490 6491 /* probe representor ports now */ 6492 ret = bnxt_rep_port_probe(pci_dev, ð_da, backing_eth_dev, 6493 pci_dev->device.devargs->args); 6494 6495 return ret; 6496 } 6497 6498 static int bnxt_pci_remove(struct rte_pci_device *pci_dev) 6499 { 6500 struct rte_eth_dev *eth_dev; 6501 6502 eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 6503 if (!eth_dev) 6504 return 0; /* Invoked typically only by OVS-DPDK, by the 6505 * time it comes here the eth_dev is already 6506 * deleted by rte_eth_dev_close(), so returning 6507 * +ve value will at least help in proper cleanup 6508 */ 6509 6510 PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id); 6511 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 6512 if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) 6513 return rte_eth_dev_destroy(eth_dev, 6514 bnxt_representor_uninit); 6515 else 6516 return rte_eth_dev_destroy(eth_dev, 6517 bnxt_dev_uninit); 6518 } else { 6519 return rte_eth_dev_pci_generic_remove(pci_dev, NULL); 6520 } 6521 } 6522 6523 static struct rte_pci_driver bnxt_rte_pmd = { 6524 .id_table = bnxt_pci_id_map, 6525 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 6526 RTE_PCI_DRV_INTR_RMV | 6527 RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs 6528 * and OVS-DPDK 6529 */ 6530 .probe = bnxt_pci_probe, 6531 .remove = bnxt_pci_remove, 6532 }; 6533 6534 static bool 6535 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 6536 { 6537 if (strcmp(dev->device->driver->name, drv->driver.name)) 6538 return false; 6539 6540 return true; 6541 } 6542 6543 bool is_bnxt_supported(struct rte_eth_dev *dev) 6544 { 6545 return is_device_supported(dev, &bnxt_rte_pmd); 6546 } 6547 6548 RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE); 6549 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd); 6550 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map); 6551 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci"); 6552