/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>
#include <rte_vect.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_tf_common.h"
#include "ulp_flow_db.h"
#include "rte_pmd_bnxt.h"

#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
	"Broadcom NetXtreme driver " DRV_MODULE_NAME;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_57502_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58812) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58814) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_DEVARG_ACCUM_STATS		"accum-stats"
#define BNXT_DEVARG_FLOW_XSTAT		"flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS	"max-num-kflows"
#define BNXT_DEVARG_REPRESENTOR		"representor"
#define BNXT_DEVARG_REP_BASED_PF	"rep-based-pf"
#define BNXT_DEVARG_REP_IS_PF		"rep-is-pf"
#define BNXT_DEVARG_REP_Q_R2F		"rep-q-r2f"
#define BNXT_DEVARG_REP_Q_F2R		"rep-q-f2r"
#define BNXT_DEVARG_REP_FC_R2F		"rep-fc-r2f"
#define BNXT_DEVARG_REP_FC_F2R		"rep-fc-f2r"
#define BNXT_DEVARG_APP_ID		"app-id"

static const char *const bnxt_dev_args[] = {
	BNXT_DEVARG_REPRESENTOR,
	BNXT_DEVARG_ACCUM_STATS,
	BNXT_DEVARG_FLOW_XSTAT,
	BNXT_DEVARG_MAX_NUM_KFLOWS,
	BNXT_DEVARG_REP_BASED_PF,
	BNXT_DEVARG_REP_IS_PF,
	BNXT_DEVARG_REP_Q_R2F,
	BNXT_DEVARG_REP_Q_F2R,
	BNXT_DEVARG_REP_FC_R2F,
	BNXT_DEVARG_REP_FC_F2R,
	BNXT_DEVARG_APP_ID,
	NULL
};

/*
 * accum-stats == false to disable flow counter accumulation
 * accum-stats == true to enable flow counter accumulation
 */
#define BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats)	((accum_stats) > 1)

/*
 * app-id = a non-negative 8-bit number
 */
#define BNXT_DEVARG_APP_ID_INVALID(val)			((val) > 255)

/*
 * flow_xstat == false to disable the feature
 * flow_xstat == true to enable the feature
 */
#define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)	((flow_xstat) > 1)

/*
 * rep_is_pf == false to indicate VF representor
 * rep_is_pf == true to indicate PF representor
 */
#define BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)	((rep_is_pf) > 1)

/*
 * rep_based_pf == Physical index of the PF
 */
#define BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)	((rep_based_pf) > 15)
/*
 * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction
 */
#define BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)	((rep_q_r2f) > 3)

/*
 * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction
 */
#define BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)	((rep_q_f2r) > 3)

/*
 * rep_fc_r2f == Flow control for the representor to endpoint direction
 */
#define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)	((rep_fc_r2f) > 1)

/*
 * rep_fc_f2r == Flow control for the endpoint to representor direction
 */
#define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)	((rep_fc_f2r) > 1)

int bnxt_cfa_code_dynfield_offset = -1;

/*
 * max_num_kflows must be >= 32
 * and must be a power-of-2 supported value
 * return: 1 -> invalid
 *	   0 -> valid
 */
static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows)
{
	if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows))
		return 1;
	return 0;
}
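
/*
 * Illustrative devargs usage for the options validated above (the option
 * names come from the BNXT_DEVARG_* definitions; the PCI address is only a
 * placeholder, and max-num-kflows=32 is the smallest value accepted by
 * bnxt_devarg_max_num_kflow_invalid()):
 *
 *   -a 0000:0d:00.0,flow-xstat=1,max-num-kflows=32,app-id=0
 */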

static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
static void bnxt_cancel_fw_health_check(struct bnxt *bp);
static int bnxt_restore_vlan_filters(struct bnxt *bp);
static void bnxt_dev_recover(void *arg);
static void bnxt_free_error_recovery_info(struct bnxt *bp);
static void bnxt_free_rep_info(struct bnxt *bp);

int is_bnxt_in_error(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
		return -EIO;
	if (bp->flags & BNXT_FLAG_FW_RESET)
		return -EBUSY;

	return 0;
}

/***********************/

/*
 * High level utility functions
 */

static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
{
	unsigned int num_rss_rings = RTE_MIN(bp->rx_nr_rings,
					     BNXT_RSS_TBL_SIZE_P5);

	if (!BNXT_CHIP_P5(bp))
		return 1;

	return RTE_ALIGN_MUL_CEIL(num_rss_rings,
				  BNXT_RSS_ENTRIES_PER_CTX_P5) /
	       BNXT_RSS_ENTRIES_PER_CTX_P5;
}

uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
{
	if (!BNXT_CHIP_P5(bp))
		return HW_HASH_INDEX_SIZE;

	return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_P5;
}
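
/*
 * Worked example, assuming BNXT_RSS_ENTRIES_PER_CTX_P5 is 64: a P5 (Thor)
 * port configured with 100 Rx rings needs RTE_ALIGN_MUL_CEIL(100, 64) / 64
 * = 2 RSS contexts, and bnxt_rss_hash_tbl_size() then reports 2 * 64 = 128
 * RETA entries. Non-P5 devices always use a single context of
 * HW_HASH_INDEX_SIZE entries.
 */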

static void bnxt_free_parent_info(struct bnxt *bp)
{
	rte_free(bp->parent);
	bp->parent = NULL;
}

static void bnxt_free_pf_info(struct bnxt *bp)
{
	rte_free(bp->pf);
	bp->pf = NULL;
}

static void bnxt_free_link_info(struct bnxt *bp)
{
	rte_free(bp->link_info);
	bp->link_info = NULL;
}

static void bnxt_free_leds_info(struct bnxt *bp)
{
	if (BNXT_VF(bp))
		return;

	rte_free(bp->leds);
	bp->leds = NULL;
}

static void bnxt_free_flow_stats_info(struct bnxt *bp)
{
	rte_free(bp->flow_stat);
	bp->flow_stat = NULL;
}

static void bnxt_free_cos_queues(struct bnxt *bp)
{
	rte_free(bp->rx_cos_queue);
	bp->rx_cos_queue = NULL;
	rte_free(bp->tx_cos_queue);
	bp->tx_cos_queue = NULL;
}

static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	/* tx/rx rings are configured as part of *_queue_setup callbacks.
	 * If the number of rings changes across a fw update,
	 * we don't have much choice except to warn the user.
	 */
	if (!reconfig) {
		bnxt_free_stats(bp);
		bnxt_free_tx_rings(bp);
		bnxt_free_rx_rings(bp);
	}
	bnxt_free_async_cp_ring(bp);
	bnxt_free_rxtx_nq_ring(bp);

	rte_free(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_alloc_parent_info(struct bnxt *bp)
{
	bp->parent = rte_zmalloc("bnxt_parent_info",
				 sizeof(struct bnxt_parent_info), 0);
	if (bp->parent == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_pf_info(struct bnxt *bp)
{
	bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0);
	if (bp->pf == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_link_info(struct bnxt *bp)
{
	bp->link_info =
		rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0);
	if (bp->link_info == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_leds_info(struct bnxt *bp)
{
	if (BNXT_VF(bp))
		return 0;

	bp->leds = rte_zmalloc("bnxt_leds",
			       BNXT_MAX_LED * sizeof(struct bnxt_led_info),
			       0);
	if (bp->leds == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_cos_queues(struct bnxt *bp)
{
	bp->rx_cos_queue =
		rte_zmalloc("bnxt_rx_cosq",
			    BNXT_COS_QUEUE_COUNT *
			    sizeof(struct bnxt_cos_queue_info),
			    0);
	if (bp->rx_cos_queue == NULL)
		return -ENOMEM;

	bp->tx_cos_queue =
		rte_zmalloc("bnxt_tx_cosq",
			    BNXT_COS_QUEUE_COUNT *
			    sizeof(struct bnxt_cos_queue_info),
			    0);
	if (bp->tx_cos_queue == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_flow_stats_info(struct bnxt *bp)
{
	bp->flow_stat = rte_zmalloc("bnxt_flow_xstat",
				    sizeof(struct bnxt_flow_stat_info), 0);
	if (bp->flow_stat == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
{
	int rc;

	rc = bnxt_alloc_ring_grps(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_ring_struct(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_cp_ring(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rxtx_nq_ring(bp);
	if (rc)
		goto alloc_mem_err;

	if (BNXT_FLOW_XSTATS_EN(bp)) {
		rc = bnxt_alloc_flow_stats_info(bp);
		if (rc)
			goto alloc_mem_err;
	}

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, reconfig);
	return rc;
}
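
/*
 * Note: bnxt_alloc_mem() and bnxt_free_mem() are intended to stay symmetric;
 * bnxt_free_mem() doubles as the error-unwind path (alloc_mem_err above), so
 * any structure added to the allocation sequence should also be released
 * there.
 */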

static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	struct bnxt_rx_queue *rxq;
	unsigned int j;
	int rc;

	rc = bnxt_vnic_grp_alloc(bp, vnic);
	if (rc)
		goto err_out;

	PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
		    vnic_id, vnic, vnic->fw_grp_ids);

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
	if (rc)
		goto err_out;

	/* Alloc RSS context only if RSS mode is enabled */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
		int j, nr_ctxs = bnxt_rss_ctxts(bp);

		/* RSS table size in Thor is 512.
		 * Cap max Rx rings to same value
		 */
		if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_P5) {
			PMD_DRV_LOG(ERR, "RxQ cnt %d > reta_size %d\n",
				    bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_P5);
			goto err_out;
		}

		rc = 0;
		for (j = 0; j < nr_ctxs; j++) {
			rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
			if (rc)
				break;
		}
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
				    vnic_id, j, rc);
			goto err_out;
		}
		vnic->num_lb_ctxts = nr_ctxs;
	}

	/*
	 * Firmware sets pf pair in default vnic cfg. If the VLAN strip
	 * setting is not available at this time, it will not be
	 * configured correctly in the CFA.
	 */
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
	else
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rc)
		goto err_out;

	rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
	if (rc)
		goto err_out;

	for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
		rxq = bp->eth_dev->data->rx_queues[j];

		PMD_DRV_LOG(DEBUG,
			    "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
			    j, rxq->vnic, rxq->vnic->fw_grp_ids);

		if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
			rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
		else
			vnic->rx_queue_cnt++;
	}

	PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);

	rc = bnxt_vnic_rss_configure(bp, vnic);
	if (rc)
		goto err_out;

	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

	rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
				    (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) ?
				    true : false);
	if (rc)
		goto err_out;

	return 0;
err_out:
	PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
		    vnic_id, rc);
	return rc;
}
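
/*
 * For example, with the 512-entry Thor RSS table noted in
 * bnxt_setup_one_vnic() above, a configuration requesting 600 Rx rings
 * exceeds BNXT_RSS_TBL_SIZE_P5 and the VNIC setup fails before any RSS
 * contexts are allocated.
 */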

static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma,
				&bp->flow_stat->rx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p"
		    " rx_fc_in_tbl.ctx_id = %d\n",
		    bp->flow_stat->rx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma),
		    bp->flow_stat->rx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma,
				&bp->flow_stat->rx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p"
		    " rx_fc_out_tbl.ctx_id = %d\n",
		    bp->flow_stat->rx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma),
		    bp->flow_stat->rx_fc_out_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma,
				&bp->flow_stat->tx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p"
		    " tx_fc_in_tbl.ctx_id = %d\n",
		    bp->flow_stat->tx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma),
		    bp->flow_stat->tx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma,
				&bp->flow_stat->tx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p"
		    " tx_fc_out_tbl.ctx_id = %d\n",
		    bp->flow_stat->tx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma),
		    bp->flow_stat->tx_fc_out_tbl.ctx_id);

	memset(bp->flow_stat->rx_fc_out_tbl.va,
	       0,
	       bp->flow_stat->rx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->flow_stat->rx_fc_out_tbl.ctx_id,
				       bp->flow_stat->max_fc,
				       true);
	if (rc)
		return rc;

	memset(bp->flow_stat->tx_fc_out_tbl.va,
	       0,
	       bp->flow_stat->tx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->flow_stat->tx_fc_out_tbl.ctx_id,
				       bp->flow_stat->max_fc,
				       true);

	return rc;
}

static int bnxt_alloc_ctx_mem_buf(struct bnxt *bp, char *type, size_t size,
				  struct bnxt_ctx_mem_buf_info *ctx)
{
	if (!ctx)
		return -EINVAL;

	ctx->va = rte_zmalloc_socket(type, size, 0,
				     bp->eth_dev->device->numa_node);
	if (ctx->va == NULL)
		return -ENOMEM;
	rte_mem_lock_page(ctx->va);
	ctx->size = size;
	ctx->dma = rte_mem_virt2iova(ctx->va);
	if (ctx->dma == RTE_BAD_IOVA)
		return -ENOMEM;

	return 0;
}

static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];
	uint16_t max_fc;
	int rc = 0;

	max_fc = bp->flow_stat->max_fc;

	sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 4,
				    &bp->flow_stat->rx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 16,
				    &bp->flow_stat->rx_fc_out_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 4,
				    &bp->flow_stat->tx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 16,
				    &bp->flow_stat->tx_fc_out_tbl);
	if (rc)
		return rc;

	rc = bnxt_register_fc_ctx_mem(bp);

	return rc;
}

static int bnxt_init_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) ||
	    !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) ||
	    !BNXT_FLOW_XSTATS_EN(bp))
		return 0;

	rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc);
	if (rc)
		return rc;

	rc = bnxt_init_fc_ctx_mem(bp);

	return rc;
}
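
/*
 * Sizing example for the flow-counter tables set up above (per the 4-byte
 * counter-id / 16-byte counter layout documented in bnxt_init_fc_ctx_mem()):
 * with max_fc = 4096 counters, each "in" table is 16 KiB and each "out"
 * table is 64 KiB, allocated once for the Rx direction and once for Tx.
 * The value 4096 is only an illustrative example; the real limit comes from
 * bnxt_hwrm_cfa_counter_qcaps().
 */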

static int bnxt_update_phy_setting(struct bnxt *bp)
{
	struct rte_eth_link new;
	int rc;

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		PMD_DRV_LOG(ERR, "Failed to get link settings\n");
		return rc;
	}

	/*
	 * On BCM957508-N2100 adapters, FW will not allow any user other
	 * than BMC to shutdown the port. bnxt_get_hwrm_link_config() call
	 * always returns link up. Force phy update always in that case.
	 */
	if (!new.link_status || IS_BNXT_DEV_957508_N2100(bp)) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			PMD_DRV_LOG(ERR, "Failed to update PHY settings\n");
			return rc;
		}
	}

	return rc;
}

static void bnxt_free_prev_ring_stats(struct bnxt *bp)
{
	rte_free(bp->prev_rx_ring_stats);
	rte_free(bp->prev_tx_ring_stats);

	bp->prev_rx_ring_stats = NULL;
	bp->prev_tx_ring_stats = NULL;
}

static int bnxt_alloc_prev_ring_stats(struct bnxt *bp)
{
	bp->prev_rx_ring_stats = rte_zmalloc("bnxt_prev_rx_ring_stats",
					     sizeof(struct bnxt_ring_stats) *
					     bp->rx_cp_nr_rings,
					     0);
	if (bp->prev_rx_ring_stats == NULL)
		return -ENOMEM;

	bp->prev_tx_ring_stats = rte_zmalloc("bnxt_prev_tx_ring_stats",
					     sizeof(struct bnxt_ring_stats) *
					     bp->tx_cp_nr_rings,
					     0);
	if (bp->prev_tx_ring_stats == NULL)
		goto error;

	return 0;

error:
	bnxt_free_prev_ring_stats(bp);
	return -ENOMEM;
}

static int bnxt_start_nic(struct bnxt *bp)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
	uint32_t vec = BNXT_MISC_VEC_ID;
	unsigned int i, j;
	int rc;

	if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
		bp->eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		bp->flags |= BNXT_FLAG_JUMBO;
	} else {
		bp->eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	/* THOR does not support ring groups.
	 * But we will use the array to save RSS context IDs.
	 */
	if (BNXT_CHIP_P5(bp))
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
		goto skip_cosq_cfg;

	for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
		if (bp->rx_cos_queue[i].id != 0xff) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];

			if (!vnic) {
				PMD_DRV_LOG(ERR,
					    "Num pools more than FW profile\n");
				rc = -EINVAL;
				goto err_out;
			}
			vnic->cos_queue_id = bp->rx_cos_queue[i].id;
			bp->rx_cosq_cnt++;
		}
	}

skip_cosq_cfg:
	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* default vnic 0 */
	rc = bnxt_setup_one_vnic(bp, 0);
	if (rc)
		goto err_out;
	/* VNIC configuration */
	if (BNXT_RFS_NEEDS_VNIC(bp)) {
		for (i = 1; i < bp->nr_vnics; i++) {
			rc = bnxt_setup_one_vnic(bp, i);
			if (rc)
				goto err_out;
		}
	}

	for (j = 0; j < bp->tx_nr_rings; j++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[j];

		if (!txq->tx_deferred_start) {
			bp->eth_dev->data->tx_queue_state[j] =
				RTE_ETH_QUEUE_STATE_STARTED;
			txq->tx_started = true;
		}
	}

	for (j = 0; j < bp->rx_nr_rings; j++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[j];

		if (!rxq->rx_deferred_start) {
			bp->eth_dev->data->rx_queue_state[j] =
				RTE_ETH_QUEUE_STATE_STARTED;
			rxq->rx_started = true;
		}
	}

	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = bp->eth_dev->data->nb_rx_queues;
		PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
		if (intr_vector > bp->rx_cp_nr_rings) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
				    bp->rx_cp_nr_rings);
			return -ENOTSUP;
		}
		rc = rte_intr_efd_enable(intr_handle, intr_vector);
		if (rc)
			return rc;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    bp->eth_dev->data->nb_rx_queues *
				    sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
				    " intr_vec", bp->eth_dev->data->nb_rx_queues);
			rc = -ENOMEM;
			goto err_out;
		}
		PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
			    "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
			    intr_handle->intr_vec, intr_handle->nb_efd,
			    intr_handle->max_intr);
		for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
		     queue_id++) {
			intr_handle->intr_vec[queue_id] =
				vec + BNXT_RX_VEC_START;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}
	}

	/* enable uio/vfio intr/eventfd mapping */
	rc = rte_intr_enable(intr_handle);
#ifndef RTE_EXEC_ENV_FREEBSD
	/* In FreeBSD OS, nic_uio driver does not support interrupts */
	if (rc)
		goto err_out;
#endif

	rc = bnxt_update_phy_setting(bp);
	if (rc)
		goto err_out;

	bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0);
	if (!bp->mark_table)
		PMD_DRV_LOG(ERR, "Allocation of mark table failed\n");

	return 0;

err_out:
	/* Some of the error status returned by FW may not be from errno.h */
	if (rc > 0)
		rc = -EIO;

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}

/*
 * Device configuration and status function
 */

uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
{
	uint32_t link_speed = 0;
	uint32_t speed_capa = 0;

	if (bp->link_info == NULL)
		return 0;

	link_speed = bp->link_info->support_speeds;

	/* If PAM4 is configured, use PAM4 supported speed */
	if (link_speed == 0 && bp->link_info->support_pam4_speeds > 0)
		link_speed = bp->link_info->support_pam4_speeds;

	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
		speed_capa |= ETH_LINK_SPEED_100M;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
		speed_capa |= ETH_LINK_SPEED_100M_HD;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
		speed_capa |= ETH_LINK_SPEED_1G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
		speed_capa |= ETH_LINK_SPEED_2_5G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
		speed_capa |= ETH_LINK_SPEED_10G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
		speed_capa |= ETH_LINK_SPEED_20G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
		speed_capa |= ETH_LINK_SPEED_25G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
		speed_capa |= ETH_LINK_SPEED_40G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
		speed_capa |= ETH_LINK_SPEED_50G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
		speed_capa |= ETH_LINK_SPEED_100G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
		speed_capa |= ETH_LINK_SPEED_50G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
		speed_capa |= ETH_LINK_SPEED_100G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
		speed_capa |= ETH_LINK_SPEED_200G;

	if (bp->link_info->auto_mode ==
	    HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
		speed_capa |= ETH_LINK_SPEED_FIXED;

	return speed_capa;
}
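
/*
 * Example: a port whose link_info->support_speeds advertises only the 25 Gb
 * and 100 Gb bits, with auto_mode set to NONE, reports
 * speed_capa == ETH_LINK_SPEED_25G | ETH_LINK_SPEED_100G |
 * ETH_LINK_SPEED_FIXED.
 */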

static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
	struct bnxt *bp = eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;
	unsigned int max_rx_rings;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* MAC Specifics */
	dev_info->max_mac_addrs = bp->max_l2_ctx;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp))
		dev_info->max_vfs = pdev->max_vfs;

	max_rx_rings = bnxt_max_rings(bp);
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
	dev_info->hash_key_size = HW_HASH_KEY_SIZE;
	max_vnics = bp->max_vnics;

	/* MTU specifics */
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = BNXT_MAX_MTU;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_STRIP;
	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
				    dev_info->tx_queue_offload_capa;
	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = BNXT_DEFAULT_RX_DROP_EN,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
	dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;

	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
		dev_info->switch_info.name = eth_dev->device->name;
		dev_info->switch_info.domain_id = bp->switch_domain_id;
		dev_info->switch_info.port_id =
			BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF :
				      BNXT_SWITCH_PORT_ID_TRUSTED_VF;
	}

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 * need further investigation.
	 */

	/* VMDq resources */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;

	return 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int rc;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
		rc = bnxt_hwrm_check_vf_rings(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
			return -ENOSPC;
		}

		/* If a resource has already been allocated - in this case
		 * it is the async completion ring, free it. Reallocate it after
		 * resource reservation. This will ensure the resource counts
		 * are calculated correctly.
		 */

		pthread_mutex_lock(&bp->def_cp_lock);

		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
			bnxt_disable_int(bp);
			bnxt_free_cp_ring(bp, bp->async_cp_ring);
		}

		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
			pthread_mutex_unlock(&bp->def_cp_lock);
			return -ENOSPC;
		}

		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
			rc = bnxt_alloc_async_cp_ring(bp);
			if (rc) {
				pthread_mutex_unlock(&bp->def_cp_lock);
				return rc;
			}
			bnxt_enable_int(bp);
		}

		pthread_mutex_unlock(&bp->def_cp_lock);
	}

	/* Inherit new configurations */
	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
		+ BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
	    bp->max_stat_ctx)
		goto resource_error;

	if (BNXT_HAS_RING_GRPS(bp) &&
	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
		goto resource_error;

	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
	    bp->max_vnics < eth_dev->data->nb_rx_queues)
		goto resource_error;

	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
			BNXT_NUM_VLANS;
		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
	}
	return 0;

resource_error:
	PMD_DRV_LOG(ERR,
		    "Insufficient resources to support requested config\n");
	PMD_DRV_LOG(ERR,
		    "Num Queues Requested: Tx %d, Rx %d\n",
		    eth_dev->data->nb_tx_queues,
		    eth_dev->data->nb_rx_queues);
	PMD_DRV_LOG(ERR,
		    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
		    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
		    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
	return -ENOSPC;
}
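
/*
 * Worked example for the jumbo-frame MTU derivation in
 * bnxt_dev_configure_op(), assuming RTE_ETHER_HDR_LEN == 14,
 * RTE_ETHER_CRC_LEN == 4, VLAN_TAG_SIZE == 4 and BNXT_NUM_VLANS == 2:
 * a max_rx_pkt_len of 9600 yields an MTU of 9600 - 14 - 4 - 4 * 2 = 9574.
 */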

void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
			    eth_dev->data->port_id,
			    (uint32_t)link->link_speed,
			    (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
			    ("full-duplex") : ("half-duplex\n"));
	else
		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
			    eth_dev->data->port_id);
}

/*
 * Determine whether the current configuration requires support for scattered
 * receive; return 1 if scattered receive is required and 0 if not.
 */
static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
{
	uint16_t buf_size;
	int i;

	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
		return 1;

	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
		return 1;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];

		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
				      RTE_PKTMBUF_HEADROOM);
		if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
			return 1;
	}
	return 0;
}
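
/*
 * Example for bnxt_scattered_rx(): a mempool created with
 * RTE_MBUF_DEFAULT_BUF_SIZE (2176 bytes, assuming the default 128-byte
 * RTE_PKTMBUF_HEADROOM) leaves 2048 usable bytes per mbuf, so any
 * max_rx_pkt_len above 2048 forces scattered receive even when
 * DEV_RX_OFFLOAD_SCATTER was not requested explicitly.
 */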

static eth_rx_burst_t
bnxt_receive_function(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	/* Disable vector mode RX for Stingray2 for now */
	if (BNXT_CHIP_SR2(bp)) {
		bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
		return bnxt_recv_pkts;
	}

#if (defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)) && \
	!defined(RTE_LIBRTE_IEEE1588)

	/* Vector mode receive cannot be enabled if scattered rx is in use. */
	if (eth_dev->data->scattered_rx)
		goto use_scalar_rx;

	/*
	 * Vector mode receive cannot be enabled if Truflow is enabled or if
	 * asynchronous completions and receive completions can be placed in
	 * the same completion ring.
	 */
	if (BNXT_TRUFLOW_EN(bp) || !BNXT_NUM_ASYNC_CPR(bp))
		goto use_scalar_rx;

	/*
	 * Vector mode receive cannot be enabled if any receive offloads outside
	 * a limited subset have been enabled.
	 */
	if (eth_dev->data->dev_conf.rxmode.offloads &
	    ~(DEV_RX_OFFLOAD_VLAN_STRIP |
	      DEV_RX_OFFLOAD_KEEP_CRC |
	      DEV_RX_OFFLOAD_JUMBO_FRAME |
	      DEV_RX_OFFLOAD_IPV4_CKSUM |
	      DEV_RX_OFFLOAD_UDP_CKSUM |
	      DEV_RX_OFFLOAD_TCP_CKSUM |
	      DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
	      DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
	      DEV_RX_OFFLOAD_RSS_HASH |
	      DEV_RX_OFFLOAD_VLAN_FILTER))
		goto use_scalar_rx;

#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
	if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
	    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) {
		PMD_DRV_LOG(INFO,
			    "Using AVX2 vector mode receive for port %d\n",
			    eth_dev->data->port_id);
		bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
		return bnxt_recv_pkts_vec_avx2;
	}
#endif
	if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
		PMD_DRV_LOG(INFO,
			    "Using SSE vector mode receive for port %d\n",
			    eth_dev->data->port_id);
		bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
		return bnxt_recv_pkts_vec;
	}

use_scalar_rx:
	PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
		    eth_dev->data->port_id);
	PMD_DRV_LOG(INFO,
		    "Port %d scatter: %d rx offload: %" PRIX64 "\n",
		    eth_dev->data->port_id,
		    eth_dev->data->scattered_rx,
		    eth_dev->data->dev_conf.rxmode.offloads);
#endif
	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
	return bnxt_recv_pkts;
}

static eth_tx_burst_t
bnxt_transmit_function(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	/* Disable vector mode TX for Stingray2 for now */
	if (BNXT_CHIP_SR2(bp))
		return bnxt_xmit_pkts;

#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) && \
	!defined(RTE_LIBRTE_IEEE1588)
	uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads;

	/*
	 * Vector mode transmit can be enabled only if not using scatter rx
	 * or tx offloads.
	 */
	if (eth_dev->data->scattered_rx ||
	    (offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) ||
	    BNXT_TRUFLOW_EN(bp))
		goto use_scalar_tx;

#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
	if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
	    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) {
		PMD_DRV_LOG(INFO,
			    "Using AVX2 vector mode transmit for port %d\n",
			    eth_dev->data->port_id);
		return bnxt_xmit_pkts_vec_avx2;
	}
#endif
	if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
		PMD_DRV_LOG(INFO,
			    "Using SSE vector mode transmit for port %d\n",
			    eth_dev->data->port_id);
		return bnxt_xmit_pkts_vec;
	}

use_scalar_tx:
	PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
		    eth_dev->data->port_id);
	PMD_DRV_LOG(INFO,
		    "Port %d scatter: %d tx offload: %" PRIX64 "\n",
		    eth_dev->data->port_id,
		    eth_dev->data->scattered_rx,
		    offloads);
#endif
	return bnxt_xmit_pkts;
}
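
/*
 * Note: the two selectors above are invoked from bnxt_dev_start_op() when
 * the burst handlers are installed; both fall back to the scalar
 * bnxt_recv_pkts / bnxt_xmit_pkts whenever the vector-mode prerequisites
 * (no scattered Rx, no Truflow, limited offload set, sufficient SIMD width)
 * are not met.
 */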

static int bnxt_handle_if_change_status(struct bnxt *bp)
{
	int rc;

	/* Since fw has undergone a reset and lost all contexts,
	 * set fatal flag to not issue hwrm during cleanup
	 */
	bp->flags |= BNXT_FLAG_FATAL_ERROR;
	bnxt_uninit_resources(bp, true);

	/* clear fatal flag so that re-init happens */
	bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
	rc = bnxt_init_resources(bp, true);

	bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;

	return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc = 0;

	if (!BNXT_SINGLE_PF(bp))
		return -ENOTSUP;

	if (!bp->link_info->link_up)
		rc = bnxt_set_hwrm_link_config(bp, true);
	if (!rc)
		eth_dev->data->dev_link.link_status = 1;

	bnxt_print_link_info(eth_dev);
	return rc;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	if (!BNXT_SINGLE_PF(bp))
		return -ENOTSUP;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	bp->link_info->link_up = 0;

	return 0;
}

static void bnxt_free_switch_domain(struct bnxt *bp)
{
	int rc = 0;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)))
		return;

	rc = rte_eth_switch_domain_free(bp->switch_domain_id);
	if (rc)
		PMD_DRV_LOG(ERR, "free switch domain:%d fail: %d\n",
			    bp->switch_domain_id, rc);
}

static void bnxt_ptp_get_current_time(void *arg)
{
	struct bnxt *bp = arg;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return;

	if (!ptp)
		return;

	bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
				&ptp->current_time);

	rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp);
	if (rc != 0) {
		PMD_DRV_LOG(ERR, "Failed to re-schedule PTP alarm\n");
		bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED;
	}
}

static int bnxt_schedule_ptp_alarm(struct bnxt *bp)
{
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	int rc;

	if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED)
		return 0;

	bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
				&ptp->current_time);

	rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp);
	return rc;
}

static void bnxt_cancel_ptp_alarm(struct bnxt *bp)
{
	if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) {
		rte_eal_alarm_cancel(bnxt_ptp_get_current_time, (void *)bp);
		bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED;
	}
}

static void bnxt_ptp_stop(struct bnxt *bp)
{
	bnxt_cancel_ptp_alarm(bp);
	bp->flags2 &= ~BNXT_FLAGS2_PTP_TIMESYNC_ENABLED;
}

static int bnxt_ptp_start(struct bnxt *bp)
{
	int rc;

	rc = bnxt_schedule_ptp_alarm(bp);
	if (rc != 0) {
		PMD_DRV_LOG(ERR, "Failed to schedule PTP alarm\n");
	} else {
		bp->flags2 |= BNXT_FLAGS2_PTP_TIMESYNC_ENABLED;
		bp->flags2 |= BNXT_FLAGS2_PTP_ALARM_SCHEDULED;
	}

	return rc;
}
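
/*
 * Note: bnxt_schedule_ptp_alarm() and bnxt_ptp_get_current_time() above keep
 * an EAL alarm re-armed every US_PER_S microseconds (i.e. once per second)
 * to refresh ptp->current_time; bnxt_ptp_stop() or bnxt_cancel_ptp_alarm()
 * must run to break the cycle, e.g. from bnxt_dev_stop() below.
 */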

static int bnxt_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct rte_eth_link link;
	int ret;

	eth_dev->data->dev_started = 0;
	eth_dev->data->scattered_rx = 0;

	/* Prevent crashes when queues are still in use */
	eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;

	bnxt_disable_int(bp);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Stop the child representors for this device */
	ret = bnxt_rep_stop_all(bp);
	if (ret != 0)
		return ret;

	/* delete the bnxt ULP port details */
	bnxt_ulp_port_deinit(bp);

	bnxt_cancel_fw_health_check(bp);

	if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp))
		bnxt_cancel_ptp_alarm(bp);

	/* Do not bring link down during reset recovery */
	if (!is_bnxt_in_error(bp)) {
		bnxt_dev_set_link_down_op(eth_dev);
		/* Wait for link to be reset */
		if (BNXT_SINGLE_PF(bp))
			rte_delay_ms(500);
		/* clear the recorded link status */
		memset(&link, 0, sizeof(link));
		rte_eth_linkstatus_set(eth_dev, &link);
	}

	/* Clean queue intr-vector mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	bnxt_hwrm_port_clr_stats(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	/* Process any remaining notifications in default completion queue */
	bnxt_int_handler(eth_dev);
	bnxt_shutdown_nic(bp);
	bnxt_hwrm_if_change(bp, false);

	bnxt_free_prev_ring_stats(bp);
	rte_free(bp->mark_table);
	bp->mark_table = NULL;

	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
	bp->rx_cosq_cnt = 0;
	/* All filters are deleted on a port stop. */
	if (BNXT_FLOW_XSTATS_EN(bp))
		bp->flow_stat->flow_count = 0;

	return 0;
}

/* Unload the driver, release resources */
static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	pthread_mutex_lock(&bp->err_recovery_lock);
	if (bp->flags & BNXT_FLAG_FW_RESET) {
		PMD_DRV_LOG(ERR,
			    "Adapter recovering from error..Please retry\n");
		pthread_mutex_unlock(&bp->err_recovery_lock);
		return -EAGAIN;
	}
	pthread_mutex_unlock(&bp->err_recovery_lock);

	return bnxt_dev_stop(eth_dev);
}

static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int vlan_mask = 0;
	int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT;

	if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
		return -EINVAL;
	}

	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS)
		PMD_DRV_LOG(ERR,
			    "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
			    bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	do {
		rc = bnxt_hwrm_if_change(bp, true);
		if (rc == 0 || rc != -EAGAIN)
			break;

		rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL);
	} while (retry_cnt--);

	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
		rc = bnxt_handle_if_change_status(bp);
		if (rc)
			return rc;
	}

	bnxt_enable_int(bp);

	eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);

	rc = bnxt_start_nic(bp);
	if (rc)
		goto error;

	rc = bnxt_alloc_prev_ring_stats(bp);
	if (rc)
		goto error;

	eth_dev->data->dev_started = 1;

	bnxt_link_update_op(eth_dev, 1);

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		vlan_mask |= ETH_VLAN_FILTER_MASK;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vlan_mask |= ETH_VLAN_STRIP_MASK;
	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
	if (rc)
		goto error;

	/* Initialize bnxt ULP port details */
	rc = bnxt_ulp_port_init(bp);
	if (rc)
		goto error;

	eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
	eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);

	bnxt_schedule_fw_health_check(bp);

	if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp))
		bnxt_schedule_ptp_alarm(bp);

	return 0;

error:
	bnxt_dev_stop(eth_dev);
	return rc;
}

static void
bnxt_uninit_locks(struct bnxt *bp)
{
	pthread_mutex_destroy(&bp->flow_lock);
	pthread_mutex_destroy(&bp->def_cp_lock);
	pthread_mutex_destroy(&bp->health_check_lock);
	pthread_mutex_destroy(&bp->err_recovery_lock);
	if (bp->rep_info) {
		pthread_mutex_destroy(&bp->rep_info->vfr_lock);
		pthread_mutex_destroy(&bp->rep_info->vfr_start_lock);
	}
}

static void bnxt_drv_uninit(struct bnxt *bp)
{
	bnxt_free_leds_info(bp);
	bnxt_free_cos_queues(bp);
	bnxt_free_link_info(bp);
	bnxt_free_parent_info(bp);
	bnxt_uninit_locks(bp);

	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
	bp->tx_mem_zone = NULL;
	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
	bp->rx_mem_zone = NULL;

	bnxt_free_vf_info(bp);
	bnxt_free_pf_info(bp);

	rte_free(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pthread_mutex_lock(&bp->err_recovery_lock);
	if (bp->flags & BNXT_FLAG_FW_RESET) {
		PMD_DRV_LOG(ERR,
			    "Adapter recovering from error...Please retry\n");
		pthread_mutex_unlock(&bp->err_recovery_lock);
		return -EAGAIN;
	}
	pthread_mutex_unlock(&bp->err_recovery_lock);

	/* cancel the recovery handler before remove dev */
	rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
	rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
	bnxt_cancel_fc_thread(bp);

	if (eth_dev->data->dev_started)
		ret = bnxt_dev_stop(eth_dev);

	bnxt_uninit_resources(bp, false);

	bnxt_drv_uninit(bp);

	return ret;
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	uint32_t i;

	if (is_bnxt_in_error(bp))
		return;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < bp->nr_vnics; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		vnic = &bp->vnic_info[i];
		filter = STAILQ_FIRST(&vnic->filter);
		while (filter) {
			temp_filter = STAILQ_NEXT(filter, next);
			if (filter->mac_index == index) {
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				bnxt_hwrm_clear_l2_filter(bp, filter);
				bnxt_free_filter(bp, filter);
			}
			filter = temp_filter;
		}
	}
}

static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			       struct rte_ether_addr *mac_addr, uint32_t index,
			       uint32_t pool)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			PMD_DRV_LOG(DEBUG,
				    "MAC addr already existed for pool %d\n",
				    pool);
			return 0;
		}
	}

	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
		return -ENODEV;
	}

	/* bnxt_alloc_filter copies default MAC to filter->l2_addr. So,
	 * if the MAC that's been programmed now is a different one, then,
	 * copy that addr to filter->l2_addr
	 */
	if (mac_addr)
		memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;

	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
	if (!rc) {
		filter->mac_index = index;
		if (filter->mac_index == 0)
			STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
		else
			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	} else {
		bnxt_free_filter(bp, filter);
	}

	return rc;
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool);

	return rc;
}

int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_link new;
	int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT :
			BNXT_MIN_LINK_WAIT_CNT;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	memset(&new, 0, sizeof(new));

	if (bp->link_info == NULL)
		goto out;

	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR,
				    "Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}

		if (!wait_to_complete || new.link_status)
			break;

		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
	} while (cnt--);

	/* Only single function PF can bring phy down.
	 * When port is stopped, report link down for VF/MH/NPAR functions.
	 */
	if (!BNXT_SINGLE_PF(bp) && !eth_dev->data->dev_started)
		memset(&new, 0, sizeof(new));

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		rte_eth_linkstatus_set(eth_dev, &new);
		bnxt_print_link_info(eth_dev);
	}

	return rc;
}

static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}
*/ 1993 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) 1994 { 1995 if (qid >= bp->rx_nr_rings) 1996 return NULL; 1997 1998 return bp->eth_dev->data->rx_queues[qid]; 1999 } 2000 2001 /* Return rxq corresponding to a given rss table ring/group ID. */ 2002 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) 2003 { 2004 struct bnxt_rx_queue *rxq; 2005 unsigned int i; 2006 2007 if (!BNXT_HAS_RING_GRPS(bp)) { 2008 for (i = 0; i < bp->rx_nr_rings; i++) { 2009 rxq = bp->eth_dev->data->rx_queues[i]; 2010 if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr) 2011 return rxq->index; 2012 } 2013 } else { 2014 for (i = 0; i < bp->rx_nr_rings; i++) { 2015 if (bp->grp_info[i].fw_grp_id == fwr) 2016 return i; 2017 } 2018 } 2019 2020 return INVALID_HW_RING_ID; 2021 } 2022 2023 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, 2024 struct rte_eth_rss_reta_entry64 *reta_conf, 2025 uint16_t reta_size) 2026 { 2027 struct bnxt *bp = eth_dev->data->dev_private; 2028 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2029 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2030 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2031 uint16_t idx, sft; 2032 int i, rc; 2033 2034 rc = is_bnxt_in_error(bp); 2035 if (rc) 2036 return rc; 2037 2038 if (!vnic->rss_table) 2039 return -EINVAL; 2040 2041 if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) 2042 return -EINVAL; 2043 2044 if (reta_size != tbl_size) { 2045 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2046 "(%d) must equal the size supported by the hardware " 2047 "(%d)\n", reta_size, tbl_size); 2048 return -EINVAL; 2049 } 2050 2051 for (i = 0; i < reta_size; i++) { 2052 struct bnxt_rx_queue *rxq; 2053 2054 idx = i / RTE_RETA_GROUP_SIZE; 2055 sft = i % RTE_RETA_GROUP_SIZE; 2056 2057 if (!(reta_conf[idx].mask & (1ULL << sft))) 2058 continue; 2059 2060 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); 2061 if (!rxq) { 2062 PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n"); 2063 return -EINVAL; 2064 } 2065 2066 if (BNXT_CHIP_P5(bp)) { 2067 vnic->rss_table[i * 2] = 2068 rxq->rx_ring->rx_ring_struct->fw_ring_id; 2069 vnic->rss_table[i * 2 + 1] = 2070 rxq->cp_ring->cp_ring_struct->fw_ring_id; 2071 } else { 2072 vnic->rss_table[i] = 2073 vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; 2074 } 2075 } 2076 2077 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2078 return rc; 2079 } 2080 2081 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, 2082 struct rte_eth_rss_reta_entry64 *reta_conf, 2083 uint16_t reta_size) 2084 { 2085 struct bnxt *bp = eth_dev->data->dev_private; 2086 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2087 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2088 uint16_t idx, sft, i; 2089 int rc; 2090 2091 rc = is_bnxt_in_error(bp); 2092 if (rc) 2093 return rc; 2094 2095 if (!vnic) 2096 return -EINVAL; 2097 if (!vnic->rss_table) 2098 return -EINVAL; 2099 2100 if (reta_size != tbl_size) { 2101 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2102 "(%d) must equal the size supported by the hardware " 2103 "(%d)\n", reta_size, tbl_size); 2104 return -EINVAL; 2105 } 2106 2107 for (idx = 0, i = 0; i < reta_size; i++) { 2108 idx = i / RTE_RETA_GROUP_SIZE; 2109 sft = i % RTE_RETA_GROUP_SIZE; 2110 2111 if (reta_conf[idx].mask & (1ULL << sft)) { 2112 uint16_t qid; 2113 2114 if (BNXT_CHIP_P5(bp)) 2115 qid = bnxt_rss_to_qid(bp, 2116 vnic->rss_table[i * 2]); 2117 else 2118 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); 2119 2120 if (qid == INVALID_HW_RING_ID) { 2121 
PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n"); 2122 return -EINVAL; 2123 } 2124 reta_conf[idx].reta[sft] = qid; 2125 } 2126 } 2127 2128 return 0; 2129 } 2130 2131 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, 2132 struct rte_eth_rss_conf *rss_conf) 2133 { 2134 struct bnxt *bp = eth_dev->data->dev_private; 2135 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2136 struct bnxt_vnic_info *vnic; 2137 int rc; 2138 2139 rc = is_bnxt_in_error(bp); 2140 if (rc) 2141 return rc; 2142 2143 /* 2144 * If RSS enablement were different than dev_configure, 2145 * then return -EINVAL 2146 */ 2147 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { 2148 if (!rss_conf->rss_hf) 2149 PMD_DRV_LOG(ERR, "Hash type NONE\n"); 2150 } else { 2151 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) 2152 return -EINVAL; 2153 } 2154 2155 bp->flags |= BNXT_FLAG_UPDATE_HASH; 2156 memcpy(ð_dev->data->dev_conf.rx_adv_conf.rss_conf, 2157 rss_conf, 2158 sizeof(*rss_conf)); 2159 2160 /* Update the default RSS VNIC(s) */ 2161 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2162 vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf); 2163 vnic->hash_mode = 2164 bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf, 2165 ETH_RSS_LEVEL(rss_conf->rss_hf)); 2166 2167 /* 2168 * If hashkey is not specified, use the previously configured 2169 * hashkey 2170 */ 2171 if (!rss_conf->rss_key) 2172 goto rss_config; 2173 2174 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) { 2175 PMD_DRV_LOG(ERR, 2176 "Invalid hashkey length, should be %d bytes\n", 2177 HW_HASH_KEY_SIZE); 2178 return -EINVAL; 2179 } 2180 memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); 2181 2182 rss_config: 2183 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2184 return rc; 2185 } 2186 2187 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, 2188 struct rte_eth_rss_conf *rss_conf) 2189 { 2190 struct bnxt *bp = eth_dev->data->dev_private; 2191 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2192 int len, rc; 2193 uint32_t hash_types; 2194 2195 rc = is_bnxt_in_error(bp); 2196 if (rc) 2197 return rc; 2198 2199 /* RSS configuration is the same for all VNICs */ 2200 if (vnic && vnic->rss_hash_key) { 2201 if (rss_conf->rss_key) { 2202 len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ? 
2203 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 2204 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 2205 } 2206 2207 hash_types = vnic->hash_type; 2208 rss_conf->rss_hf = 0; 2209 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { 2210 rss_conf->rss_hf |= ETH_RSS_IPV4; 2211 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 2212 } 2213 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { 2214 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; 2215 hash_types &= 2216 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 2217 } 2218 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { 2219 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; 2220 hash_types &= 2221 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 2222 } 2223 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { 2224 rss_conf->rss_hf |= ETH_RSS_IPV6; 2225 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 2226 } 2227 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { 2228 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; 2229 hash_types &= 2230 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 2231 } 2232 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { 2233 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; 2234 hash_types &= 2235 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 2236 } 2237 2238 rss_conf->rss_hf |= 2239 bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode); 2240 2241 if (hash_types) { 2242 PMD_DRV_LOG(ERR, 2243 "Unknown RSS config from firmware (%08x), RSS disabled", 2244 vnic->hash_type); 2245 return -ENOTSUP; 2246 } 2247 } else { 2248 rss_conf->rss_hf = 0; 2249 } 2250 return 0; 2251 } 2252 2253 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 2254 struct rte_eth_fc_conf *fc_conf) 2255 { 2256 struct bnxt *bp = dev->data->dev_private; 2257 struct rte_eth_link link_info; 2258 int rc; 2259 2260 rc = is_bnxt_in_error(bp); 2261 if (rc) 2262 return rc; 2263 2264 rc = bnxt_get_hwrm_link_config(bp, &link_info); 2265 if (rc) 2266 return rc; 2267 2268 memset(fc_conf, 0, sizeof(*fc_conf)); 2269 if (bp->link_info->auto_pause) 2270 fc_conf->autoneg = 1; 2271 switch (bp->link_info->pause) { 2272 case 0: 2273 fc_conf->mode = RTE_FC_NONE; 2274 break; 2275 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 2276 fc_conf->mode = RTE_FC_TX_PAUSE; 2277 break; 2278 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 2279 fc_conf->mode = RTE_FC_RX_PAUSE; 2280 break; 2281 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 2282 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 2283 fc_conf->mode = RTE_FC_FULL; 2284 break; 2285 } 2286 return 0; 2287 } 2288 2289 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 2290 struct rte_eth_fc_conf *fc_conf) 2291 { 2292 struct bnxt *bp = dev->data->dev_private; 2293 int rc; 2294 2295 rc = is_bnxt_in_error(bp); 2296 if (rc) 2297 return rc; 2298 2299 if (!BNXT_SINGLE_PF(bp)) { 2300 PMD_DRV_LOG(ERR, 2301 "Flow Control Settings cannot be modified on VF or on shared PF\n"); 2302 return -ENOTSUP; 2303 } 2304 2305 switch (fc_conf->mode) { 2306 case RTE_FC_NONE: 2307 bp->link_info->auto_pause = 0; 2308 bp->link_info->force_pause = 0; 2309 break; 2310 case RTE_FC_RX_PAUSE: 2311 if (fc_conf->autoneg) { 2312 bp->link_info->auto_pause = 2313 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2314 bp->link_info->force_pause = 0; 2315 } else { 2316 bp->link_info->auto_pause = 0; 2317 bp->link_info->force_pause = 2318 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2319 } 2320 break; 2321 case RTE_FC_TX_PAUSE: 2322 if (fc_conf->autoneg) { 2323 bp->link_info->auto_pause = 2324 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 2325 
bp->link_info->force_pause = 0; 2326 } else { 2327 bp->link_info->auto_pause = 0; 2328 bp->link_info->force_pause = 2329 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 2330 } 2331 break; 2332 case RTE_FC_FULL: 2333 if (fc_conf->autoneg) { 2334 bp->link_info->auto_pause = 2335 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 2336 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2337 bp->link_info->force_pause = 0; 2338 } else { 2339 bp->link_info->auto_pause = 0; 2340 bp->link_info->force_pause = 2341 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 2342 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2343 } 2344 break; 2345 } 2346 return bnxt_set_hwrm_link_config(bp, true); 2347 } 2348 2349 /* Add UDP tunneling port */ 2350 static int 2351 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 2352 struct rte_eth_udp_tunnel *udp_tunnel) 2353 { 2354 struct bnxt *bp = eth_dev->data->dev_private; 2355 uint16_t tunnel_type = 0; 2356 int rc = 0; 2357 2358 rc = is_bnxt_in_error(bp); 2359 if (rc) 2360 return rc; 2361 2362 switch (udp_tunnel->prot_type) { 2363 case RTE_TUNNEL_TYPE_VXLAN: 2364 if (bp->vxlan_port_cnt) { 2365 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2366 udp_tunnel->udp_port); 2367 if (bp->vxlan_port != udp_tunnel->udp_port) { 2368 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2369 return -ENOSPC; 2370 } 2371 bp->vxlan_port_cnt++; 2372 return 0; 2373 } 2374 tunnel_type = 2375 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 2376 break; 2377 case RTE_TUNNEL_TYPE_GENEVE: 2378 if (bp->geneve_port_cnt) { 2379 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2380 udp_tunnel->udp_port); 2381 if (bp->geneve_port != udp_tunnel->udp_port) { 2382 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2383 return -ENOSPC; 2384 } 2385 bp->geneve_port_cnt++; 2386 return 0; 2387 } 2388 tunnel_type = 2389 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 2390 break; 2391 default: 2392 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2393 return -ENOTSUP; 2394 } 2395 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 2396 tunnel_type); 2397 2398 if (rc != 0) 2399 return rc; 2400 2401 if (tunnel_type == 2402 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN) 2403 bp->vxlan_port_cnt++; 2404 2405 if (tunnel_type == 2406 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE) 2407 bp->geneve_port_cnt++; 2408 2409 return rc; 2410 } 2411 2412 static int 2413 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 2414 struct rte_eth_udp_tunnel *udp_tunnel) 2415 { 2416 struct bnxt *bp = eth_dev->data->dev_private; 2417 uint16_t tunnel_type = 0; 2418 uint16_t port = 0; 2419 int rc = 0; 2420 2421 rc = is_bnxt_in_error(bp); 2422 if (rc) 2423 return rc; 2424 2425 switch (udp_tunnel->prot_type) { 2426 case RTE_TUNNEL_TYPE_VXLAN: 2427 if (!bp->vxlan_port_cnt) { 2428 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2429 return -EINVAL; 2430 } 2431 if (bp->vxlan_port != udp_tunnel->udp_port) { 2432 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2433 udp_tunnel->udp_port, bp->vxlan_port); 2434 return -EINVAL; 2435 } 2436 if (--bp->vxlan_port_cnt) 2437 return 0; 2438 2439 tunnel_type = 2440 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 2441 port = bp->vxlan_fw_dst_port_id; 2442 break; 2443 case RTE_TUNNEL_TYPE_GENEVE: 2444 if (!bp->geneve_port_cnt) { 2445 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2446 return -EINVAL; 2447 } 2448 if (bp->geneve_port != udp_tunnel->udp_port) { 2449 PMD_DRV_LOG(ERR, "Req Port: %d. 
Configured port: %d\n", 2450 udp_tunnel->udp_port, bp->geneve_port); 2451 return -EINVAL; 2452 } 2453 if (--bp->geneve_port_cnt) 2454 return 0; 2455 2456 tunnel_type = 2457 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 2458 port = bp->geneve_fw_dst_port_id; 2459 break; 2460 default: 2461 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2462 return -ENOTSUP; 2463 } 2464 2465 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 2466 return rc; 2467 } 2468 2469 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2470 { 2471 struct bnxt_filter_info *filter; 2472 struct bnxt_vnic_info *vnic; 2473 int rc = 0; 2474 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2475 2476 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2477 filter = STAILQ_FIRST(&vnic->filter); 2478 while (filter) { 2479 /* Search for this matching MAC+VLAN filter */ 2480 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) { 2481 /* Delete the filter */ 2482 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2483 if (rc) 2484 return rc; 2485 STAILQ_REMOVE(&vnic->filter, filter, 2486 bnxt_filter_info, next); 2487 bnxt_free_filter(bp, filter); 2488 PMD_DRV_LOG(INFO, 2489 "Deleted vlan filter for %d\n", 2490 vlan_id); 2491 return 0; 2492 } 2493 filter = STAILQ_NEXT(filter, next); 2494 } 2495 return -ENOENT; 2496 } 2497 2498 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2499 { 2500 struct bnxt_filter_info *filter; 2501 struct bnxt_vnic_info *vnic; 2502 int rc = 0; 2503 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 2504 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 2505 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2506 2507 /* Implementation notes on the use of VNIC in this command: 2508 * 2509 * By default, these filters belong to default vnic for the function. 2510 * Once these filters are set up, only destination VNIC can be modified. 2511 * If the destination VNIC is not specified in this command, 2512 * then the HWRM shall only create an l2 context id. 2513 */ 2514 2515 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2516 filter = STAILQ_FIRST(&vnic->filter); 2517 /* Check if the VLAN has already been added */ 2518 while (filter) { 2519 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) 2520 return -EEXIST; 2521 2522 filter = STAILQ_NEXT(filter, next); 2523 } 2524 2525 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 2526 * command to create MAC+VLAN filter with the right flags, enables set. 2527 */ 2528 filter = bnxt_alloc_filter(bp); 2529 if (!filter) { 2530 PMD_DRV_LOG(ERR, 2531 "MAC/VLAN filter alloc failed\n"); 2532 return -ENOMEM; 2533 } 2534 /* MAC + VLAN ID filter */ 2535 /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only 2536 * untagged packets are received 2537 * 2538 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged 2539 * packets and only the programmed vlan's packets are received 2540 */ 2541 filter->l2_ivlan = vlan_id; 2542 filter->l2_ivlan_mask = 0x0FFF; 2543 filter->enables |= en; 2544 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 2545 2546 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 2547 if (rc) { 2548 /* Free the newly allocated filter as we were 2549 * not able to create the filter in hardware. 
2550 */ 2551 bnxt_free_filter(bp, filter); 2552 return rc; 2553 } 2554 2555 filter->mac_index = 0; 2556 /* Add this new filter to the list */ 2557 if (vlan_id == 0) 2558 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 2559 else 2560 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2561 2562 PMD_DRV_LOG(INFO, 2563 "Added Vlan filter for %d\n", vlan_id); 2564 return rc; 2565 } 2566 2567 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 2568 uint16_t vlan_id, int on) 2569 { 2570 struct bnxt *bp = eth_dev->data->dev_private; 2571 int rc; 2572 2573 rc = is_bnxt_in_error(bp); 2574 if (rc) 2575 return rc; 2576 2577 if (!eth_dev->data->dev_started) { 2578 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); 2579 return -EINVAL; 2580 } 2581 2582 /* These operations apply to ALL existing MAC/VLAN filters */ 2583 if (on) 2584 return bnxt_add_vlan_filter(bp, vlan_id); 2585 else 2586 return bnxt_del_vlan_filter(bp, vlan_id); 2587 } 2588 2589 static int bnxt_del_dflt_mac_filter(struct bnxt *bp, 2590 struct bnxt_vnic_info *vnic) 2591 { 2592 struct bnxt_filter_info *filter; 2593 int rc; 2594 2595 filter = STAILQ_FIRST(&vnic->filter); 2596 while (filter) { 2597 if (filter->mac_index == 0 && 2598 !memcmp(filter->l2_addr, bp->mac_addr, 2599 RTE_ETHER_ADDR_LEN)) { 2600 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2601 if (!rc) { 2602 STAILQ_REMOVE(&vnic->filter, filter, 2603 bnxt_filter_info, next); 2604 bnxt_free_filter(bp, filter); 2605 } 2606 return rc; 2607 } 2608 filter = STAILQ_NEXT(filter, next); 2609 } 2610 return 0; 2611 } 2612 2613 static int 2614 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) 2615 { 2616 struct bnxt_vnic_info *vnic; 2617 unsigned int i; 2618 int rc; 2619 2620 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2621 if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { 2622 /* Remove any VLAN filters programmed */ 2623 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2624 bnxt_del_vlan_filter(bp, i); 2625 2626 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2627 if (rc) 2628 return rc; 2629 } else { 2630 /* Default filter will allow packets that match the 2631 * dest mac. 
So, it has to be deleted, otherwise, we 2632 * will endup receiving vlan packets for which the 2633 * filter is not programmed, when hw-vlan-filter 2634 * configuration is ON 2635 */ 2636 bnxt_del_dflt_mac_filter(bp, vnic); 2637 /* This filter will allow only untagged packets */ 2638 bnxt_add_vlan_filter(bp, 0); 2639 } 2640 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", 2641 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)); 2642 2643 return 0; 2644 } 2645 2646 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) 2647 { 2648 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 2649 unsigned int i; 2650 int rc; 2651 2652 /* Destroy vnic filters and vnic */ 2653 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2654 DEV_RX_OFFLOAD_VLAN_FILTER) { 2655 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2656 bnxt_del_vlan_filter(bp, i); 2657 } 2658 bnxt_del_dflt_mac_filter(bp, vnic); 2659 2660 rc = bnxt_hwrm_vnic_ctx_free(bp, vnic); 2661 if (rc) 2662 return rc; 2663 2664 rc = bnxt_hwrm_vnic_free(bp, vnic); 2665 if (rc) 2666 return rc; 2667 2668 rte_free(vnic->fw_grp_ids); 2669 vnic->fw_grp_ids = NULL; 2670 2671 vnic->rx_queue_cnt = 0; 2672 2673 return 0; 2674 } 2675 2676 static int 2677 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads) 2678 { 2679 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2680 int rc; 2681 2682 /* Destroy, recreate and reconfigure the default vnic */ 2683 rc = bnxt_free_one_vnic(bp, 0); 2684 if (rc) 2685 return rc; 2686 2687 /* default vnic 0 */ 2688 rc = bnxt_setup_one_vnic(bp, 0); 2689 if (rc) 2690 return rc; 2691 2692 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2693 DEV_RX_OFFLOAD_VLAN_FILTER) { 2694 rc = bnxt_add_vlan_filter(bp, 0); 2695 if (rc) 2696 return rc; 2697 rc = bnxt_restore_vlan_filters(bp); 2698 if (rc) 2699 return rc; 2700 } else { 2701 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2702 if (rc) 2703 return rc; 2704 } 2705 2706 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2707 if (rc) 2708 return rc; 2709 2710 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", 2711 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)); 2712 2713 return rc; 2714 } 2715 2716 static int 2717 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 2718 { 2719 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 2720 struct bnxt *bp = dev->data->dev_private; 2721 int rc; 2722 2723 rc = is_bnxt_in_error(bp); 2724 if (rc) 2725 return rc; 2726 2727 /* Filter settings will get applied when port is started */ 2728 if (!dev->data->dev_started) 2729 return 0; 2730 2731 if (mask & ETH_VLAN_FILTER_MASK) { 2732 /* Enable or disable VLAN filtering */ 2733 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads); 2734 if (rc) 2735 return rc; 2736 } 2737 2738 if (mask & ETH_VLAN_STRIP_MASK) { 2739 /* Enable or disable VLAN stripping */ 2740 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads); 2741 if (rc) 2742 return rc; 2743 } 2744 2745 if (mask & ETH_VLAN_EXTEND_MASK) { 2746 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2747 PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); 2748 else 2749 PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); 2750 } 2751 2752 return 0; 2753 } 2754 2755 static int 2756 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, 2757 uint16_t tpid) 2758 { 2759 struct bnxt *bp = dev->data->dev_private; 2760 int qinq = dev->data->dev_conf.rxmode.offloads & 2761 DEV_RX_OFFLOAD_VLAN_EXTEND; 2762 2763 if (vlan_type != ETH_VLAN_TYPE_INNER && 2764 vlan_type != ETH_VLAN_TYPE_OUTER) { 2765 PMD_DRV_LOG(ERR, 2766 "Unsupported 
vlan type."); 2767 return -EINVAL; 2768 } 2769 if (!qinq) { 2770 PMD_DRV_LOG(ERR, 2771 "QinQ not enabled. Needs to be ON as we can " 2772 "accelerate only outer vlan\n"); 2773 return -EINVAL; 2774 } 2775 2776 if (vlan_type == ETH_VLAN_TYPE_OUTER) { 2777 switch (tpid) { 2778 case RTE_ETHER_TYPE_QINQ: 2779 bp->outer_tpid_bd = 2780 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; 2781 break; 2782 case RTE_ETHER_TYPE_VLAN: 2783 bp->outer_tpid_bd = 2784 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; 2785 break; 2786 case RTE_ETHER_TYPE_QINQ1: 2787 bp->outer_tpid_bd = 2788 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; 2789 break; 2790 case RTE_ETHER_TYPE_QINQ2: 2791 bp->outer_tpid_bd = 2792 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; 2793 break; 2794 case RTE_ETHER_TYPE_QINQ3: 2795 bp->outer_tpid_bd = 2796 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; 2797 break; 2798 default: 2799 PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); 2800 return -EINVAL; 2801 } 2802 bp->outer_tpid_bd |= tpid; 2803 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); 2804 } else if (vlan_type == ETH_VLAN_TYPE_INNER) { 2805 PMD_DRV_LOG(ERR, 2806 "Can accelerate only outer vlan in QinQ\n"); 2807 return -EINVAL; 2808 } 2809 2810 return 0; 2811 } 2812 2813 static int 2814 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 2815 struct rte_ether_addr *addr) 2816 { 2817 struct bnxt *bp = dev->data->dev_private; 2818 /* Default Filter is tied to VNIC 0 */ 2819 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2820 int rc; 2821 2822 rc = is_bnxt_in_error(bp); 2823 if (rc) 2824 return rc; 2825 2826 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 2827 return -EPERM; 2828 2829 if (rte_is_zero_ether_addr(addr)) 2830 return -EINVAL; 2831 2832 /* Filter settings will get applied when port is started */ 2833 if (!dev->data->dev_started) 2834 return 0; 2835 2836 /* Check if the requested MAC is already added */ 2837 if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0) 2838 return 0; 2839 2840 /* Destroy filter and re-create it */ 2841 bnxt_del_dflt_mac_filter(bp, vnic); 2842 2843 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 2844 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 2845 /* This filter will allow only untagged packets */ 2846 rc = bnxt_add_vlan_filter(bp, 0); 2847 } else { 2848 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0); 2849 } 2850 2851 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 2852 return rc; 2853 } 2854 2855 static int 2856 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 2857 struct rte_ether_addr *mc_addr_set, 2858 uint32_t nb_mc_addr) 2859 { 2860 struct bnxt *bp = eth_dev->data->dev_private; 2861 char *mc_addr_list = (char *)mc_addr_set; 2862 struct bnxt_vnic_info *vnic; 2863 uint32_t off = 0, i = 0; 2864 int rc; 2865 2866 rc = is_bnxt_in_error(bp); 2867 if (rc) 2868 return rc; 2869 2870 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2871 2872 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 2873 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 2874 goto allmulti; 2875 } 2876 2877 /* TODO Check for Duplicate mcast addresses */ 2878 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 2879 for (i = 0; i < nb_mc_addr; i++) { 2880 memcpy(vnic->mc_list + off, &mc_addr_list[i], 2881 RTE_ETHER_ADDR_LEN); 2882 off += RTE_ETHER_ADDR_LEN; 2883 } 2884 2885 vnic->mc_addr_cnt = i; 2886 if (vnic->mc_addr_cnt) 2887 vnic->flags |= BNXT_VNIC_INFO_MCAST; 2888 else 2889 vnic->flags &= ~BNXT_VNIC_INFO_MCAST; 2890 2891 allmulti: 2892 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2893 } 2894 2895 static int 2896 bnxt_fw_version_get(struct rte_eth_dev 
*dev, char *fw_version, size_t fw_size) 2897 { 2898 struct bnxt *bp = dev->data->dev_private; 2899 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 2900 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 2901 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 2902 uint8_t fw_rsvd = bp->fw_ver & 0xff; 2903 int ret; 2904 2905 ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", 2906 fw_major, fw_minor, fw_updt, fw_rsvd); 2907 if (ret < 0) 2908 return -EINVAL; 2909 2910 ret += 1; /* add the size of '\0' */ 2911 if (fw_size < (size_t)ret) 2912 return ret; 2913 else 2914 return 0; 2915 } 2916 2917 static void 2918 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2919 struct rte_eth_rxq_info *qinfo) 2920 { 2921 struct bnxt *bp = dev->data->dev_private; 2922 struct bnxt_rx_queue *rxq; 2923 2924 if (is_bnxt_in_error(bp)) 2925 return; 2926 2927 rxq = dev->data->rx_queues[queue_id]; 2928 2929 qinfo->mp = rxq->mb_pool; 2930 qinfo->scattered_rx = dev->data->scattered_rx; 2931 qinfo->nb_desc = rxq->nb_rx_desc; 2932 2933 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 2934 qinfo->conf.rx_drop_en = rxq->drop_en; 2935 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 2936 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 2937 } 2938 2939 static void 2940 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2941 struct rte_eth_txq_info *qinfo) 2942 { 2943 struct bnxt *bp = dev->data->dev_private; 2944 struct bnxt_tx_queue *txq; 2945 2946 if (is_bnxt_in_error(bp)) 2947 return; 2948 2949 txq = dev->data->tx_queues[queue_id]; 2950 2951 qinfo->nb_desc = txq->nb_tx_desc; 2952 2953 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 2954 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 2955 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 2956 2957 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 2958 qinfo->conf.tx_rs_thresh = 0; 2959 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 2960 qinfo->conf.offloads = txq->offloads; 2961 } 2962 2963 static const struct { 2964 eth_rx_burst_t pkt_burst; 2965 const char *info; 2966 } bnxt_rx_burst_info[] = { 2967 {bnxt_recv_pkts, "Scalar"}, 2968 #if defined(RTE_ARCH_X86) 2969 {bnxt_recv_pkts_vec, "Vector SSE"}, 2970 #endif 2971 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 2972 {bnxt_recv_pkts_vec_avx2, "Vector AVX2"}, 2973 #endif 2974 #if defined(RTE_ARCH_ARM64) 2975 {bnxt_recv_pkts_vec, "Vector Neon"}, 2976 #endif 2977 }; 2978 2979 static int 2980 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2981 struct rte_eth_burst_mode *mode) 2982 { 2983 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 2984 size_t i; 2985 2986 for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) { 2987 if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) { 2988 snprintf(mode->info, sizeof(mode->info), "%s", 2989 bnxt_rx_burst_info[i].info); 2990 return 0; 2991 } 2992 } 2993 2994 return -EINVAL; 2995 } 2996 2997 static const struct { 2998 eth_tx_burst_t pkt_burst; 2999 const char *info; 3000 } bnxt_tx_burst_info[] = { 3001 {bnxt_xmit_pkts, "Scalar"}, 3002 #if defined(RTE_ARCH_X86) 3003 {bnxt_xmit_pkts_vec, "Vector SSE"}, 3004 #endif 3005 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 3006 {bnxt_xmit_pkts_vec_avx2, "Vector AVX2"}, 3007 #endif 3008 #if defined(RTE_ARCH_ARM64) 3009 {bnxt_xmit_pkts_vec, "Vector Neon"}, 3010 #endif 3011 }; 3012 3013 static int 3014 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 3015 struct rte_eth_burst_mode *mode) 3016 { 3017 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 
3018 size_t i; 3019 3020 for (i = 0; i < RTE_DIM(bnxt_tx_burst_info); i++) { 3021 if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) { 3022 snprintf(mode->info, sizeof(mode->info), "%s", 3023 bnxt_tx_burst_info[i].info); 3024 return 0; 3025 } 3026 } 3027 3028 return -EINVAL; 3029 } 3030 3031 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 3032 { 3033 struct bnxt *bp = eth_dev->data->dev_private; 3034 uint32_t new_pkt_size; 3035 uint32_t rc = 0; 3036 uint32_t i; 3037 3038 rc = is_bnxt_in_error(bp); 3039 if (rc) 3040 return rc; 3041 3042 /* Exit if receive queues are not configured yet */ 3043 if (!eth_dev->data->nb_rx_queues) 3044 return rc; 3045 3046 new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 3047 VLAN_TAG_SIZE * BNXT_NUM_VLANS; 3048 3049 /* 3050 * Disallow any MTU change that would require scattered receive support 3051 * if it is not already enabled. 3052 */ 3053 if (eth_dev->data->dev_started && 3054 !eth_dev->data->scattered_rx && 3055 (new_pkt_size > 3056 eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 3057 PMD_DRV_LOG(ERR, 3058 "MTU change would require scattered rx support. "); 3059 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n"); 3060 return -EINVAL; 3061 } 3062 3063 if (new_mtu > RTE_ETHER_MTU) { 3064 bp->flags |= BNXT_FLAG_JUMBO; 3065 bp->eth_dev->data->dev_conf.rxmode.offloads |= 3066 DEV_RX_OFFLOAD_JUMBO_FRAME; 3067 } else { 3068 bp->eth_dev->data->dev_conf.rxmode.offloads &= 3069 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 3070 bp->flags &= ~BNXT_FLAG_JUMBO; 3071 } 3072 3073 /* Is there a change in mtu setting? */ 3074 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size) 3075 return rc; 3076 3077 for (i = 0; i < bp->nr_vnics; i++) { 3078 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3079 uint16_t size = 0; 3080 3081 vnic->mru = BNXT_VNIC_MRU(new_mtu); 3082 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 3083 if (rc) 3084 break; 3085 3086 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); 3087 size -= RTE_PKTMBUF_HEADROOM; 3088 3089 if (size < new_mtu) { 3090 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 3091 if (rc) 3092 return rc; 3093 } 3094 } 3095 3096 if (!rc) 3097 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size; 3098 3099 if (bnxt_hwrm_config_host_mtu(bp)) 3100 PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n"); 3101 3102 PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); 3103 3104 return rc; 3105 } 3106 3107 static int 3108 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 3109 { 3110 struct bnxt *bp = dev->data->dev_private; 3111 uint16_t vlan = bp->vlan; 3112 int rc; 3113 3114 rc = is_bnxt_in_error(bp); 3115 if (rc) 3116 return rc; 3117 3118 if (!BNXT_SINGLE_PF(bp)) { 3119 PMD_DRV_LOG(ERR, "PVID cannot be modified on VF or on shared PF\n"); 3120 return -ENOTSUP; 3121 } 3122 bp->vlan = on ? 
pvid : 0; 3123 3124 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 3125 if (rc) 3126 bp->vlan = vlan; 3127 return rc; 3128 } 3129 3130 static int 3131 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 3132 { 3133 struct bnxt *bp = dev->data->dev_private; 3134 int rc; 3135 3136 rc = is_bnxt_in_error(bp); 3137 if (rc) 3138 return rc; 3139 3140 return bnxt_hwrm_port_led_cfg(bp, true); 3141 } 3142 3143 static int 3144 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 3145 { 3146 struct bnxt *bp = dev->data->dev_private; 3147 int rc; 3148 3149 rc = is_bnxt_in_error(bp); 3150 if (rc) 3151 return rc; 3152 3153 return bnxt_hwrm_port_led_cfg(bp, false); 3154 } 3155 3156 static uint32_t 3157 bnxt_rx_queue_count_op(void *rx_queue) 3158 { 3159 struct bnxt *bp; 3160 struct bnxt_cp_ring_info *cpr; 3161 uint32_t desc = 0, raw_cons, cp_ring_size; 3162 struct bnxt_rx_queue *rxq; 3163 struct rx_pkt_cmpl *rxcmp; 3164 int rc; 3165 3166 rxq = rx_queue; 3167 bp = rxq->bp; 3168 3169 rc = is_bnxt_in_error(bp); 3170 if (rc) 3171 return rc; 3172 3173 cpr = rxq->cp_ring; 3174 raw_cons = cpr->cp_raw_cons; 3175 cp_ring_size = cpr->cp_ring_struct->ring_size; 3176 3177 while (1) { 3178 uint32_t agg_cnt, cons, cmpl_type; 3179 3180 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3181 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3182 3183 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3184 break; 3185 3186 cmpl_type = CMP_TYPE(rxcmp); 3187 3188 switch (cmpl_type) { 3189 case CMPL_BASE_TYPE_RX_L2: 3190 case CMPL_BASE_TYPE_RX_L2_V2: 3191 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3192 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3193 desc++; 3194 break; 3195 3196 case CMPL_BASE_TYPE_RX_TPA_END: 3197 if (BNXT_CHIP_P5(rxq->bp)) { 3198 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3199 3200 p5_tpa_end = (void *)rxcmp; 3201 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3202 } else { 3203 struct rx_tpa_end_cmpl *tpa_end; 3204 3205 tpa_end = (void *)rxcmp; 3206 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3207 } 3208 3209 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3210 desc++; 3211 break; 3212 3213 default: 3214 raw_cons += CMP_LEN(cmpl_type); 3215 } 3216 } 3217 3218 return desc; 3219 } 3220 3221 static int 3222 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 3223 { 3224 struct bnxt_rx_queue *rxq = rx_queue; 3225 struct bnxt_cp_ring_info *cpr; 3226 struct bnxt_rx_ring_info *rxr; 3227 uint32_t desc, raw_cons, cp_ring_size; 3228 struct bnxt *bp = rxq->bp; 3229 struct rx_pkt_cmpl *rxcmp; 3230 int rc; 3231 3232 rc = is_bnxt_in_error(bp); 3233 if (rc) 3234 return rc; 3235 3236 if (offset >= rxq->nb_rx_desc) 3237 return -EINVAL; 3238 3239 rxr = rxq->rx_ring; 3240 cpr = rxq->cp_ring; 3241 cp_ring_size = cpr->cp_ring_struct->ring_size; 3242 3243 /* 3244 * For the vector receive case, the completion at the requested 3245 * offset can be indexed directly. 3246 */ 3247 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 3248 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { 3249 struct rx_pkt_cmpl *rxcmp; 3250 uint32_t cons; 3251 3252 /* Check status of completion descriptor. */ 3253 raw_cons = cpr->cp_raw_cons + 3254 offset * CMP_LEN(CMPL_BASE_TYPE_RX_L2); 3255 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3256 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3257 3258 if (bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3259 return RTE_ETH_RX_DESC_DONE; 3260 3261 /* Check whether rx desc has an mbuf attached. 
*/ 3262 cons = RING_CMP(rxr->rx_ring_struct, raw_cons / 2); 3263 if (cons >= rxq->rxrearm_start && 3264 cons < rxq->rxrearm_start + rxq->rxrearm_nb) { 3265 return RTE_ETH_RX_DESC_UNAVAIL; 3266 } 3267 3268 return RTE_ETH_RX_DESC_AVAIL; 3269 } 3270 #endif 3271 3272 /* 3273 * For the non-vector receive case, scan the completion ring to 3274 * locate the completion descriptor for the requested offset. 3275 */ 3276 raw_cons = cpr->cp_raw_cons; 3277 desc = 0; 3278 while (1) { 3279 uint32_t agg_cnt, cons, cmpl_type; 3280 3281 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3282 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3283 3284 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3285 break; 3286 3287 cmpl_type = CMP_TYPE(rxcmp); 3288 3289 switch (cmpl_type) { 3290 case CMPL_BASE_TYPE_RX_L2: 3291 case CMPL_BASE_TYPE_RX_L2_V2: 3292 if (desc == offset) { 3293 cons = rxcmp->opaque; 3294 if (rxr->rx_buf_ring[cons]) 3295 return RTE_ETH_RX_DESC_DONE; 3296 else 3297 return RTE_ETH_RX_DESC_UNAVAIL; 3298 } 3299 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3300 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3301 desc++; 3302 break; 3303 3304 case CMPL_BASE_TYPE_RX_TPA_END: 3305 if (desc == offset) 3306 return RTE_ETH_RX_DESC_DONE; 3307 3308 if (BNXT_CHIP_P5(rxq->bp)) { 3309 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3310 3311 p5_tpa_end = (void *)rxcmp; 3312 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3313 } else { 3314 struct rx_tpa_end_cmpl *tpa_end; 3315 3316 tpa_end = (void *)rxcmp; 3317 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3318 } 3319 3320 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3321 desc++; 3322 break; 3323 3324 default: 3325 raw_cons += CMP_LEN(cmpl_type); 3326 } 3327 } 3328 3329 return RTE_ETH_RX_DESC_AVAIL; 3330 } 3331 3332 static int 3333 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 3334 { 3335 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 3336 struct bnxt_cp_ring_info *cpr = txq->cp_ring; 3337 uint32_t ring_mask, raw_cons, nb_tx_pkts = 0; 3338 struct cmpl_base *cp_desc_ring; 3339 int rc; 3340 3341 rc = is_bnxt_in_error(txq->bp); 3342 if (rc) 3343 return rc; 3344 3345 if (offset >= txq->nb_tx_desc) 3346 return -EINVAL; 3347 3348 /* Return "desc done" if descriptor is available for use. */ 3349 if (bnxt_tx_bds_in_hw(txq) <= offset) 3350 return RTE_ETH_TX_DESC_DONE; 3351 3352 raw_cons = cpr->cp_raw_cons; 3353 cp_desc_ring = cpr->cp_desc_ring; 3354 ring_mask = cpr->cp_ring_struct->ring_mask; 3355 3356 /* Check to see if hw has posted a completion for the descriptor. */ 3357 while (1) { 3358 struct tx_cmpl *txcmp; 3359 uint32_t cons; 3360 3361 cons = RING_CMPL(ring_mask, raw_cons); 3362 txcmp = (struct tx_cmpl *)&cp_desc_ring[cons]; 3363 3364 if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1)) 3365 break; 3366 3367 if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) 3368 nb_tx_pkts += rte_le_to_cpu_32(txcmp->opaque); 3369 3370 if (nb_tx_pkts > offset) 3371 return RTE_ETH_TX_DESC_DONE; 3372 3373 raw_cons = NEXT_RAW_CMP(raw_cons); 3374 } 3375 3376 /* Descriptor is pending transmit, not yet completed by hardware. 
*/ 3377 return RTE_ETH_TX_DESC_FULL; 3378 } 3379 3380 int 3381 bnxt_flow_ops_get_op(struct rte_eth_dev *dev, 3382 const struct rte_flow_ops **ops) 3383 { 3384 struct bnxt *bp = dev->data->dev_private; 3385 int ret = 0; 3386 3387 if (!bp) 3388 return -EIO; 3389 3390 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) { 3391 struct bnxt_representor *vfr = dev->data->dev_private; 3392 bp = vfr->parent_dev->data->dev_private; 3393 /* parent is deleted while children are still valid */ 3394 if (!bp) { 3395 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error\n", 3396 dev->data->port_id); 3397 return -EIO; 3398 } 3399 } 3400 3401 ret = is_bnxt_in_error(bp); 3402 if (ret) 3403 return ret; 3404 3405 /* PMD supports thread-safe flow operations. rte_flow API 3406 * functions can avoid mutex for multi-thread safety. 3407 */ 3408 dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; 3409 3410 if (BNXT_TRUFLOW_EN(bp)) 3411 *ops = &bnxt_ulp_rte_flow_ops; 3412 else 3413 *ops = &bnxt_flow_ops; 3414 3415 return ret; 3416 } 3417 3418 static const uint32_t * 3419 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 3420 { 3421 static const uint32_t ptypes[] = { 3422 RTE_PTYPE_L2_ETHER_VLAN, 3423 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 3424 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 3425 RTE_PTYPE_L4_ICMP, 3426 RTE_PTYPE_L4_TCP, 3427 RTE_PTYPE_L4_UDP, 3428 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 3429 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 3430 RTE_PTYPE_INNER_L4_ICMP, 3431 RTE_PTYPE_INNER_L4_TCP, 3432 RTE_PTYPE_INNER_L4_UDP, 3433 RTE_PTYPE_UNKNOWN 3434 }; 3435 3436 if (!dev->rx_pkt_burst) 3437 return NULL; 3438 3439 return ptypes; 3440 } 3441 3442 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 3443 int reg_win) 3444 { 3445 uint32_t reg_base = *reg_arr & 0xfffff000; 3446 uint32_t win_off; 3447 int i; 3448 3449 for (i = 0; i < count; i++) { 3450 if ((reg_arr[i] & 0xfffff000) != reg_base) 3451 return -ERANGE; 3452 } 3453 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 3454 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 3455 return 0; 3456 } 3457 3458 static int bnxt_map_ptp_regs(struct bnxt *bp) 3459 { 3460 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3461 uint32_t *reg_arr; 3462 int rc, i; 3463 3464 reg_arr = ptp->rx_regs; 3465 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 3466 if (rc) 3467 return rc; 3468 3469 reg_arr = ptp->tx_regs; 3470 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 3471 if (rc) 3472 return rc; 3473 3474 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 3475 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 3476 3477 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 3478 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 3479 3480 return 0; 3481 } 3482 3483 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 3484 { 3485 rte_write32(0, (uint8_t *)bp->bar0 + 3486 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 3487 rte_write32(0, (uint8_t *)bp->bar0 + 3488 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 3489 } 3490 3491 static uint64_t bnxt_cc_read(struct bnxt *bp) 3492 { 3493 uint64_t ns; 3494 3495 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3496 BNXT_GRCPF_REG_SYNC_TIME)); 3497 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3498 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 3499 return ns; 3500 } 3501 3502 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 3503 { 3504 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3505 uint32_t fifo; 3506 3507 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3508 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3509 if (fifo & 
BNXT_PTP_TX_FIFO_EMPTY) 3510 return -EAGAIN; 3511 3512 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3513 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3514 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3515 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 3516 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3517 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 3518 rte_read32((uint8_t *)bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_SEQ]); 3519 3520 return 0; 3521 } 3522 3523 static int bnxt_clr_rx_ts(struct bnxt *bp, uint64_t *last_ts) 3524 { 3525 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3526 struct bnxt_pf_info *pf = bp->pf; 3527 uint16_t port_id; 3528 int i = 0; 3529 uint32_t fifo; 3530 3531 if (!ptp || (bp->flags & BNXT_FLAG_CHIP_P5)) 3532 return -EINVAL; 3533 3534 port_id = pf->port_id; 3535 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3536 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3537 while ((fifo & BNXT_PTP_RX_FIFO_PENDING) && (i < BNXT_PTP_RX_PND_CNT)) { 3538 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3539 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3540 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3541 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3542 *last_ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3543 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3544 *last_ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3545 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3546 i++; 3547 } 3548 3549 if (i >= BNXT_PTP_RX_PND_CNT) 3550 return -EBUSY; 3551 3552 return 0; 3553 } 3554 3555 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 3556 { 3557 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3558 struct bnxt_pf_info *pf = bp->pf; 3559 uint16_t port_id; 3560 uint32_t fifo; 3561 3562 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3563 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3564 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 3565 return -EAGAIN; 3566 3567 port_id = pf->port_id; 3568 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3569 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3570 3571 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3572 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3573 if (fifo & BNXT_PTP_RX_FIFO_PENDING) 3574 return bnxt_clr_rx_ts(bp, ts); 3575 3576 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3577 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3578 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3579 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3580 3581 return 0; 3582 } 3583 3584 static int 3585 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 3586 { 3587 uint64_t ns; 3588 struct bnxt *bp = dev->data->dev_private; 3589 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3590 3591 if (!ptp) 3592 return -ENOTSUP; 3593 3594 ns = rte_timespec_to_ns(ts); 3595 /* Set the timecounters to a new value. 
*/ 3596 ptp->tc.nsec = ns; 3597 ptp->tx_tstamp_tc.nsec = ns; 3598 ptp->rx_tstamp_tc.nsec = ns; 3599 3600 return 0; 3601 } 3602 3603 static int 3604 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 3605 { 3606 struct bnxt *bp = dev->data->dev_private; 3607 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3608 uint64_t ns, systime_cycles = 0; 3609 int rc = 0; 3610 3611 if (!ptp) 3612 return -ENOTSUP; 3613 3614 if (BNXT_CHIP_P5(bp)) 3615 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 3616 &systime_cycles); 3617 else 3618 systime_cycles = bnxt_cc_read(bp); 3619 3620 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3621 *ts = rte_ns_to_timespec(ns); 3622 3623 return rc; 3624 } 3625 static int 3626 bnxt_timesync_enable(struct rte_eth_dev *dev) 3627 { 3628 struct bnxt *bp = dev->data->dev_private; 3629 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3630 uint32_t shift = 0; 3631 int rc; 3632 3633 if (!ptp) 3634 return -ENOTSUP; 3635 3636 ptp->rx_filter = 1; 3637 ptp->tx_tstamp_en = 1; 3638 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3639 3640 rc = bnxt_hwrm_ptp_cfg(bp); 3641 if (rc) 3642 return rc; 3643 3644 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 3645 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3646 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3647 3648 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3649 ptp->tc.cc_shift = shift; 3650 ptp->tc.nsec_mask = (1ULL << shift) - 1; 3651 3652 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3653 ptp->rx_tstamp_tc.cc_shift = shift; 3654 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3655 3656 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3657 ptp->tx_tstamp_tc.cc_shift = shift; 3658 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3659 3660 if (!BNXT_CHIP_P5(bp)) 3661 bnxt_map_ptp_regs(bp); 3662 else 3663 rc = bnxt_ptp_start(bp); 3664 3665 return rc; 3666 } 3667 3668 static int 3669 bnxt_timesync_disable(struct rte_eth_dev *dev) 3670 { 3671 struct bnxt *bp = dev->data->dev_private; 3672 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3673 3674 if (!ptp) 3675 return -ENOTSUP; 3676 3677 ptp->rx_filter = 0; 3678 ptp->tx_tstamp_en = 0; 3679 ptp->rxctl = 0; 3680 3681 bnxt_hwrm_ptp_cfg(bp); 3682 3683 if (!BNXT_CHIP_P5(bp)) 3684 bnxt_unmap_ptp_regs(bp); 3685 else 3686 bnxt_ptp_stop(bp); 3687 3688 return 0; 3689 } 3690 3691 static int 3692 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3693 struct timespec *timestamp, 3694 uint32_t flags __rte_unused) 3695 { 3696 struct bnxt *bp = dev->data->dev_private; 3697 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3698 uint64_t rx_tstamp_cycles = 0; 3699 uint64_t ns; 3700 3701 if (!ptp) 3702 return -ENOTSUP; 3703 3704 if (BNXT_CHIP_P5(bp)) 3705 rx_tstamp_cycles = ptp->rx_timestamp; 3706 else 3707 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 3708 3709 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 3710 *timestamp = rte_ns_to_timespec(ns); 3711 return 0; 3712 } 3713 3714 static int 3715 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3716 struct timespec *timestamp) 3717 { 3718 struct bnxt *bp = dev->data->dev_private; 3719 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3720 uint64_t tx_tstamp_cycles = 0; 3721 uint64_t ns; 3722 int rc = 0; 3723 3724 if (!ptp) 3725 return -ENOTSUP; 3726 3727 if (BNXT_CHIP_P5(bp)) 3728 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, 3729 &tx_tstamp_cycles); 3730 else 3731 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 3732 3733 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 3734 
*timestamp = rte_ns_to_timespec(ns); 3735 3736 return rc; 3737 } 3738 3739 static int 3740 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 3741 { 3742 struct bnxt *bp = dev->data->dev_private; 3743 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3744 3745 if (!ptp) 3746 return -ENOTSUP; 3747 3748 ptp->tc.nsec += delta; 3749 ptp->tx_tstamp_tc.nsec += delta; 3750 ptp->rx_tstamp_tc.nsec += delta; 3751 3752 return 0; 3753 } 3754 3755 static int 3756 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 3757 { 3758 struct bnxt *bp = dev->data->dev_private; 3759 int rc; 3760 uint32_t dir_entries; 3761 uint32_t entry_length; 3762 3763 rc = is_bnxt_in_error(bp); 3764 if (rc) 3765 return rc; 3766 3767 PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n", 3768 bp->pdev->addr.domain, bp->pdev->addr.bus, 3769 bp->pdev->addr.devid, bp->pdev->addr.function); 3770 3771 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 3772 if (rc != 0) 3773 return rc; 3774 3775 return dir_entries * entry_length; 3776 } 3777 3778 static int 3779 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 3780 struct rte_dev_eeprom_info *in_eeprom) 3781 { 3782 struct bnxt *bp = dev->data->dev_private; 3783 uint32_t index; 3784 uint32_t offset; 3785 int rc; 3786 3787 rc = is_bnxt_in_error(bp); 3788 if (rc) 3789 return rc; 3790 3791 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3792 bp->pdev->addr.domain, bp->pdev->addr.bus, 3793 bp->pdev->addr.devid, bp->pdev->addr.function, 3794 in_eeprom->offset, in_eeprom->length); 3795 3796 if (in_eeprom->offset == 0) /* special offset value to get directory */ 3797 return bnxt_get_nvram_directory(bp, in_eeprom->length, 3798 in_eeprom->data); 3799 3800 index = in_eeprom->offset >> 24; 3801 offset = in_eeprom->offset & 0xffffff; 3802 3803 if (index != 0) 3804 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 3805 in_eeprom->length, in_eeprom->data); 3806 3807 return 0; 3808 } 3809 3810 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 3811 { 3812 switch (dir_type) { 3813 case BNX_DIR_TYPE_CHIMP_PATCH: 3814 case BNX_DIR_TYPE_BOOTCODE: 3815 case BNX_DIR_TYPE_BOOTCODE_2: 3816 case BNX_DIR_TYPE_APE_FW: 3817 case BNX_DIR_TYPE_APE_PATCH: 3818 case BNX_DIR_TYPE_KONG_FW: 3819 case BNX_DIR_TYPE_KONG_PATCH: 3820 case BNX_DIR_TYPE_BONO_FW: 3821 case BNX_DIR_TYPE_BONO_PATCH: 3822 /* FALLTHROUGH */ 3823 return true; 3824 } 3825 3826 return false; 3827 } 3828 3829 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 3830 { 3831 switch (dir_type) { 3832 case BNX_DIR_TYPE_AVS: 3833 case BNX_DIR_TYPE_EXP_ROM_MBA: 3834 case BNX_DIR_TYPE_PCIE: 3835 case BNX_DIR_TYPE_TSCF_UCODE: 3836 case BNX_DIR_TYPE_EXT_PHY: 3837 case BNX_DIR_TYPE_CCM: 3838 case BNX_DIR_TYPE_ISCSI_BOOT: 3839 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3840 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3841 /* FALLTHROUGH */ 3842 return true; 3843 } 3844 3845 return false; 3846 } 3847 3848 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 3849 { 3850 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3851 bnxt_dir_type_is_other_exec_format(dir_type); 3852 } 3853 3854 static int 3855 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 3856 struct rte_dev_eeprom_info *in_eeprom) 3857 { 3858 struct bnxt *bp = dev->data->dev_private; 3859 uint8_t index, dir_op; 3860 uint16_t type, ext, ordinal, attr; 3861 int rc; 3862 3863 rc = is_bnxt_in_error(bp); 3864 if (rc) 3865 return rc; 3866 3867 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3868 bp->pdev->addr.domain, bp->pdev->addr.bus, 3869 
bp->pdev->addr.devid, bp->pdev->addr.function, 3870 in_eeprom->offset, in_eeprom->length); 3871 3872 if (!BNXT_PF(bp)) { 3873 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 3874 return -EINVAL; 3875 } 3876 3877 type = in_eeprom->magic >> 16; 3878 3879 if (type == 0xffff) { /* special value for directory operations */ 3880 index = in_eeprom->magic & 0xff; 3881 dir_op = in_eeprom->magic >> 8; 3882 if (index == 0) 3883 return -EINVAL; 3884 switch (dir_op) { 3885 case 0x0e: /* erase */ 3886 if (in_eeprom->offset != ~in_eeprom->magic) 3887 return -EINVAL; 3888 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 3889 default: 3890 return -EINVAL; 3891 } 3892 } 3893 3894 /* Create or re-write an NVM item: */ 3895 if (bnxt_dir_type_is_executable(type) == true) 3896 return -EOPNOTSUPP; 3897 ext = in_eeprom->magic & 0xffff; 3898 ordinal = in_eeprom->offset >> 16; 3899 attr = in_eeprom->offset & 0xffff; 3900 3901 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 3902 in_eeprom->data, in_eeprom->length); 3903 } 3904 3905 static int bnxt_get_module_info(struct rte_eth_dev *dev, 3906 struct rte_eth_dev_module_info *modinfo) 3907 { 3908 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3909 struct bnxt *bp = dev->data->dev_private; 3910 int rc; 3911 3912 /* No point in going further if phy status indicates 3913 * module is not inserted or if it is powered down or 3914 * if it is of type 10GBase-T 3915 */ 3916 if (bp->link_info->module_status > 3917 HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG) { 3918 PMD_DRV_LOG(NOTICE, "Port %u : Module is not inserted or is powered down\n", 3919 dev->data->port_id); 3920 return -ENOTSUP; 3921 } 3922 3923 /* This feature is not supported in older firmware versions */ 3924 if (bp->hwrm_spec_code < 0x10202) { 3925 PMD_DRV_LOG(NOTICE, "Port %u : Feature is not supported in older firmware\n", 3926 dev->data->port_id); 3927 return -ENOTSUP; 3928 } 3929 3930 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3931 SFF_DIAG_SUPPORT_OFFSET + 1, 3932 module_info); 3933 3934 if (rc) 3935 return rc; 3936 3937 switch (module_info[0]) { 3938 case SFF_MODULE_ID_SFP: 3939 modinfo->type = RTE_ETH_MODULE_SFF_8472; 3940 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 3941 if (module_info[SFF_DIAG_SUPPORT_OFFSET] == 0) 3942 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3943 break; 3944 case SFF_MODULE_ID_QSFP: 3945 case SFF_MODULE_ID_QSFP_PLUS: 3946 modinfo->type = RTE_ETH_MODULE_SFF_8436; 3947 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3948 break; 3949 case SFF_MODULE_ID_QSFP28: 3950 modinfo->type = RTE_ETH_MODULE_SFF_8636; 3951 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 3952 if (module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) 3953 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_LEN; 3954 break; 3955 default: 3956 PMD_DRV_LOG(NOTICE, "Port %u : Unsupported module\n", dev->data->port_id); 3957 return -ENOTSUP; 3958 } 3959 3960 PMD_DRV_LOG(INFO, "Port %u : modinfo->type = %d modinfo->eeprom_len = %d\n", 3961 dev->data->port_id, modinfo->type, modinfo->eeprom_len); 3962 3963 return 0; 3964 } 3965 3966 static int bnxt_get_module_eeprom(struct rte_eth_dev *dev, 3967 struct rte_dev_eeprom_info *info) 3968 { 3969 uint8_t pg_addr[5] = { I2C_DEV_ADDR_A0, I2C_DEV_ADDR_A0 }; 3970 uint32_t offset = info->offset, length = info->length; 3971 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3972 struct bnxt *bp = dev->data->dev_private; 3973 uint8_t *data = info->data; 3974 uint8_t page = offset >> 7; 3975 uint8_t 
max_pages = 2; 3976 uint8_t opt_pages; 3977 int rc; 3978 3979 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3980 SFF_DIAG_SUPPORT_OFFSET + 1, 3981 module_info); 3982 if (rc) 3983 return rc; 3984 3985 switch (module_info[0]) { 3986 case SFF_MODULE_ID_SFP: 3987 module_info[SFF_DIAG_SUPPORT_OFFSET] = 0; 3988 if (module_info[SFF_DIAG_SUPPORT_OFFSET]) { 3989 pg_addr[2] = I2C_DEV_ADDR_A2; 3990 pg_addr[3] = I2C_DEV_ADDR_A2; 3991 max_pages = 4; 3992 } 3993 break; 3994 case SFF_MODULE_ID_QSFP28: 3995 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 3996 SFF8636_OPT_PAGES_OFFSET, 3997 1, &opt_pages); 3998 if (rc) 3999 return rc; 4000 4001 if (opt_pages & SFF8636_PAGE1_MASK) { 4002 pg_addr[2] = I2C_DEV_ADDR_A0; 4003 max_pages = 3; 4004 } 4005 if (opt_pages & SFF8636_PAGE2_MASK) { 4006 pg_addr[3] = I2C_DEV_ADDR_A0; 4007 max_pages = 4; 4008 } 4009 if (~module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) { 4010 pg_addr[4] = I2C_DEV_ADDR_A0; 4011 max_pages = 5; 4012 } 4013 break; 4014 default: 4015 break; 4016 } 4017 4018 memset(data, 0, length); 4019 4020 offset &= 0xff; 4021 while (length && page < max_pages) { 4022 uint8_t raw_page = page ? page - 1 : 0; 4023 uint16_t chunk; 4024 4025 if (pg_addr[page] == I2C_DEV_ADDR_A2) 4026 raw_page = 0; 4027 else if (page) 4028 offset |= 0x80; 4029 chunk = RTE_MIN(length, 256 - offset); 4030 4031 if (pg_addr[page]) { 4032 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, pg_addr[page], 4033 raw_page, offset, 4034 chunk, data); 4035 if (rc) 4036 return rc; 4037 } 4038 4039 data += chunk; 4040 length -= chunk; 4041 offset = 0; 4042 page += 1 + (chunk > 128); 4043 } 4044 4045 return length ? -EINVAL : 0; 4046 } 4047 4048 /* 4049 * Initialization 4050 */ 4051 4052 static const struct eth_dev_ops bnxt_dev_ops = { 4053 .dev_infos_get = bnxt_dev_info_get_op, 4054 .dev_close = bnxt_dev_close_op, 4055 .dev_configure = bnxt_dev_configure_op, 4056 .dev_start = bnxt_dev_start_op, 4057 .dev_stop = bnxt_dev_stop_op, 4058 .dev_set_link_up = bnxt_dev_set_link_up_op, 4059 .dev_set_link_down = bnxt_dev_set_link_down_op, 4060 .stats_get = bnxt_stats_get_op, 4061 .stats_reset = bnxt_stats_reset_op, 4062 .rx_queue_setup = bnxt_rx_queue_setup_op, 4063 .rx_queue_release = bnxt_rx_queue_release_op, 4064 .tx_queue_setup = bnxt_tx_queue_setup_op, 4065 .tx_queue_release = bnxt_tx_queue_release_op, 4066 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 4067 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 4068 .reta_update = bnxt_reta_update_op, 4069 .reta_query = bnxt_reta_query_op, 4070 .rss_hash_update = bnxt_rss_hash_update_op, 4071 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 4072 .link_update = bnxt_link_update_op, 4073 .promiscuous_enable = bnxt_promiscuous_enable_op, 4074 .promiscuous_disable = bnxt_promiscuous_disable_op, 4075 .allmulticast_enable = bnxt_allmulticast_enable_op, 4076 .allmulticast_disable = bnxt_allmulticast_disable_op, 4077 .mac_addr_add = bnxt_mac_addr_add_op, 4078 .mac_addr_remove = bnxt_mac_addr_remove_op, 4079 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 4080 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 4081 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 4082 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 4083 .vlan_filter_set = bnxt_vlan_filter_set_op, 4084 .vlan_offload_set = bnxt_vlan_offload_set_op, 4085 .vlan_tpid_set = bnxt_vlan_tpid_set_op, 4086 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 4087 .mtu_set = bnxt_mtu_set_op, 4088 .mac_addr_set = bnxt_set_default_mac_addr_op, 4089 .xstats_get = 
bnxt_dev_xstats_get_op, 4090 .xstats_get_names = bnxt_dev_xstats_get_names_op, 4091 .xstats_reset = bnxt_dev_xstats_reset_op, 4092 .fw_version_get = bnxt_fw_version_get, 4093 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 4094 .rxq_info_get = bnxt_rxq_info_get_op, 4095 .txq_info_get = bnxt_txq_info_get_op, 4096 .rx_burst_mode_get = bnxt_rx_burst_mode_get, 4097 .tx_burst_mode_get = bnxt_tx_burst_mode_get, 4098 .dev_led_on = bnxt_dev_led_on_op, 4099 .dev_led_off = bnxt_dev_led_off_op, 4100 .rx_queue_start = bnxt_rx_queue_start, 4101 .rx_queue_stop = bnxt_rx_queue_stop, 4102 .tx_queue_start = bnxt_tx_queue_start, 4103 .tx_queue_stop = bnxt_tx_queue_stop, 4104 .flow_ops_get = bnxt_flow_ops_get_op, 4105 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 4106 .get_eeprom_length = bnxt_get_eeprom_length_op, 4107 .get_eeprom = bnxt_get_eeprom_op, 4108 .set_eeprom = bnxt_set_eeprom_op, 4109 .get_module_info = bnxt_get_module_info, 4110 .get_module_eeprom = bnxt_get_module_eeprom, 4111 .timesync_enable = bnxt_timesync_enable, 4112 .timesync_disable = bnxt_timesync_disable, 4113 .timesync_read_time = bnxt_timesync_read_time, 4114 .timesync_write_time = bnxt_timesync_write_time, 4115 .timesync_adjust_time = bnxt_timesync_adjust_time, 4116 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 4117 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 4118 }; 4119 4120 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) 4121 { 4122 uint32_t offset; 4123 4124 /* Only pre-map the reset GRC registers using window 3 */ 4125 rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + 4126 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); 4127 4128 offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); 4129 4130 return offset; 4131 } 4132 4133 int bnxt_map_fw_health_status_regs(struct bnxt *bp) 4134 { 4135 struct bnxt_error_recovery_info *info = bp->recovery_info; 4136 uint32_t reg_base = 0xffffffff; 4137 int i; 4138 4139 /* Only pre-map the monitoring GRC registers using window 2 */ 4140 for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { 4141 uint32_t reg = info->status_regs[i]; 4142 4143 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) 4144 continue; 4145 4146 if (reg_base == 0xffffffff) 4147 reg_base = reg & 0xfffff000; 4148 if ((reg & 0xfffff000) != reg_base) 4149 return -ERANGE; 4150 4151 /* Use mask 0xffc as the Lower 2 bits indicates 4152 * address space location 4153 */ 4154 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + 4155 (reg & 0xffc); 4156 } 4157 4158 if (reg_base == 0xffffffff) 4159 return 0; 4160 4161 rte_write32(reg_base, (uint8_t *)bp->bar0 + 4162 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 4163 4164 return 0; 4165 } 4166 4167 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) 4168 { 4169 struct bnxt_error_recovery_info *info = bp->recovery_info; 4170 uint32_t delay = info->delay_after_reset[index]; 4171 uint32_t val = info->reset_reg_val[index]; 4172 uint32_t reg = info->reset_reg[index]; 4173 uint32_t type, offset; 4174 int ret; 4175 4176 type = BNXT_FW_STATUS_REG_TYPE(reg); 4177 offset = BNXT_FW_STATUS_REG_OFF(reg); 4178 4179 switch (type) { 4180 case BNXT_FW_STATUS_REG_TYPE_CFG: 4181 ret = rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); 4182 if (ret < 0) { 4183 PMD_DRV_LOG(ERR, "Failed to write %#x at PCI offset %#x", 4184 val, offset); 4185 return; 4186 } 4187 break; 4188 case BNXT_FW_STATUS_REG_TYPE_GRC: 4189 offset = bnxt_map_reset_regs(bp, offset); 4190 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4191 break; 4192 
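/* For the BAR0 register type below, the offset taken from the reset_reg entry is used directly as a BAR0 offset; no GRC window remapping is needed, unlike the GRC case above. */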
case BNXT_FW_STATUS_REG_TYPE_BAR0: 4193 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4194 break; 4195 } 4196 /* wait on a specific interval of time until core reset is complete */ 4197 if (delay) 4198 rte_delay_ms(delay); 4199 } 4200 4201 static void bnxt_dev_cleanup(struct bnxt *bp) 4202 { 4203 bp->eth_dev->data->dev_link.link_status = 0; 4204 bp->link_info->link_up = 0; 4205 if (bp->eth_dev->data->dev_started) 4206 bnxt_dev_stop(bp->eth_dev); 4207 4208 bnxt_uninit_resources(bp, true); 4209 } 4210 4211 static int 4212 bnxt_check_fw_reset_done(struct bnxt *bp) 4213 { 4214 int timeout = bp->fw_reset_max_msecs; 4215 uint16_t val = 0; 4216 int rc; 4217 4218 do { 4219 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4220 if (rc < 0) { 4221 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4222 return rc; 4223 } 4224 if (val != 0xffff) 4225 break; 4226 rte_delay_ms(1); 4227 } while (timeout--); 4228 4229 if (val == 0xffff) { 4230 PMD_DRV_LOG(ERR, "Firmware reset aborted, PCI config space invalid\n"); 4231 return -1; 4232 } 4233 4234 return 0; 4235 } 4236 4237 static int bnxt_restore_vlan_filters(struct bnxt *bp) 4238 { 4239 struct rte_eth_dev *dev = bp->eth_dev; 4240 struct rte_vlan_filter_conf *vfc; 4241 int vidx, vbit, rc; 4242 uint16_t vlan_id; 4243 4244 for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) { 4245 vfc = &dev->data->vlan_filter_conf; 4246 vidx = vlan_id / 64; 4247 vbit = vlan_id % 64; 4248 4249 /* Each bit corresponds to a VLAN id */ 4250 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) { 4251 rc = bnxt_add_vlan_filter(bp, vlan_id); 4252 if (rc) 4253 return rc; 4254 } 4255 } 4256 4257 return 0; 4258 } 4259 4260 static int bnxt_restore_mac_filters(struct bnxt *bp) 4261 { 4262 struct rte_eth_dev *dev = bp->eth_dev; 4263 struct rte_eth_dev_info dev_info; 4264 struct rte_ether_addr *addr; 4265 uint64_t pool_mask; 4266 uint32_t pool = 0; 4267 uint32_t i; 4268 int rc; 4269 4270 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4271 return 0; 4272 4273 rc = bnxt_dev_info_get_op(dev, &dev_info); 4274 if (rc) 4275 return rc; 4276 4277 /* replay MAC address configuration */ 4278 for (i = 1; i < dev_info.max_mac_addrs; i++) { 4279 addr = &dev->data->mac_addrs[i]; 4280 4281 /* skip zero address */ 4282 if (rte_is_zero_ether_addr(addr)) 4283 continue; 4284 4285 pool = 0; 4286 pool_mask = dev->data->mac_pool_sel[i]; 4287 4288 do { 4289 if (pool_mask & 1ULL) { 4290 rc = bnxt_mac_addr_add_op(dev, addr, i, pool); 4291 if (rc) 4292 return rc; 4293 } 4294 pool_mask >>= 1; 4295 pool++; 4296 } while (pool_mask); 4297 } 4298 4299 return 0; 4300 } 4301 4302 static int bnxt_restore_filters(struct bnxt *bp) 4303 { 4304 struct rte_eth_dev *dev = bp->eth_dev; 4305 int ret = 0; 4306 4307 if (dev->data->all_multicast) { 4308 ret = bnxt_allmulticast_enable_op(dev); 4309 if (ret) 4310 return ret; 4311 } 4312 if (dev->data->promiscuous) { 4313 ret = bnxt_promiscuous_enable_op(dev); 4314 if (ret) 4315 return ret; 4316 } 4317 4318 ret = bnxt_restore_mac_filters(bp); 4319 if (ret) 4320 return ret; 4321 4322 ret = bnxt_restore_vlan_filters(bp); 4323 /* TODO restore other filters as well */ 4324 return ret; 4325 } 4326 4327 static int bnxt_check_fw_ready(struct bnxt *bp) 4328 { 4329 int timeout = bp->fw_reset_max_msecs; 4330 int rc = 0; 4331 4332 do { 4333 rc = bnxt_hwrm_poll_ver_get(bp); 4334 if (rc == 0) 4335 break; 4336 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); 4337 timeout -= BNXT_FW_READY_WAIT_INTERVAL; 4338 } while (rc && timeout > 0); 4339 4340 
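/* A non-zero rc at this point means bnxt_hwrm_poll_ver_get() never succeeded before the fw_reset_max_msecs budget was exhausted. */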
if (rc) 4341 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); 4342 4343 return rc; 4344 } 4345 4346 static void bnxt_dev_recover(void *arg) 4347 { 4348 struct bnxt *bp = arg; 4349 int rc = 0; 4350 4351 pthread_mutex_lock(&bp->err_recovery_lock); 4352 4353 if (!bp->fw_reset_min_msecs) { 4354 rc = bnxt_check_fw_reset_done(bp); 4355 if (rc) 4356 goto err; 4357 } 4358 4359 /* Clear Error flag so that device re-init should happen */ 4360 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 4361 4362 rc = bnxt_check_fw_ready(bp); 4363 if (rc) 4364 goto err; 4365 4366 rc = bnxt_init_resources(bp, true); 4367 if (rc) { 4368 PMD_DRV_LOG(ERR, 4369 "Failed to initialize resources after reset\n"); 4370 goto err; 4371 } 4372 /* clear reset flag as the device is initialized now */ 4373 bp->flags &= ~BNXT_FLAG_FW_RESET; 4374 4375 rc = bnxt_dev_start_op(bp->eth_dev); 4376 if (rc) { 4377 PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); 4378 goto err_start; 4379 } 4380 4381 rc = bnxt_restore_filters(bp); 4382 if (rc) 4383 goto err_start; 4384 4385 PMD_DRV_LOG(INFO, "Recovered from FW reset\n"); 4386 pthread_mutex_unlock(&bp->err_recovery_lock); 4387 4388 return; 4389 err_start: 4390 bnxt_dev_stop(bp->eth_dev); 4391 err: 4392 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4393 bnxt_uninit_resources(bp, false); 4394 if (bp->eth_dev->data->dev_conf.intr_conf.rmv) 4395 rte_eth_dev_callback_process(bp->eth_dev, 4396 RTE_ETH_EVENT_INTR_RMV, 4397 NULL); 4398 pthread_mutex_unlock(&bp->err_recovery_lock); 4399 PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n"); 4400 } 4401 4402 void bnxt_dev_reset_and_resume(void *arg) 4403 { 4404 struct bnxt *bp = arg; 4405 uint32_t us = US_PER_MS * bp->fw_reset_min_msecs; 4406 uint16_t val = 0; 4407 int rc; 4408 4409 bnxt_dev_cleanup(bp); 4410 4411 bnxt_wait_for_device_shutdown(bp); 4412 4413 /* During some fatal firmware error conditions, the PCI config space 4414 * register 0x2e which normally contains the subsystem ID will become 4415 * 0xffff. This register will revert back to the normal value after 4416 * the chip has completed core reset. If we detect this condition, 4417 * we can poll this config register immediately for the value to revert. 
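* (bnxt_check_fw_reset_done() above polls the same PCI_SUBSYSTEM_ID_OFFSET until it reads back a value other than 0xffff.)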
4418 */ 4419 if (bp->flags & BNXT_FLAG_FATAL_ERROR) { 4420 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4421 if (rc < 0) { 4422 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4423 return; 4424 } 4425 if (val == 0xffff) { 4426 bp->fw_reset_min_msecs = 0; 4427 us = 1; 4428 } 4429 } 4430 4431 rc = rte_eal_alarm_set(us, bnxt_dev_recover, (void *)bp); 4432 if (rc) 4433 PMD_DRV_LOG(ERR, "Error setting recovery alarm"); 4434 } 4435 4436 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) 4437 { 4438 struct bnxt_error_recovery_info *info = bp->recovery_info; 4439 uint32_t reg = info->status_regs[index]; 4440 uint32_t type, offset, val = 0; 4441 int ret = 0; 4442 4443 type = BNXT_FW_STATUS_REG_TYPE(reg); 4444 offset = BNXT_FW_STATUS_REG_OFF(reg); 4445 4446 switch (type) { 4447 case BNXT_FW_STATUS_REG_TYPE_CFG: 4448 ret = rte_pci_read_config(bp->pdev, &val, sizeof(val), offset); 4449 if (ret < 0) 4450 PMD_DRV_LOG(ERR, "Failed to read PCI offset %#x", 4451 offset); 4452 break; 4453 case BNXT_FW_STATUS_REG_TYPE_GRC: 4454 offset = info->mapped_status_regs[index]; 4455 /* FALLTHROUGH */ 4456 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4457 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 4458 offset)); 4459 break; 4460 } 4461 4462 return val; 4463 } 4464 4465 static int bnxt_fw_reset_all(struct bnxt *bp) 4466 { 4467 struct bnxt_error_recovery_info *info = bp->recovery_info; 4468 uint32_t i; 4469 int rc = 0; 4470 4471 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4472 /* Reset through primary function driver */ 4473 for (i = 0; i < info->reg_array_cnt; i++) 4474 bnxt_write_fw_reset_reg(bp, i); 4475 /* Wait for time specified by FW after triggering reset */ 4476 rte_delay_ms(info->primary_func_wait_period_after_reset); 4477 } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) { 4478 /* Reset with the help of Kong processor */ 4479 rc = bnxt_hwrm_fw_reset(bp); 4480 if (rc) 4481 PMD_DRV_LOG(ERR, "Failed to reset FW\n"); 4482 } 4483 4484 return rc; 4485 } 4486 4487 static void bnxt_fw_reset_cb(void *arg) 4488 { 4489 struct bnxt *bp = arg; 4490 struct bnxt_error_recovery_info *info = bp->recovery_info; 4491 int rc = 0; 4492 4493 /* Only Primary function can do FW reset */ 4494 if (bnxt_is_primary_func(bp) && 4495 bnxt_is_recovery_enabled(bp)) { 4496 rc = bnxt_fw_reset_all(bp); 4497 if (rc) { 4498 PMD_DRV_LOG(ERR, "Adapter recovery failed\n"); 4499 return; 4500 } 4501 } 4502 4503 /* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send 4504 * EXCEPTION_FATAL_ASYNC event to all the functions 4505 * (including MASTER FUNC). After receiving this Async, all the active 4506 * drivers should treat this case as FW initiated recovery 4507 */ 4508 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4509 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT; 4510 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT; 4511 4512 /* To recover from error */ 4513 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, 4514 (void *)bp); 4515 } 4516 } 4517 4518 /* Driver should poll FW heartbeat, reset_counter with the frequency 4519 * advertised by FW in HWRM_ERROR_RECOVERY_QCFG. 4520 * When the driver detects heartbeat stop or change in reset_counter, 4521 * it has to trigger a reset to recover from the error condition. 4522 * A “primary function” is the function who will have the privilege to 4523 * initiate the chimp reset. The primary function will be elected by the 4524 * firmware and will be notified through async message. 
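* For example, if the firmware advertises a driver_polling_freq of 100 msec, bnxt_check_fw_health() below re-arms itself every 100 msec through rte_eal_alarm_set().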
4525 */ 4526 static void bnxt_check_fw_health(void *arg) 4527 { 4528 struct bnxt *bp = arg; 4529 struct bnxt_error_recovery_info *info = bp->recovery_info; 4530 uint32_t val = 0, wait_msec; 4531 4532 if (!info || !bnxt_is_recovery_enabled(bp) || 4533 is_bnxt_in_error(bp)) 4534 return; 4535 4536 val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); 4537 if (val == info->last_heart_beat) 4538 goto reset; 4539 4540 info->last_heart_beat = val; 4541 4542 val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); 4543 if (val != info->last_reset_counter) 4544 goto reset; 4545 4546 info->last_reset_counter = val; 4547 4548 rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq, 4549 bnxt_check_fw_health, (void *)bp); 4550 4551 return; 4552 reset: 4553 /* Stop DMA to/from device */ 4554 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4555 bp->flags |= BNXT_FLAG_FW_RESET; 4556 4557 bnxt_stop_rxtx(bp); 4558 4559 PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); 4560 4561 if (bnxt_is_primary_func(bp)) 4562 wait_msec = info->primary_func_wait_period; 4563 else 4564 wait_msec = info->normal_func_wait_period; 4565 4566 rte_eal_alarm_set(US_PER_MS * wait_msec, 4567 bnxt_fw_reset_cb, (void *)bp); 4568 } 4569 4570 void bnxt_schedule_fw_health_check(struct bnxt *bp) 4571 { 4572 uint32_t polling_freq; 4573 4574 pthread_mutex_lock(&bp->health_check_lock); 4575 4576 if (!bnxt_is_recovery_enabled(bp)) 4577 goto done; 4578 4579 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) 4580 goto done; 4581 4582 polling_freq = bp->recovery_info->driver_polling_freq; 4583 4584 rte_eal_alarm_set(US_PER_MS * polling_freq, 4585 bnxt_check_fw_health, (void *)bp); 4586 bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4587 4588 done: 4589 pthread_mutex_unlock(&bp->health_check_lock); 4590 } 4591 4592 static void bnxt_cancel_fw_health_check(struct bnxt *bp) 4593 { 4594 rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp); 4595 bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4596 } 4597 4598 static bool bnxt_vf_pciid(uint16_t device_id) 4599 { 4600 switch (device_id) { 4601 case BROADCOM_DEV_ID_57304_VF: 4602 case BROADCOM_DEV_ID_57406_VF: 4603 case BROADCOM_DEV_ID_5731X_VF: 4604 case BROADCOM_DEV_ID_5741X_VF: 4605 case BROADCOM_DEV_ID_57414_VF: 4606 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4607 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4608 case BROADCOM_DEV_ID_58802_VF: 4609 case BROADCOM_DEV_ID_57500_VF1: 4610 case BROADCOM_DEV_ID_57500_VF2: 4611 case BROADCOM_DEV_ID_58818_VF: 4612 /* FALLTHROUGH */ 4613 return true; 4614 default: 4615 return false; 4616 } 4617 } 4618 4619 /* Phase 5 device */ 4620 static bool bnxt_p5_device(uint16_t device_id) 4621 { 4622 switch (device_id) { 4623 case BROADCOM_DEV_ID_57508: 4624 case BROADCOM_DEV_ID_57504: 4625 case BROADCOM_DEV_ID_57502: 4626 case BROADCOM_DEV_ID_57508_MF1: 4627 case BROADCOM_DEV_ID_57504_MF1: 4628 case BROADCOM_DEV_ID_57502_MF1: 4629 case BROADCOM_DEV_ID_57508_MF2: 4630 case BROADCOM_DEV_ID_57504_MF2: 4631 case BROADCOM_DEV_ID_57502_MF2: 4632 case BROADCOM_DEV_ID_57500_VF1: 4633 case BROADCOM_DEV_ID_57500_VF2: 4634 case BROADCOM_DEV_ID_58812: 4635 case BROADCOM_DEV_ID_58814: 4636 case BROADCOM_DEV_ID_58818: 4637 case BROADCOM_DEV_ID_58818_VF: 4638 /* FALLTHROUGH */ 4639 return true; 4640 default: 4641 return false; 4642 } 4643 } 4644 4645 bool bnxt_stratus_device(struct bnxt *bp) 4646 { 4647 uint16_t device_id = bp->pdev->id.device_id; 4648 4649 switch (device_id) { 4650 case BROADCOM_DEV_ID_STRATUS_NIC: 4651 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4652 case 
BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4653 /* FALLTHROUGH */ 4654 return true; 4655 default: 4656 return false; 4657 } 4658 } 4659 4660 static int bnxt_map_pci_bars(struct rte_eth_dev *eth_dev) 4661 { 4662 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 4663 struct bnxt *bp = eth_dev->data->dev_private; 4664 4665 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 4666 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 4667 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 4668 if (!bp->bar0 || !bp->doorbell_base) { 4669 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 4670 return -ENODEV; 4671 } 4672 4673 bp->eth_dev = eth_dev; 4674 bp->pdev = pci_dev; 4675 4676 return 0; 4677 } 4678 4679 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 4680 struct bnxt_ctx_pg_info *ctx_pg, 4681 uint32_t mem_size, 4682 const char *suffix, 4683 uint16_t idx) 4684 { 4685 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 4686 const struct rte_memzone *mz = NULL; 4687 char mz_name[RTE_MEMZONE_NAMESIZE]; 4688 rte_iova_t mz_phys_addr; 4689 uint64_t valid_bits = 0; 4690 uint32_t sz; 4691 int i; 4692 4693 if (!mem_size) 4694 return 0; 4695 4696 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 4697 BNXT_PAGE_SIZE; 4698 rmem->page_size = BNXT_PAGE_SIZE; 4699 rmem->pg_arr = ctx_pg->ctx_pg_arr; 4700 rmem->dma_arr = ctx_pg->ctx_dma_arr; 4701 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 4702 4703 valid_bits = PTU_PTE_VALID; 4704 4705 if (rmem->nr_pages > 1) { 4706 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4707 "bnxt_ctx_pg_tbl%s_%x_%d", 4708 suffix, idx, bp->eth_dev->data->port_id); 4709 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4710 mz = rte_memzone_lookup(mz_name); 4711 if (!mz) { 4712 mz = rte_memzone_reserve_aligned(mz_name, 4713 rmem->nr_pages * 8, 4714 bp->eth_dev->device->numa_node, 4715 RTE_MEMZONE_2MB | 4716 RTE_MEMZONE_SIZE_HINT_ONLY | 4717 RTE_MEMZONE_IOVA_CONTIG, 4718 BNXT_PAGE_SIZE); 4719 if (mz == NULL) 4720 return -ENOMEM; 4721 } 4722 4723 memset(mz->addr, 0, mz->len); 4724 mz_phys_addr = mz->iova; 4725 4726 rmem->pg_tbl = mz->addr; 4727 rmem->pg_tbl_map = mz_phys_addr; 4728 rmem->pg_tbl_mz = mz; 4729 } 4730 4731 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", 4732 suffix, idx, bp->eth_dev->data->port_id); 4733 mz = rte_memzone_lookup(mz_name); 4734 if (!mz) { 4735 mz = rte_memzone_reserve_aligned(mz_name, 4736 mem_size, 4737 bp->eth_dev->device->numa_node, 4738 RTE_MEMZONE_1GB | 4739 RTE_MEMZONE_SIZE_HINT_ONLY | 4740 RTE_MEMZONE_IOVA_CONTIG, 4741 BNXT_PAGE_SIZE); 4742 if (mz == NULL) 4743 return -ENOMEM; 4744 } 4745 4746 memset(mz->addr, 0, mz->len); 4747 mz_phys_addr = mz->iova; 4748 4749 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 4750 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 4751 rmem->dma_arr[i] = mz_phys_addr + sz; 4752 4753 if (rmem->nr_pages > 1) { 4754 if (i == rmem->nr_pages - 2 && 4755 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4756 valid_bits |= PTU_PTE_NEXT_TO_LAST; 4757 else if (i == rmem->nr_pages - 1 && 4758 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4759 valid_bits |= PTU_PTE_LAST; 4760 4761 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 4762 valid_bits); 4763 } 4764 } 4765 4766 rmem->mz = mz; 4767 if (rmem->vmem_size) 4768 rmem->vmem = (void **)mz->addr; 4769 rmem->dma_arr[0] = mz_phys_addr; 4770 return 0; 4771 } 4772 4773 static void bnxt_free_ctx_mem(struct bnxt *bp) 4774 { 4775 int i; 4776 4777 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 4778 return; 4779 4780 bp->ctx->flags &= 
~BNXT_CTX_FLAG_INITED; 4781 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 4782 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 4783 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 4784 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 4785 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 4786 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 4787 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 4788 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 4789 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 4790 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 4791 4792 for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { 4793 if (bp->ctx->tqm_mem[i]) 4794 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 4795 } 4796 4797 rte_free(bp->ctx); 4798 bp->ctx = NULL; 4799 } 4800 4801 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 4802 4803 #define min_t(type, x, y) ({ \ 4804 type __min1 = (x); \ 4805 type __min2 = (y); \ 4806 __min1 < __min2 ? __min1 : __min2; }) 4807 4808 #define max_t(type, x, y) ({ \ 4809 type __max1 = (x); \ 4810 type __max2 = (y); \ 4811 __max1 > __max2 ? __max1 : __max2; }) 4812 4813 #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) 4814 4815 int bnxt_alloc_ctx_mem(struct bnxt *bp) 4816 { 4817 struct bnxt_ctx_pg_info *ctx_pg; 4818 struct bnxt_ctx_mem_info *ctx; 4819 uint32_t mem_size, ena, entries; 4820 uint32_t entries_sp, min; 4821 int i, rc; 4822 4823 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 4824 if (rc) { 4825 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 4826 return rc; 4827 } 4828 ctx = bp->ctx; 4829 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 4830 return 0; 4831 4832 ctx_pg = &ctx->qp_mem; 4833 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 4834 if (ctx->qp_entry_size) { 4835 mem_size = ctx->qp_entry_size * ctx_pg->entries; 4836 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 4837 if (rc) 4838 return rc; 4839 } 4840 4841 ctx_pg = &ctx->srq_mem; 4842 ctx_pg->entries = ctx->srq_max_l2_entries; 4843 if (ctx->srq_entry_size) { 4844 mem_size = ctx->srq_entry_size * ctx_pg->entries; 4845 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 4846 if (rc) 4847 return rc; 4848 } 4849 4850 ctx_pg = &ctx->cq_mem; 4851 ctx_pg->entries = ctx->cq_max_l2_entries; 4852 if (ctx->cq_entry_size) { 4853 mem_size = ctx->cq_entry_size * ctx_pg->entries; 4854 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 4855 if (rc) 4856 return rc; 4857 } 4858 4859 ctx_pg = &ctx->vnic_mem; 4860 ctx_pg->entries = ctx->vnic_max_vnic_entries + 4861 ctx->vnic_max_ring_table_entries; 4862 if (ctx->vnic_entry_size) { 4863 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 4864 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 4865 if (rc) 4866 return rc; 4867 } 4868 4869 ctx_pg = &ctx->stat_mem; 4870 ctx_pg->entries = ctx->stat_max_entries; 4871 if (ctx->stat_entry_size) { 4872 mem_size = ctx->stat_entry_size * ctx_pg->entries; 4873 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 4874 if (rc) 4875 return rc; 4876 } 4877 4878 min = ctx->tqm_min_entries_per_ring; 4879 4880 entries_sp = ctx->qp_max_l2_entries + 4881 ctx->vnic_max_vnic_entries + 4882 2 * ctx->qp_min_qp1_entries + min; 4883 entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple); 4884 4885 entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries; 4886 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 4887 entries = clamp_t(uint32_t, entries, 
min, 4888 ctx->tqm_max_entries_per_ring); 4889 for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 4890 /* i=0 is for TQM_SP. i=1 to i=8 applies to RING0 to RING7. 4891 * i > 8 is other ext rings. 4892 */ 4893 ctx_pg = ctx->tqm_mem[i]; 4894 ctx_pg->entries = i ? entries : entries_sp; 4895 if (ctx->tqm_entry_size) { 4896 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 4897 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, 4898 "tqm_mem", i); 4899 if (rc) 4900 return rc; 4901 } 4902 if (i < BNXT_MAX_TQM_LEGACY_RINGS) 4903 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 4904 else 4905 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8; 4906 } 4907 4908 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 4909 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 4910 if (rc) 4911 PMD_DRV_LOG(ERR, 4912 "Failed to configure context mem: rc = %d\n", rc); 4913 else 4914 ctx->flags |= BNXT_CTX_FLAG_INITED; 4915 4916 return rc; 4917 } 4918 4919 static int bnxt_alloc_stats_mem(struct bnxt *bp) 4920 { 4921 struct rte_pci_device *pci_dev = bp->pdev; 4922 char mz_name[RTE_MEMZONE_NAMESIZE]; 4923 const struct rte_memzone *mz = NULL; 4924 uint32_t total_alloc_len; 4925 rte_iova_t mz_phys_addr; 4926 4927 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 4928 return 0; 4929 4930 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4931 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4932 pci_dev->addr.bus, pci_dev->addr.devid, 4933 pci_dev->addr.function, "rx_port_stats"); 4934 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4935 mz = rte_memzone_lookup(mz_name); 4936 total_alloc_len = 4937 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 4938 sizeof(struct rx_port_stats_ext) + 512); 4939 if (!mz) { 4940 mz = rte_memzone_reserve(mz_name, total_alloc_len, 4941 SOCKET_ID_ANY, 4942 RTE_MEMZONE_2MB | 4943 RTE_MEMZONE_SIZE_HINT_ONLY | 4944 RTE_MEMZONE_IOVA_CONTIG); 4945 if (mz == NULL) 4946 return -ENOMEM; 4947 } 4948 memset(mz->addr, 0, mz->len); 4949 mz_phys_addr = mz->iova; 4950 4951 bp->rx_mem_zone = (const void *)mz; 4952 bp->hw_rx_port_stats = mz->addr; 4953 bp->hw_rx_port_stats_map = mz_phys_addr; 4954 4955 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4956 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4957 pci_dev->addr.bus, pci_dev->addr.devid, 4958 pci_dev->addr.function, "tx_port_stats"); 4959 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4960 mz = rte_memzone_lookup(mz_name); 4961 total_alloc_len = 4962 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 4963 sizeof(struct tx_port_stats_ext) + 512); 4964 if (!mz) { 4965 mz = rte_memzone_reserve(mz_name, 4966 total_alloc_len, 4967 SOCKET_ID_ANY, 4968 RTE_MEMZONE_2MB | 4969 RTE_MEMZONE_SIZE_HINT_ONLY | 4970 RTE_MEMZONE_IOVA_CONTIG); 4971 if (mz == NULL) 4972 return -ENOMEM; 4973 } 4974 memset(mz->addr, 0, mz->len); 4975 mz_phys_addr = mz->iova; 4976 4977 bp->tx_mem_zone = (const void *)mz; 4978 bp->hw_tx_port_stats = mz->addr; 4979 bp->hw_tx_port_stats_map = mz_phys_addr; 4980 bp->flags |= BNXT_FLAG_PORT_STATS; 4981 4982 /* Display extended statistics if FW supports it */ 4983 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 4984 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 4985 !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED)) 4986 return 0; 4987 4988 bp->hw_rx_port_stats_ext = (void *) 4989 ((uint8_t *)bp->hw_rx_port_stats + 4990 sizeof(struct rx_port_stats)); 4991 bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + 4992 sizeof(struct rx_port_stats); 4993 bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; 4994 4995 if (bp->hwrm_spec_code < 
HWRM_SPEC_CODE_1_9_2 || 4996 bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) { 4997 bp->hw_tx_port_stats_ext = (void *) 4998 ((uint8_t *)bp->hw_tx_port_stats + 4999 sizeof(struct tx_port_stats)); 5000 bp->hw_tx_port_stats_ext_map = 5001 bp->hw_tx_port_stats_map + 5002 sizeof(struct tx_port_stats); 5003 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS; 5004 } 5005 5006 return 0; 5007 } 5008 5009 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) 5010 { 5011 struct bnxt *bp = eth_dev->data->dev_private; 5012 int rc = 0; 5013 5014 eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl", 5015 RTE_ETHER_ADDR_LEN * 5016 bp->max_l2_ctx, 5017 0); 5018 if (eth_dev->data->mac_addrs == NULL) { 5019 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n"); 5020 return -ENOMEM; 5021 } 5022 5023 if (!BNXT_HAS_DFLT_MAC_SET(bp)) { 5024 if (BNXT_PF(bp)) 5025 return -EINVAL; 5026 5027 /* Generate a random MAC address, if none was assigned by PF */ 5028 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n"); 5029 bnxt_eth_hw_addr_random(bp->mac_addr); 5030 PMD_DRV_LOG(INFO, 5031 "Assign random MAC:" RTE_ETHER_ADDR_PRT_FMT "\n", 5032 bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2], 5033 bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]); 5034 5035 rc = bnxt_hwrm_set_mac(bp); 5036 if (rc) 5037 return rc; 5038 } 5039 5040 /* Copy the permanent MAC from the FUNC_QCAPS response */ 5041 memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN); 5042 5043 return rc; 5044 } 5045 5046 static int bnxt_restore_dflt_mac(struct bnxt *bp) 5047 { 5048 int rc = 0; 5049 5050 /* MAC is already configured in FW */ 5051 if (BNXT_HAS_DFLT_MAC_SET(bp)) 5052 return 0; 5053 5054 /* Restore the old MAC configured */ 5055 rc = bnxt_hwrm_set_mac(bp); 5056 if (rc) 5057 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n"); 5058 5059 return rc; 5060 } 5061 5062 static void bnxt_config_vf_req_fwd(struct bnxt *bp) 5063 { 5064 if (!BNXT_PF(bp)) 5065 return; 5066 5067 memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd)); 5068 5069 if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)) 5070 BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG); 5071 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG); 5072 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG); 5073 BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC); 5074 BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD); 5075 } 5076 5077 struct bnxt * 5078 bnxt_get_bp(uint16_t port) 5079 { 5080 struct bnxt *bp; 5081 struct rte_eth_dev *dev; 5082 5083 if (!rte_eth_dev_is_valid_port(port)) { 5084 PMD_DRV_LOG(ERR, "Invalid port %d\n", port); 5085 return NULL; 5086 } 5087 5088 dev = &rte_eth_devices[port]; 5089 if (!is_bnxt_supported(dev)) { 5090 PMD_DRV_LOG(ERR, "Device %d not supported\n", port); 5091 return NULL; 5092 } 5093 5094 bp = (struct bnxt *)dev->data->dev_private; 5095 if (!BNXT_TRUFLOW_EN(bp)) { 5096 PMD_DRV_LOG(ERR, "TRUFLOW not enabled\n"); 5097 return NULL; 5098 } 5099 5100 return bp; 5101 } 5102 5103 uint16_t 5104 bnxt_get_svif(uint16_t port_id, bool func_svif, 5105 enum bnxt_ulp_intf_type type) 5106 { 5107 struct rte_eth_dev *eth_dev; 5108 struct bnxt *bp; 5109 5110 eth_dev = &rte_eth_devices[port_id]; 5111 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5112 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5113 if (!vfr) 5114 return 0; 5115 5116 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5117 return vfr->svif; 5118 5119 eth_dev = vfr->parent_dev; 5120 } 5121 5122 bp = eth_dev->data->dev_private; 5123 5124 return func_svif ? 
bp->func_svif : bp->port_svif; 5125 } 5126 5127 void 5128 bnxt_get_iface_mac(uint16_t port, enum bnxt_ulp_intf_type type, 5129 uint8_t *mac, uint8_t *parent_mac) 5130 { 5131 struct rte_eth_dev *eth_dev; 5132 struct bnxt *bp; 5133 5134 if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF && 5135 type != BNXT_ULP_INTF_TYPE_PF) 5136 return; 5137 5138 eth_dev = &rte_eth_devices[port]; 5139 bp = eth_dev->data->dev_private; 5140 memcpy(mac, bp->mac_addr, RTE_ETHER_ADDR_LEN); 5141 5142 if (type == BNXT_ULP_INTF_TYPE_TRUSTED_VF) 5143 memcpy(parent_mac, bp->parent->mac_addr, RTE_ETHER_ADDR_LEN); 5144 } 5145 5146 uint16_t 5147 bnxt_get_parent_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type) 5148 { 5149 struct rte_eth_dev *eth_dev; 5150 struct bnxt *bp; 5151 5152 if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) 5153 return 0; 5154 5155 eth_dev = &rte_eth_devices[port]; 5156 bp = eth_dev->data->dev_private; 5157 5158 return bp->parent->vnic; 5159 } 5160 uint16_t 5161 bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type) 5162 { 5163 struct rte_eth_dev *eth_dev; 5164 struct bnxt_vnic_info *vnic; 5165 struct bnxt *bp; 5166 5167 eth_dev = &rte_eth_devices[port]; 5168 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5169 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5170 if (!vfr) 5171 return 0; 5172 5173 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5174 return vfr->dflt_vnic_id; 5175 5176 eth_dev = vfr->parent_dev; 5177 } 5178 5179 bp = eth_dev->data->dev_private; 5180 5181 vnic = BNXT_GET_DEFAULT_VNIC(bp); 5182 5183 return vnic->fw_vnic_id; 5184 } 5185 5186 uint16_t 5187 bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type) 5188 { 5189 struct rte_eth_dev *eth_dev; 5190 struct bnxt *bp; 5191 5192 eth_dev = &rte_eth_devices[port]; 5193 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5194 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5195 if (!vfr) 5196 return 0; 5197 5198 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5199 return vfr->fw_fid; 5200 5201 eth_dev = vfr->parent_dev; 5202 } 5203 5204 bp = eth_dev->data->dev_private; 5205 5206 return bp->fw_fid; 5207 } 5208 5209 enum bnxt_ulp_intf_type 5210 bnxt_get_interface_type(uint16_t port) 5211 { 5212 struct rte_eth_dev *eth_dev; 5213 struct bnxt *bp; 5214 5215 eth_dev = &rte_eth_devices[port]; 5216 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) 5217 return BNXT_ULP_INTF_TYPE_VF_REP; 5218 5219 bp = eth_dev->data->dev_private; 5220 if (BNXT_PF(bp)) 5221 return BNXT_ULP_INTF_TYPE_PF; 5222 else if (BNXT_VF_IS_TRUSTED(bp)) 5223 return BNXT_ULP_INTF_TYPE_TRUSTED_VF; 5224 else if (BNXT_VF(bp)) 5225 return BNXT_ULP_INTF_TYPE_VF; 5226 5227 return BNXT_ULP_INTF_TYPE_INVALID; 5228 } 5229 5230 uint16_t 5231 bnxt_get_phy_port_id(uint16_t port_id) 5232 { 5233 struct bnxt_representor *vfr; 5234 struct rte_eth_dev *eth_dev; 5235 struct bnxt *bp; 5236 5237 eth_dev = &rte_eth_devices[port_id]; 5238 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5239 vfr = eth_dev->data->dev_private; 5240 if (!vfr) 5241 return 0; 5242 5243 eth_dev = vfr->parent_dev; 5244 } 5245 5246 bp = eth_dev->data->dev_private; 5247 5248 return BNXT_PF(bp) ? 
bp->pf->port_id : bp->parent->port_id; 5249 } 5250 5251 uint16_t 5252 bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type) 5253 { 5254 struct rte_eth_dev *eth_dev; 5255 struct bnxt *bp; 5256 5257 eth_dev = &rte_eth_devices[port_id]; 5258 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5259 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5260 if (!vfr) 5261 return 0; 5262 5263 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5264 return vfr->fw_fid - 1; 5265 5266 eth_dev = vfr->parent_dev; 5267 } 5268 5269 bp = eth_dev->data->dev_private; 5270 5271 return BNXT_PF(bp) ? bp->fw_fid - 1 : bp->parent->fid - 1; 5272 } 5273 5274 uint16_t 5275 bnxt_get_vport(uint16_t port_id) 5276 { 5277 return (1 << bnxt_get_phy_port_id(port_id)); 5278 } 5279 5280 static void bnxt_alloc_error_recovery_info(struct bnxt *bp) 5281 { 5282 struct bnxt_error_recovery_info *info = bp->recovery_info; 5283 5284 if (info) { 5285 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)) 5286 memset(info, 0, sizeof(*info)); 5287 return; 5288 } 5289 5290 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5291 return; 5292 5293 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5294 sizeof(*info), 0); 5295 if (!info) 5296 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5297 5298 bp->recovery_info = info; 5299 } 5300 5301 static void bnxt_check_fw_status(struct bnxt *bp) 5302 { 5303 uint32_t fw_status; 5304 5305 if (!(bp->recovery_info && 5306 (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) 5307 return; 5308 5309 fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); 5310 if (fw_status != BNXT_FW_STATUS_HEALTHY) 5311 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", 5312 fw_status); 5313 } 5314 5315 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) 5316 { 5317 struct bnxt_error_recovery_info *info = bp->recovery_info; 5318 uint32_t status_loc; 5319 uint32_t sig_ver; 5320 5321 rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + 5322 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5323 sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5324 BNXT_GRCP_WINDOW_2_BASE + 5325 offsetof(struct hcomm_status, 5326 sig_ver))); 5327 /* If the signature is absent, then FW does not support this feature */ 5328 if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != 5329 HCOMM_STATUS_SIGNATURE_VAL) 5330 return 0; 5331 5332 if (!info) { 5333 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5334 sizeof(*info), 0); 5335 if (!info) 5336 return -ENOMEM; 5337 bp->recovery_info = info; 5338 } else { 5339 memset(info, 0, sizeof(*info)); 5340 } 5341 5342 status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5343 BNXT_GRCP_WINDOW_2_BASE + 5344 offsetof(struct hcomm_status, 5345 fw_status_loc))); 5346 5347 /* Only pre-map the FW health status GRC register */ 5348 if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) 5349 return 0; 5350 5351 info->status_regs[BNXT_FW_STATUS_REG] = status_loc; 5352 info->mapped_status_regs[BNXT_FW_STATUS_REG] = 5353 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); 5354 5355 rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + 5356 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5357 5358 bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; 5359 5360 return 0; 5361 } 5362 5363 /* This function gets the FW version along with the 5364 * capabilities(MAX and current) of the function, vnic, 5365 * error recovery, phy and other chip related info 5366 */ 5367 static int bnxt_get_config(struct bnxt *bp) 5368 { 5369 uint16_t mtu; 5370 int rc = 0; 5371 5372 bp->fw_cap = 0; 5373 5374 rc 
= bnxt_map_hcomm_fw_status_reg(bp); 5375 if (rc) 5376 return rc; 5377 5378 rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); 5379 if (rc) { 5380 bnxt_check_fw_status(bp); 5381 return rc; 5382 } 5383 5384 rc = bnxt_hwrm_func_reset(bp); 5385 if (rc) 5386 return -EIO; 5387 5388 rc = bnxt_hwrm_vnic_qcaps(bp); 5389 if (rc) 5390 return rc; 5391 5392 rc = bnxt_hwrm_queue_qportcfg(bp); 5393 if (rc) 5394 return rc; 5395 5396 /* Get the MAX capabilities for this function. 5397 * This function also allocates context memory for TQM rings and 5398 * informs the firmware about this allocated backing store memory. 5399 */ 5400 rc = bnxt_hwrm_func_qcaps(bp); 5401 if (rc) 5402 return rc; 5403 5404 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 5405 if (rc) 5406 return rc; 5407 5408 rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp); 5409 if (rc) 5410 return rc; 5411 5412 bnxt_hwrm_port_mac_qcfg(bp); 5413 5414 bnxt_hwrm_parent_pf_qcfg(bp); 5415 5416 bnxt_hwrm_port_phy_qcaps(bp); 5417 5418 bnxt_alloc_error_recovery_info(bp); 5419 /* Get the adapter error recovery support info */ 5420 rc = bnxt_hwrm_error_recovery_qcfg(bp); 5421 if (rc) 5422 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5423 5424 bnxt_hwrm_port_led_qcaps(bp); 5425 5426 return 0; 5427 } 5428 5429 static int 5430 bnxt_init_locks(struct bnxt *bp) 5431 { 5432 int err; 5433 5434 err = pthread_mutex_init(&bp->flow_lock, NULL); 5435 if (err) { 5436 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); 5437 return err; 5438 } 5439 5440 err = pthread_mutex_init(&bp->def_cp_lock, NULL); 5441 if (err) { 5442 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); 5443 return err; 5444 } 5445 5446 err = pthread_mutex_init(&bp->health_check_lock, NULL); 5447 if (err) { 5448 PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n"); 5449 return err; 5450 } 5451 5452 err = pthread_mutex_init(&bp->err_recovery_lock, NULL); 5453 if (err) 5454 PMD_DRV_LOG(ERR, "Unable to initialize err_recovery_lock\n"); 5455 5456 return err; 5457 } 5458 5459 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) 5460 { 5461 int rc = 0; 5462 5463 rc = bnxt_get_config(bp); 5464 if (rc) 5465 return rc; 5466 5467 if (!reconfig_dev) { 5468 rc = bnxt_setup_mac_addr(bp->eth_dev); 5469 if (rc) 5470 return rc; 5471 } else { 5472 rc = bnxt_restore_dflt_mac(bp); 5473 if (rc) 5474 return rc; 5475 } 5476 5477 bnxt_config_vf_req_fwd(bp); 5478 5479 rc = bnxt_hwrm_func_driver_register(bp); 5480 if (rc) { 5481 PMD_DRV_LOG(ERR, "Failed to register driver"); 5482 return -EBUSY; 5483 } 5484 5485 if (BNXT_PF(bp)) { 5486 if (bp->pdev->max_vfs) { 5487 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 5488 if (rc) { 5489 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 5490 return rc; 5491 } 5492 } else { 5493 rc = bnxt_hwrm_allocate_pf_only(bp); 5494 if (rc) { 5495 PMD_DRV_LOG(ERR, 5496 "Failed to allocate PF resources"); 5497 return rc; 5498 } 5499 } 5500 } 5501 5502 rc = bnxt_alloc_mem(bp, reconfig_dev); 5503 if (rc) 5504 return rc; 5505 5506 rc = bnxt_setup_int(bp); 5507 if (rc) 5508 return rc; 5509 5510 rc = bnxt_request_int(bp); 5511 if (rc) 5512 return rc; 5513 5514 rc = bnxt_init_ctx_mem(bp); 5515 if (rc) { 5516 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n"); 5517 return rc; 5518 } 5519 5520 return 0; 5521 } 5522 5523 static int 5524 bnxt_parse_devarg_accum_stats(__rte_unused const char *key, 5525 const char *value, void *opaque_arg) 5526 { 5527 struct bnxt *bp = opaque_arg; 5528 unsigned long accum_stats; 5529 char *end = NULL; 5530 5531 if (!value || !opaque_arg) { 5532 PMD_DRV_LOG(ERR, 
5533 "Invalid parameter passed to accum-stats devargs.\n"); 5534 return -EINVAL; 5535 } 5536 5537 accum_stats = strtoul(value, &end, 10); 5538 if (end == NULL || *end != '\0' || 5539 (accum_stats == ULONG_MAX && errno == ERANGE)) { 5540 PMD_DRV_LOG(ERR, 5541 "Invalid parameter passed to accum-stats devargs.\n"); 5542 return -EINVAL; 5543 } 5544 5545 if (BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats)) { 5546 PMD_DRV_LOG(ERR, 5547 "Invalid value passed to accum-stats devargs.\n"); 5548 return -EINVAL; 5549 } 5550 5551 if (accum_stats) { 5552 bp->flags2 |= BNXT_FLAGS2_ACCUM_STATS_EN; 5553 PMD_DRV_LOG(INFO, "Host-based accum-stats feature enabled.\n"); 5554 } else { 5555 bp->flags2 &= ~BNXT_FLAGS2_ACCUM_STATS_EN; 5556 PMD_DRV_LOG(INFO, "Host-based accum-stats feature disabled.\n"); 5557 } 5558 5559 return 0; 5560 } 5561 5562 static int 5563 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key, 5564 const char *value, void *opaque_arg) 5565 { 5566 struct bnxt *bp = opaque_arg; 5567 unsigned long flow_xstat; 5568 char *end = NULL; 5569 5570 if (!value || !opaque_arg) { 5571 PMD_DRV_LOG(ERR, 5572 "Invalid parameter passed to flow_xstat devarg.\n"); 5573 return -EINVAL; 5574 } 5575 5576 flow_xstat = strtoul(value, &end, 10); 5577 if (end == NULL || *end != '\0' || 5578 (flow_xstat == ULONG_MAX && errno == ERANGE)) { 5579 PMD_DRV_LOG(ERR, 5580 "Invalid parameter passed to flow_xstat devarg.\n"); 5581 return -EINVAL; 5582 } 5583 5584 if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) { 5585 PMD_DRV_LOG(ERR, 5586 "Invalid value passed to flow_xstat devarg.\n"); 5587 return -EINVAL; 5588 } 5589 5590 bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; 5591 if (BNXT_FLOW_XSTATS_EN(bp)) 5592 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n"); 5593 5594 return 0; 5595 } 5596 5597 static int 5598 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key, 5599 const char *value, void *opaque_arg) 5600 { 5601 struct bnxt *bp = opaque_arg; 5602 unsigned long max_num_kflows; 5603 char *end = NULL; 5604 5605 if (!value || !opaque_arg) { 5606 PMD_DRV_LOG(ERR, 5607 "Invalid parameter passed to max_num_kflows devarg.\n"); 5608 return -EINVAL; 5609 } 5610 5611 max_num_kflows = strtoul(value, &end, 10); 5612 if (end == NULL || *end != '\0' || 5613 (max_num_kflows == ULONG_MAX && errno == ERANGE)) { 5614 PMD_DRV_LOG(ERR, 5615 "Invalid parameter passed to max_num_kflows devarg.\n"); 5616 return -EINVAL; 5617 } 5618 5619 if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) { 5620 PMD_DRV_LOG(ERR, 5621 "Invalid value passed to max_num_kflows devarg.\n"); 5622 return -EINVAL; 5623 } 5624 5625 bp->max_num_kflows = max_num_kflows; 5626 if (bp->max_num_kflows) 5627 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n", 5628 max_num_kflows); 5629 5630 return 0; 5631 } 5632 5633 static int 5634 bnxt_parse_devarg_app_id(__rte_unused const char *key, 5635 const char *value, void *opaque_arg) 5636 { 5637 struct bnxt *bp = opaque_arg; 5638 unsigned long app_id; 5639 char *end = NULL; 5640 5641 if (!value || !opaque_arg) { 5642 PMD_DRV_LOG(ERR, 5643 "Invalid parameter passed to app-id " 5644 "devargs.\n"); 5645 return -EINVAL; 5646 } 5647 5648 app_id = strtoul(value, &end, 10); 5649 if (end == NULL || *end != '\0' || 5650 (app_id == ULONG_MAX && errno == ERANGE)) { 5651 PMD_DRV_LOG(ERR, 5652 "Invalid parameter passed to app_id " 5653 "devargs.\n"); 5654 return -EINVAL; 5655 } 5656 5657 if (BNXT_DEVARG_APP_ID_INVALID(app_id)) { 5658 PMD_DRV_LOG(ERR, "Invalid app-id(%d) devargs.\n", 5659 (uint16_t)app_id); 5660 return -EINVAL; 5661 } 5662 5663 
bp->app_id = app_id; 5664 PMD_DRV_LOG(INFO, "app-id=%d feature enabled.\n", (uint16_t)app_id); 5665 5666 return 0; 5667 } 5668 5669 static int 5670 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key, 5671 const char *value, void *opaque_arg) 5672 { 5673 struct bnxt_representor *vfr_bp = opaque_arg; 5674 unsigned long rep_is_pf; 5675 char *end = NULL; 5676 5677 if (!value || !opaque_arg) { 5678 PMD_DRV_LOG(ERR, 5679 "Invalid parameter passed to rep_is_pf devargs.\n"); 5680 return -EINVAL; 5681 } 5682 5683 rep_is_pf = strtoul(value, &end, 10); 5684 if (end == NULL || *end != '\0' || 5685 (rep_is_pf == ULONG_MAX && errno == ERANGE)) { 5686 PMD_DRV_LOG(ERR, 5687 "Invalid parameter passed to rep_is_pf devargs.\n"); 5688 return -EINVAL; 5689 } 5690 5691 if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) { 5692 PMD_DRV_LOG(ERR, 5693 "Invalid value passed to rep_is_pf devargs.\n"); 5694 return -EINVAL; 5695 } 5696 5697 vfr_bp->flags |= rep_is_pf; 5698 if (BNXT_REP_PF(vfr_bp)) 5699 PMD_DRV_LOG(INFO, "PF representor\n"); 5700 else 5701 PMD_DRV_LOG(INFO, "VF representor\n"); 5702 5703 return 0; 5704 } 5705 5706 static int 5707 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key, 5708 const char *value, void *opaque_arg) 5709 { 5710 struct bnxt_representor *vfr_bp = opaque_arg; 5711 unsigned long rep_based_pf; 5712 char *end = NULL; 5713 5714 if (!value || !opaque_arg) { 5715 PMD_DRV_LOG(ERR, 5716 "Invalid parameter passed to rep_based_pf " 5717 "devargs.\n"); 5718 return -EINVAL; 5719 } 5720 5721 rep_based_pf = strtoul(value, &end, 10); 5722 if (end == NULL || *end != '\0' || 5723 (rep_based_pf == ULONG_MAX && errno == ERANGE)) { 5724 PMD_DRV_LOG(ERR, 5725 "Invalid parameter passed to rep_based_pf " 5726 "devargs.\n"); 5727 return -EINVAL; 5728 } 5729 5730 if (BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) { 5731 PMD_DRV_LOG(ERR, 5732 "Invalid value passed to rep_based_pf devargs.\n"); 5733 return -EINVAL; 5734 } 5735 5736 vfr_bp->rep_based_pf = rep_based_pf; 5737 vfr_bp->flags |= BNXT_REP_BASED_PF_VALID; 5738 5739 PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf); 5740 5741 return 0; 5742 } 5743 5744 static int 5745 bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key, 5746 const char *value, void *opaque_arg) 5747 { 5748 struct bnxt_representor *vfr_bp = opaque_arg; 5749 unsigned long rep_q_r2f; 5750 char *end = NULL; 5751 5752 if (!value || !opaque_arg) { 5753 PMD_DRV_LOG(ERR, 5754 "Invalid parameter passed to rep_q_r2f " 5755 "devargs.\n"); 5756 return -EINVAL; 5757 } 5758 5759 rep_q_r2f = strtoul(value, &end, 10); 5760 if (end == NULL || *end != '\0' || 5761 (rep_q_r2f == ULONG_MAX && errno == ERANGE)) { 5762 PMD_DRV_LOG(ERR, 5763 "Invalid parameter passed to rep_q_r2f " 5764 "devargs.\n"); 5765 return -EINVAL; 5766 } 5767 5768 if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) { 5769 PMD_DRV_LOG(ERR, 5770 "Invalid value passed to rep_q_r2f devargs.\n"); 5771 return -EINVAL; 5772 } 5773 5774 vfr_bp->rep_q_r2f = rep_q_r2f; 5775 vfr_bp->flags |= BNXT_REP_Q_R2F_VALID; 5776 PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f); 5777 5778 return 0; 5779 } 5780 5781 static int 5782 bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key, 5783 const char *value, void *opaque_arg) 5784 { 5785 struct bnxt_representor *vfr_bp = opaque_arg; 5786 unsigned long rep_q_f2r; 5787 char *end = NULL; 5788 5789 if (!value || !opaque_arg) { 5790 PMD_DRV_LOG(ERR, 5791 "Invalid parameter passed to rep_q_f2r " 5792 "devargs.\n"); 5793 return -EINVAL; 5794 } 5795 5796 rep_q_f2r = strtoul(value, 
&end, 10); 5797 if (end == NULL || *end != '\0' || 5798 (rep_q_f2r == ULONG_MAX && errno == ERANGE)) { 5799 PMD_DRV_LOG(ERR, 5800 "Invalid parameter passed to rep_q_f2r " 5801 "devargs.\n"); 5802 return -EINVAL; 5803 } 5804 5805 if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) { 5806 PMD_DRV_LOG(ERR, 5807 "Invalid value passed to rep_q_f2r devargs.\n"); 5808 return -EINVAL; 5809 } 5810 5811 vfr_bp->rep_q_f2r = rep_q_f2r; 5812 vfr_bp->flags |= BNXT_REP_Q_F2R_VALID; 5813 PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r); 5814 5815 return 0; 5816 } 5817 5818 static int 5819 bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key, 5820 const char *value, void *opaque_arg) 5821 { 5822 struct bnxt_representor *vfr_bp = opaque_arg; 5823 unsigned long rep_fc_r2f; 5824 char *end = NULL; 5825 5826 if (!value || !opaque_arg) { 5827 PMD_DRV_LOG(ERR, 5828 "Invalid parameter passed to rep_fc_r2f " 5829 "devargs.\n"); 5830 return -EINVAL; 5831 } 5832 5833 rep_fc_r2f = strtoul(value, &end, 10); 5834 if (end == NULL || *end != '\0' || 5835 (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) { 5836 PMD_DRV_LOG(ERR, 5837 "Invalid parameter passed to rep_fc_r2f " 5838 "devargs.\n"); 5839 return -EINVAL; 5840 } 5841 5842 if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) { 5843 PMD_DRV_LOG(ERR, 5844 "Invalid value passed to rep_fc_r2f devargs.\n"); 5845 return -EINVAL; 5846 } 5847 5848 vfr_bp->flags |= BNXT_REP_FC_R2F_VALID; 5849 vfr_bp->rep_fc_r2f = rep_fc_r2f; 5850 PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f); 5851 5852 return 0; 5853 } 5854 5855 static int 5856 bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key, 5857 const char *value, void *opaque_arg) 5858 { 5859 struct bnxt_representor *vfr_bp = opaque_arg; 5860 unsigned long rep_fc_f2r; 5861 char *end = NULL; 5862 5863 if (!value || !opaque_arg) { 5864 PMD_DRV_LOG(ERR, 5865 "Invalid parameter passed to rep_fc_f2r " 5866 "devargs.\n"); 5867 return -EINVAL; 5868 } 5869 5870 rep_fc_f2r = strtoul(value, &end, 10); 5871 if (end == NULL || *end != '\0' || 5872 (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) { 5873 PMD_DRV_LOG(ERR, 5874 "Invalid parameter passed to rep_fc_f2r " 5875 "devargs.\n"); 5876 return -EINVAL; 5877 } 5878 5879 if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) { 5880 PMD_DRV_LOG(ERR, 5881 "Invalid value passed to rep_fc_f2r devargs.\n"); 5882 return -EINVAL; 5883 } 5884 5885 vfr_bp->flags |= BNXT_REP_FC_F2R_VALID; 5886 vfr_bp->rep_fc_f2r = rep_fc_f2r; 5887 PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r); 5888 5889 return 0; 5890 } 5891 5892 static int 5893 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) 5894 { 5895 struct rte_kvargs *kvlist; 5896 int ret; 5897 5898 if (devargs == NULL) 5899 return 0; 5900 5901 kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args); 5902 if (kvlist == NULL) 5903 return -EINVAL; 5904 5905 /* 5906 * Handler for "flow_xstat" devarg. 5907 * Invoked as for ex: "-a 0000:00:0d.0,flow_xstat=1" 5908 */ 5909 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT, 5910 bnxt_parse_devarg_flow_xstat, bp); 5911 if (ret) 5912 goto err; 5913 5914 /* 5915 * Handler for "accum-stats" devarg. 5916 * Invoked as for ex: "-a 0000:00:0d.0,accum-stats=1" 5917 */ 5918 rte_kvargs_process(kvlist, BNXT_DEVARG_ACCUM_STATS, 5919 bnxt_parse_devarg_accum_stats, bp); 5920 /* 5921 * Handler for "max_num_kflows" devarg. 
5922 * Invoked as for ex: "-a 000:00:0d.0,max_num_kflows=32" 5923 */ 5924 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS, 5925 bnxt_parse_devarg_max_num_kflows, bp); 5926 if (ret) 5927 goto err; 5928 5929 err: 5930 /* 5931 * Handler for "app-id" devarg. 5932 * Invoked as for ex: "-a 000:00:0d.0,app-id=1" 5933 */ 5934 rte_kvargs_process(kvlist, BNXT_DEVARG_APP_ID, 5935 bnxt_parse_devarg_app_id, bp); 5936 5937 rte_kvargs_free(kvlist); 5938 return ret; 5939 } 5940 5941 static int bnxt_alloc_switch_domain(struct bnxt *bp) 5942 { 5943 int rc = 0; 5944 5945 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 5946 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id); 5947 if (rc) 5948 PMD_DRV_LOG(ERR, 5949 "Failed to alloc switch domain: %d\n", rc); 5950 else 5951 PMD_DRV_LOG(INFO, 5952 "Switch domain allocated %d\n", 5953 bp->switch_domain_id); 5954 } 5955 5956 return rc; 5957 } 5958 5959 /* Allocate and initialize various fields in bnxt struct that 5960 * need to be allocated/destroyed only once in the lifetime of the driver 5961 */ 5962 static int bnxt_drv_init(struct rte_eth_dev *eth_dev) 5963 { 5964 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5965 struct bnxt *bp = eth_dev->data->dev_private; 5966 int rc = 0; 5967 5968 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 5969 5970 if (bnxt_vf_pciid(pci_dev->id.device_id)) 5971 bp->flags |= BNXT_FLAG_VF; 5972 5973 if (bnxt_p5_device(pci_dev->id.device_id)) 5974 bp->flags |= BNXT_FLAG_CHIP_P5; 5975 5976 if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 || 5977 pci_dev->id.device_id == BROADCOM_DEV_ID_58804 || 5978 pci_dev->id.device_id == BROADCOM_DEV_ID_58808 || 5979 pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF) 5980 bp->flags |= BNXT_FLAG_STINGRAY; 5981 5982 if (BNXT_TRUFLOW_EN(bp)) { 5983 /* extra mbuf field is required to store CFA code from mark */ 5984 static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = { 5985 .name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME, 5986 .size = sizeof(bnxt_cfa_code_dynfield_t), 5987 .align = __alignof__(bnxt_cfa_code_dynfield_t), 5988 }; 5989 bnxt_cfa_code_dynfield_offset = 5990 rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc); 5991 if (bnxt_cfa_code_dynfield_offset < 0) { 5992 PMD_DRV_LOG(ERR, 5993 "Failed to register mbuf field for TruFlow mark\n"); 5994 return -rte_errno; 5995 } 5996 } 5997 5998 rc = bnxt_map_pci_bars(eth_dev); 5999 if (rc) { 6000 PMD_DRV_LOG(ERR, 6001 "Failed to initialize board rc: %x\n", rc); 6002 return rc; 6003 } 6004 6005 rc = bnxt_alloc_pf_info(bp); 6006 if (rc) 6007 return rc; 6008 6009 rc = bnxt_alloc_link_info(bp); 6010 if (rc) 6011 return rc; 6012 6013 rc = bnxt_alloc_parent_info(bp); 6014 if (rc) 6015 return rc; 6016 6017 rc = bnxt_alloc_hwrm_resources(bp); 6018 if (rc) { 6019 PMD_DRV_LOG(ERR, 6020 "Failed to allocate response buffer rc: %x\n", rc); 6021 return rc; 6022 } 6023 rc = bnxt_alloc_leds_info(bp); 6024 if (rc) 6025 return rc; 6026 6027 rc = bnxt_alloc_cos_queues(bp); 6028 if (rc) 6029 return rc; 6030 6031 rc = bnxt_init_locks(bp); 6032 if (rc) 6033 return rc; 6034 6035 rc = bnxt_alloc_switch_domain(bp); 6036 if (rc) 6037 return rc; 6038 6039 return rc; 6040 } 6041 6042 static int 6043 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) 6044 { 6045 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 6046 static int version_printed; 6047 struct bnxt *bp; 6048 int rc; 6049 6050 if (version_printed++ == 0) 6051 PMD_DRV_LOG(INFO, "%s\n", bnxt_version); 6052 6053 eth_dev->dev_ops = &bnxt_dev_ops; 6054 
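/* Fast-path burst handlers and descriptor/queue status callbacks are assigned before the secondary-process check below, so secondary processes also pick up valid per-process function pointers. */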
eth_dev->rx_queue_count = bnxt_rx_queue_count_op; 6055 eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op; 6056 eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op; 6057 eth_dev->rx_pkt_burst = &bnxt_recv_pkts; 6058 eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; 6059 6060 /* 6061 * For secondary processes, we don't initialise any further 6062 * as primary has already done this work. 6063 */ 6064 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 6065 return 0; 6066 6067 rte_eth_copy_pci_info(eth_dev, pci_dev); 6068 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 6069 6070 bp = eth_dev->data->dev_private; 6071 6072 /* Parse dev arguments passed on when starting the DPDK application. */ 6073 rc = bnxt_parse_dev_args(bp, pci_dev->device.devargs); 6074 if (rc) 6075 goto error_free; 6076 6077 rc = bnxt_drv_init(eth_dev); 6078 if (rc) 6079 goto error_free; 6080 6081 rc = bnxt_init_resources(bp, false); 6082 if (rc) 6083 goto error_free; 6084 6085 rc = bnxt_alloc_stats_mem(bp); 6086 if (rc) 6087 goto error_free; 6088 6089 PMD_DRV_LOG(INFO, 6090 "Found %s device at mem %" PRIX64 ", node addr %pM\n", 6091 DRV_MODULE_NAME, 6092 pci_dev->mem_resource[0].phys_addr, 6093 pci_dev->mem_resource[0].addr); 6094 6095 return 0; 6096 6097 error_free: 6098 bnxt_dev_uninit(eth_dev); 6099 return rc; 6100 } 6101 6102 6103 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx) 6104 { 6105 if (!ctx) 6106 return; 6107 6108 if (ctx->va) 6109 rte_free(ctx->va); 6110 6111 ctx->va = NULL; 6112 ctx->dma = RTE_BAD_IOVA; 6113 ctx->ctx_id = BNXT_CTX_VAL_INVAL; 6114 } 6115 6116 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp) 6117 { 6118 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 6119 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 6120 bp->flow_stat->rx_fc_out_tbl.ctx_id, 6121 bp->flow_stat->max_fc, 6122 false); 6123 6124 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 6125 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 6126 bp->flow_stat->tx_fc_out_tbl.ctx_id, 6127 bp->flow_stat->max_fc, 6128 false); 6129 6130 if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6131 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id); 6132 bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6133 6134 if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6135 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id); 6136 bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6137 6138 if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6139 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id); 6140 bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6141 6142 if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6143 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id); 6144 bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6145 } 6146 6147 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp) 6148 { 6149 bnxt_unregister_fc_ctx_mem(bp); 6150 6151 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl); 6152 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl); 6153 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl); 6154 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl); 6155 } 6156 6157 static void bnxt_uninit_ctx_mem(struct bnxt *bp) 6158 { 6159 if (BNXT_FLOW_XSTATS_EN(bp)) 6160 bnxt_uninit_fc_ctx_mem(bp); 6161 } 6162 6163 static void 6164 bnxt_free_error_recovery_info(struct bnxt *bp) 6165 { 6166 rte_free(bp->recovery_info); 6167 bp->recovery_info = NULL; 6168 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 6169 } 6170 6171 static int 
static int
bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
{
	int rc;

	bnxt_free_int(bp);
	bnxt_free_mem(bp, reconfig_dev);

	bnxt_hwrm_func_buf_unrgtr(bp);
	if (bp->pf != NULL) {
		rte_free(bp->pf->vf_req_buf);
		bp->pf->vf_req_buf = NULL;
	}

	rc = bnxt_hwrm_func_driver_unregister(bp);
	bp->flags &= ~BNXT_FLAG_REGISTERED;
	bnxt_free_ctx_mem(bp);
	if (!reconfig_dev) {
		bnxt_free_hwrm_resources(bp);
		bnxt_free_error_recovery_info(bp);
	}

	bnxt_uninit_ctx_mem(bp);

	bnxt_free_flow_stats_info(bp);
	if (bp->rep_info != NULL)
		bnxt_free_switch_domain(bp);
	bnxt_free_rep_info(bp);
	rte_free(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	return rc;
}

static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		bnxt_dev_close_op(eth_dev);

	return 0;
}

static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_dev *vf_rep_eth_dev;
	int ret = 0, i;

	if (!bp)
		return -EINVAL;

	for (i = 0; i < bp->num_reps; i++) {
		vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev;
		if (!vf_rep_eth_dev)
			continue;
		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n",
			    vf_rep_eth_dev->data->port_id);
		rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit);
	}
	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n",
		    eth_dev->data->port_id);
	ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit);

	return ret;
}

static void bnxt_free_rep_info(struct bnxt *bp)
{
	rte_free(bp->rep_info);
	bp->rep_info = NULL;
	rte_free(bp->cfa_code_map);
	bp->cfa_code_map = NULL;
}

static int bnxt_init_rep_info(struct bnxt *bp)
{
	int i = 0, rc;

	if (bp->rep_info)
		return 0;

	bp->rep_info = rte_zmalloc("bnxt_rep_info",
				   sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS,
				   0);
	if (!bp->rep_info) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
		return -ENOMEM;
	}
	bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
				       sizeof(*bp->cfa_code_map) *
				       BNXT_MAX_CFA_CODE, 0);
	if (!bp->cfa_code_map) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
		bnxt_free_rep_info(bp);
		return -ENOMEM;
	}

	for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
		bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;

	rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
		bnxt_free_rep_info(bp);
		return rc;
	}

	rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
		bnxt_free_rep_info(bp);
		return rc;
	}

	return rc;
}

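/* Descriptive note: creates the VF representor ports requested via the
 * "representor" devarg on top of the backing PF/trusted-VF ethdev, applying
 * the per-representor devargs parsed below.
 */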
static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
			       struct rte_eth_devargs *eth_da,
			       struct rte_eth_dev *backing_eth_dev,
			       const char *dev_args)
{
	struct rte_eth_dev *vf_rep_eth_dev;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct bnxt *backing_bp;
	uint16_t num_rep;
	int i, ret = 0;
	struct rte_kvargs *kvlist = NULL;

	if (eth_da->type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (eth_da->type != RTE_ETH_REPRESENTOR_VF) {
		PMD_DRV_LOG(ERR, "unsupported representor type %d\n",
			    eth_da->type);
		return -ENOTSUP;
	}
	num_rep = eth_da->nb_representor_ports;
	if (num_rep > BNXT_MAX_VF_REPS) {
		PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
			    num_rep, BNXT_MAX_VF_REPS);
		return -EINVAL;
	}

	if (num_rep >= RTE_MAX_ETHPORTS) {
		PMD_DRV_LOG(ERR,
			    "nb_representor_ports = %d > %d MAX ETHPORTS\n",
			    num_rep, RTE_MAX_ETHPORTS);
		return -EINVAL;
	}

	backing_bp = backing_eth_dev->data->dev_private;

	if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
		PMD_DRV_LOG(ERR,
			    "Not a PF or trusted VF. No Representor support\n");
		/* Returning an error is not an option.
		 * Applications are not handling this correctly
		 */
		return 0;
	}

	if (bnxt_init_rep_info(backing_bp))
		return 0;

	for (i = 0; i < num_rep; i++) {
		struct bnxt_representor representor = {
			.vf_id = eth_da->representor_ports[i],
			.switch_domain_id = backing_bp->switch_domain_id,
			.parent_dev = backing_eth_dev
		};

		if (representor.vf_id >= BNXT_MAX_VF_REPS) {
			PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
				    representor.vf_id, BNXT_MAX_VF_REPS);
			continue;
		}

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			 pci_dev->device.name, eth_da->representor_ports[i]);

		kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args);
		if (kvlist) {
			/*
			 * Handler for "rep_is_pf" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF,
						 bnxt_parse_devarg_rep_is_pf,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep_based_pf" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist,
						 BNXT_DEVARG_REP_BASED_PF,
						 bnxt_parse_devarg_rep_based_pf,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep_q_r2f" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-q-r2f=<rep to endpoint COS queue index>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F,
						 bnxt_parse_devarg_rep_q_r2f,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep_q_f2r" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-q-f2r=<endpoint to rep COS queue index>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R,
						 bnxt_parse_devarg_rep_q_f2r,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep_fc_r2f" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,rep-fc-r2f=<0|1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F,
						 bnxt_parse_devarg_rep_fc_r2f,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep_fc_f2r" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,rep-fc-f2r=<0|1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R,
						 bnxt_parse_devarg_rep_fc_f2r,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
		}

		ret = rte_eth_dev_create(&pci_dev->device, name,
					 sizeof(struct bnxt_representor),
					 NULL, NULL,
					 bnxt_representor_init,
					 &representor);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to create bnxt vf "
				    "representor %s.", name);
			goto err;
		}

		vf_rep_eth_dev = rte_eth_dev_allocated(name);
		if (!vf_rep_eth_dev) {
			PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
				    " for VF-Rep: %s.", name);
			ret = -ENODEV;
			goto err;
		}

		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n",
			    backing_eth_dev->data->port_id);
		backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
			vf_rep_eth_dev;
		backing_bp->num_reps++;
	}

	rte_kvargs_free(kvlist);
	return 0;

err:
	/* If num_rep > 1, then rollback already created
	 * ports, since we'll be failing the probe anyway
	 */
	if (num_rep > 1)
		bnxt_pci_remove_dev_with_reps(backing_eth_dev);
	rte_errno = -ret;
	rte_kvargs_free(kvlist);

	return ret;
}

static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			  struct rte_pci_device *pci_dev)
{
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	struct rte_eth_dev *backing_eth_dev;
	uint16_t num_rep;
	int ret = 0;

	if (pci_dev->device.devargs) {
		ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					    &eth_da);
		if (ret)
			return ret;
	}

	num_rep = eth_da.nb_representor_ports;
	PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
		    num_rep);

	/* We could come here after first level of probe is already invoked
	 * as part of an application bringup(OVS-DPDK vswitchd), so first check
	 * for already allocated eth_dev for the backing device (PF/Trusted VF)
	 */
	backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (backing_eth_dev == NULL) {
		ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
					 sizeof(struct bnxt),
					 eth_dev_pci_specific_init, pci_dev,
					 bnxt_dev_init, NULL);

		if (ret || !num_rep)
			return ret;

		backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	}
	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n",
		    backing_eth_dev->data->port_id);

	if (!num_rep)
		return ret;

	/* probe representor ports now */
	ret = bnxt_rep_port_probe(pci_dev, &eth_da, backing_eth_dev,
				  pci_dev->device.devargs->args);

	return ret;
}

static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return 0; /* Invoked typically only by OVS-DPDK, by the
			   * time it comes here the eth_dev is already
			   * deleted by rte_eth_dev_close(), so returning
			   * +ve value will at least help in proper cleanup
			   */

	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_representor_uninit);
		else
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_dev_uninit);
	} else {
		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
	}
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_INTR_RMV |
			RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
						  * and OVS-DPDK
						  */
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
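
/*
 * Illustrative usage only (PCI address and values below are placeholders,
 * not defaults): the devargs handled in this file are passed per device on
 * the EAL command line, e.g. a PF with two VF representors:
 *
 *   dpdk-testpmd -a 0000:82:00.0,representor=[0-1],rep-based-pf=0,\
 *       rep-is-pf=0,rep-q-r2f=1,rep-q-f2r=1,rep-fc-r2f=0,rep-fc-f2r=1 -- -i
 */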