1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2014-2018 Broadcom 3 * All rights reserved. 4 */ 5 6 #include <inttypes.h> 7 #include <stdbool.h> 8 9 #include <rte_dev.h> 10 #include <rte_ethdev_driver.h> 11 #include <rte_ethdev_pci.h> 12 #include <rte_malloc.h> 13 #include <rte_cycles.h> 14 #include <rte_alarm.h> 15 #include <rte_kvargs.h> 16 #include <rte_vect.h> 17 18 #include "bnxt.h" 19 #include "bnxt_filter.h" 20 #include "bnxt_hwrm.h" 21 #include "bnxt_irq.h" 22 #include "bnxt_reps.h" 23 #include "bnxt_ring.h" 24 #include "bnxt_rxq.h" 25 #include "bnxt_rxr.h" 26 #include "bnxt_stats.h" 27 #include "bnxt_txq.h" 28 #include "bnxt_txr.h" 29 #include "bnxt_vnic.h" 30 #include "hsi_struct_def_dpdk.h" 31 #include "bnxt_nvm_defs.h" 32 #include "bnxt_tf_common.h" 33 #include "ulp_flow_db.h" 34 #include "rte_pmd_bnxt.h" 35 36 #define DRV_MODULE_NAME "bnxt" 37 static const char bnxt_version[] = 38 "Broadcom NetXtreme driver " DRV_MODULE_NAME; 39 40 /* 41 * The set of PCI devices this driver supports 42 */ 43 static const struct rte_pci_id bnxt_pci_id_map[] = { 44 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 45 BROADCOM_DEV_ID_STRATUS_NIC_VF1) }, 46 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 47 BROADCOM_DEV_ID_STRATUS_NIC_VF2) }, 48 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) }, 49 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) }, 50 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) }, 51 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) }, 52 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) }, 53 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) }, 54 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) }, 55 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) }, 56 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) }, 57 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) }, 58 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) }, 59 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) }, 60 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) }, 61 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) }, 62 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) }, 63 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) }, 64 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) }, 65 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) }, 66 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) }, 67 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) }, 68 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) }, 69 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) }, 70 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) }, 71 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) }, 72 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) }, 73 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) }, 74 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) }, 75 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) }, 76 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) }, 77 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) }, 78 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) }, 79 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 
BROADCOM_DEV_ID_57317_SFP) }, 80 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) }, 81 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) }, 82 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) }, 83 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) }, 84 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) }, 85 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) }, 86 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) }, 87 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) }, 88 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) }, 89 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) }, 90 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) }, 91 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) }, 92 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) }, 93 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) }, 94 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) }, 95 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) }, 96 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) }, 97 { .vendor_id = 0, /* sentinel */ }, 98 }; 99 100 #define BNXT_DEVARG_TRUFLOW "host-based-truflow" 101 #define BNXT_DEVARG_FLOW_XSTAT "flow-xstat" 102 #define BNXT_DEVARG_MAX_NUM_KFLOWS "max-num-kflows" 103 #define BNXT_DEVARG_REPRESENTOR "representor" 104 #define BNXT_DEVARG_REP_BASED_PF "rep-based-pf" 105 #define BNXT_DEVARG_REP_IS_PF "rep-is-pf" 106 #define BNXT_DEVARG_REP_Q_R2F "rep-q-r2f" 107 #define BNXT_DEVARG_REP_Q_F2R "rep-q-f2r" 108 #define BNXT_DEVARG_REP_FC_R2F "rep-fc-r2f" 109 #define BNXT_DEVARG_REP_FC_F2R "rep-fc-f2r" 110 111 static const char *const bnxt_dev_args[] = { 112 BNXT_DEVARG_REPRESENTOR, 113 BNXT_DEVARG_TRUFLOW, 114 BNXT_DEVARG_FLOW_XSTAT, 115 BNXT_DEVARG_MAX_NUM_KFLOWS, 116 BNXT_DEVARG_REP_BASED_PF, 117 BNXT_DEVARG_REP_IS_PF, 118 BNXT_DEVARG_REP_Q_R2F, 119 BNXT_DEVARG_REP_Q_F2R, 120 BNXT_DEVARG_REP_FC_R2F, 121 BNXT_DEVARG_REP_FC_F2R, 122 NULL 123 }; 124 125 /* 126 * truflow == false to disable the feature 127 * truflow == true to enable the feature 128 */ 129 #define BNXT_DEVARG_TRUFLOW_INVALID(truflow) ((truflow) > 1) 130 131 /* 132 * flow_xstat == false to disable the feature 133 * flow_xstat == true to enable the feature 134 */ 135 #define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat) ((flow_xstat) > 1) 136 137 /* 138 * rep_is_pf == false to indicate VF representor 139 * rep_is_pf == true to indicate PF representor 140 */ 141 #define BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf) ((rep_is_pf) > 1) 142 143 /* 144 * rep_based_pf == Physical index of the PF 145 */ 146 #define BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf) ((rep_based_pf) > 15) 147 /* 148 * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction 149 */ 150 #define BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f) ((rep_q_r2f) > 3) 151 152 /* 153 * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction 154 */ 155 #define BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r) ((rep_q_f2r) > 3) 156 157 /* 158 * rep_fc_r2f == Flow control for the representor to endpoint direction 159 */ 160 #define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f) ((rep_fc_r2f) > 1) 161 162 /* 163 * rep_fc_f2r == Flow control for the endpoint to representor direction 164 */ 165 #define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r) ((rep_fc_f2r) > 1) 166 167 int bnxt_cfa_code_dynfield_offset 
= -1; 168 169 /* 170 * max_num_kflows must be >= 32 171 * and must be a power-of-2 supported value 172 * return: 1 -> invalid 173 * 0 -> valid 174 */ 175 static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows) 176 { 177 if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows)) 178 return 1; 179 return 0; 180 } 181 182 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); 183 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); 184 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev); 185 static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev); 186 static void bnxt_cancel_fw_health_check(struct bnxt *bp); 187 static int bnxt_restore_vlan_filters(struct bnxt *bp); 188 static void bnxt_dev_recover(void *arg); 189 static void bnxt_free_error_recovery_info(struct bnxt *bp); 190 static void bnxt_free_rep_info(struct bnxt *bp); 191 192 int is_bnxt_in_error(struct bnxt *bp) 193 { 194 if (bp->flags & BNXT_FLAG_FATAL_ERROR) 195 return -EIO; 196 if (bp->flags & BNXT_FLAG_FW_RESET) 197 return -EBUSY; 198 199 return 0; 200 } 201 202 /***********************/ 203 204 /* 205 * High level utility functions 206 */ 207 208 static uint16_t bnxt_rss_ctxts(const struct bnxt *bp) 209 { 210 if (!BNXT_CHIP_THOR(bp)) 211 return 1; 212 213 return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings, 214 BNXT_RSS_ENTRIES_PER_CTX_THOR) / 215 BNXT_RSS_ENTRIES_PER_CTX_THOR; 216 } 217 218 uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp) 219 { 220 if (!BNXT_CHIP_THOR(bp)) 221 return HW_HASH_INDEX_SIZE; 222 223 return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR; 224 } 225 226 static void bnxt_free_parent_info(struct bnxt *bp) 227 { 228 rte_free(bp->parent); 229 } 230 231 static void bnxt_free_pf_info(struct bnxt *bp) 232 { 233 rte_free(bp->pf); 234 } 235 236 static void bnxt_free_link_info(struct bnxt *bp) 237 { 238 rte_free(bp->link_info); 239 } 240 241 static void bnxt_free_leds_info(struct bnxt *bp) 242 { 243 if (BNXT_VF(bp)) 244 return; 245 246 rte_free(bp->leds); 247 bp->leds = NULL; 248 } 249 250 static void bnxt_free_flow_stats_info(struct bnxt *bp) 251 { 252 rte_free(bp->flow_stat); 253 bp->flow_stat = NULL; 254 } 255 256 static void bnxt_free_cos_queues(struct bnxt *bp) 257 { 258 rte_free(bp->rx_cos_queue); 259 rte_free(bp->tx_cos_queue); 260 } 261 262 static void bnxt_free_mem(struct bnxt *bp, bool reconfig) 263 { 264 bnxt_free_filter_mem(bp); 265 bnxt_free_vnic_attributes(bp); 266 bnxt_free_vnic_mem(bp); 267 268 /* tx/rx rings are configured as part of *_queue_setup callbacks. 269 * If the number of rings change across fw update, 270 * we don't have much choice except to warn the user. 
271 */ 272 if (!reconfig) { 273 bnxt_free_stats(bp); 274 bnxt_free_tx_rings(bp); 275 bnxt_free_rx_rings(bp); 276 } 277 bnxt_free_async_cp_ring(bp); 278 bnxt_free_rxtx_nq_ring(bp); 279 280 rte_free(bp->grp_info); 281 bp->grp_info = NULL; 282 } 283 284 static int bnxt_alloc_parent_info(struct bnxt *bp) 285 { 286 bp->parent = rte_zmalloc("bnxt_parent_info", 287 sizeof(struct bnxt_parent_info), 0); 288 if (bp->parent == NULL) 289 return -ENOMEM; 290 291 return 0; 292 } 293 294 static int bnxt_alloc_pf_info(struct bnxt *bp) 295 { 296 bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0); 297 if (bp->pf == NULL) 298 return -ENOMEM; 299 300 return 0; 301 } 302 303 static int bnxt_alloc_link_info(struct bnxt *bp) 304 { 305 bp->link_info = 306 rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0); 307 if (bp->link_info == NULL) 308 return -ENOMEM; 309 310 return 0; 311 } 312 313 static int bnxt_alloc_leds_info(struct bnxt *bp) 314 { 315 if (BNXT_VF(bp)) 316 return 0; 317 318 bp->leds = rte_zmalloc("bnxt_leds", 319 BNXT_MAX_LED * sizeof(struct bnxt_led_info), 320 0); 321 if (bp->leds == NULL) 322 return -ENOMEM; 323 324 return 0; 325 } 326 327 static int bnxt_alloc_cos_queues(struct bnxt *bp) 328 { 329 bp->rx_cos_queue = 330 rte_zmalloc("bnxt_rx_cosq", 331 BNXT_COS_QUEUE_COUNT * 332 sizeof(struct bnxt_cos_queue_info), 333 0); 334 if (bp->rx_cos_queue == NULL) 335 return -ENOMEM; 336 337 bp->tx_cos_queue = 338 rte_zmalloc("bnxt_tx_cosq", 339 BNXT_COS_QUEUE_COUNT * 340 sizeof(struct bnxt_cos_queue_info), 341 0); 342 if (bp->tx_cos_queue == NULL) 343 return -ENOMEM; 344 345 return 0; 346 } 347 348 static int bnxt_alloc_flow_stats_info(struct bnxt *bp) 349 { 350 bp->flow_stat = rte_zmalloc("bnxt_flow_xstat", 351 sizeof(struct bnxt_flow_stat_info), 0); 352 if (bp->flow_stat == NULL) 353 return -ENOMEM; 354 355 return 0; 356 } 357 358 static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) 359 { 360 int rc; 361 362 rc = bnxt_alloc_ring_grps(bp); 363 if (rc) 364 goto alloc_mem_err; 365 366 rc = bnxt_alloc_async_ring_struct(bp); 367 if (rc) 368 goto alloc_mem_err; 369 370 rc = bnxt_alloc_vnic_mem(bp); 371 if (rc) 372 goto alloc_mem_err; 373 374 rc = bnxt_alloc_vnic_attributes(bp); 375 if (rc) 376 goto alloc_mem_err; 377 378 rc = bnxt_alloc_filter_mem(bp); 379 if (rc) 380 goto alloc_mem_err; 381 382 rc = bnxt_alloc_async_cp_ring(bp); 383 if (rc) 384 goto alloc_mem_err; 385 386 rc = bnxt_alloc_rxtx_nq_ring(bp); 387 if (rc) 388 goto alloc_mem_err; 389 390 if (BNXT_FLOW_XSTATS_EN(bp)) { 391 rc = bnxt_alloc_flow_stats_info(bp); 392 if (rc) 393 goto alloc_mem_err; 394 } 395 396 return 0; 397 398 alloc_mem_err: 399 bnxt_free_mem(bp, reconfig); 400 return rc; 401 } 402 403 static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id) 404 { 405 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 406 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 407 uint64_t rx_offloads = dev_conf->rxmode.offloads; 408 struct bnxt_rx_queue *rxq; 409 unsigned int j; 410 int rc; 411 412 rc = bnxt_vnic_grp_alloc(bp, vnic); 413 if (rc) 414 goto err_out; 415 416 PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 417 vnic_id, vnic, vnic->fw_grp_ids); 418 419 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 420 if (rc) 421 goto err_out; 422 423 /* Alloc RSS context only if RSS mode is enabled */ 424 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) { 425 int j, nr_ctxs = bnxt_rss_ctxts(bp); 426 427 rc = 0; 428 for (j = 0; j < nr_ctxs; j++) { 429 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j); 430 if 
(rc) 431 break; 432 } 433 if (rc) { 434 PMD_DRV_LOG(ERR, 435 "HWRM vnic %d ctx %d alloc failure rc: %x\n", 436 vnic_id, j, rc); 437 goto err_out; 438 } 439 vnic->num_lb_ctxts = nr_ctxs; 440 } 441 442 /* 443 * Firmware sets pf pair in default vnic cfg. If the VLAN strip 444 * setting is not available at this time, it will not be 445 * configured correctly in the CFA. 446 */ 447 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 448 vnic->vlan_strip = true; 449 else 450 vnic->vlan_strip = false; 451 452 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 453 if (rc) 454 goto err_out; 455 456 rc = bnxt_set_hwrm_vnic_filters(bp, vnic); 457 if (rc) 458 goto err_out; 459 460 for (j = 0; j < bp->rx_num_qs_per_vnic; j++) { 461 rxq = bp->eth_dev->data->rx_queues[j]; 462 463 PMD_DRV_LOG(DEBUG, 464 "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n", 465 j, rxq->vnic, rxq->vnic->fw_grp_ids); 466 467 if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start) 468 rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; 469 else 470 vnic->rx_queue_cnt++; 471 } 472 473 PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt); 474 475 rc = bnxt_vnic_rss_configure(bp, vnic); 476 if (rc) 477 goto err_out; 478 479 bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 480 481 if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) 482 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1); 483 else 484 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0); 485 486 return 0; 487 err_out: 488 PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n", 489 vnic_id, rc); 490 return rc; 491 } 492 493 static int bnxt_register_fc_ctx_mem(struct bnxt *bp) 494 { 495 int rc = 0; 496 497 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma, 498 &bp->flow_stat->rx_fc_in_tbl.ctx_id); 499 if (rc) 500 return rc; 501 502 PMD_DRV_LOG(DEBUG, 503 "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p" 504 " rx_fc_in_tbl.ctx_id = %d\n", 505 bp->flow_stat->rx_fc_in_tbl.va, 506 (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma), 507 bp->flow_stat->rx_fc_in_tbl.ctx_id); 508 509 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma, 510 &bp->flow_stat->rx_fc_out_tbl.ctx_id); 511 if (rc) 512 return rc; 513 514 PMD_DRV_LOG(DEBUG, 515 "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p" 516 " rx_fc_out_tbl.ctx_id = %d\n", 517 bp->flow_stat->rx_fc_out_tbl.va, 518 (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma), 519 bp->flow_stat->rx_fc_out_tbl.ctx_id); 520 521 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma, 522 &bp->flow_stat->tx_fc_in_tbl.ctx_id); 523 if (rc) 524 return rc; 525 526 PMD_DRV_LOG(DEBUG, 527 "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p" 528 " tx_fc_in_tbl.ctx_id = %d\n", 529 bp->flow_stat->tx_fc_in_tbl.va, 530 (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma), 531 bp->flow_stat->tx_fc_in_tbl.ctx_id); 532 533 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma, 534 &bp->flow_stat->tx_fc_out_tbl.ctx_id); 535 if (rc) 536 return rc; 537 538 PMD_DRV_LOG(DEBUG, 539 "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p" 540 " tx_fc_out_tbl.ctx_id = %d\n", 541 bp->flow_stat->tx_fc_out_tbl.va, 542 (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma), 543 bp->flow_stat->tx_fc_out_tbl.ctx_id); 544 545 memset(bp->flow_stat->rx_fc_out_tbl.va, 546 0, 547 bp->flow_stat->rx_fc_out_tbl.size); 548 rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 549 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 550 bp->flow_stat->rx_fc_out_tbl.ctx_id, 551 bp->flow_stat->max_fc, 552 true); 553 if (rc) 554 return rc; 555 556 memset(bp->flow_stat->tx_fc_out_tbl.va, 557 0, 558 bp->flow_stat->tx_fc_out_tbl.size); 559 rc = bnxt_hwrm_cfa_counter_cfg(bp, 
BNXT_DIR_TX, 560 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 561 bp->flow_stat->tx_fc_out_tbl.ctx_id, 562 bp->flow_stat->max_fc, 563 true); 564 565 return rc; 566 } 567 568 static int bnxt_alloc_ctx_mem_buf(char *type, size_t size, 569 struct bnxt_ctx_mem_buf_info *ctx) 570 { 571 if (!ctx) 572 return -EINVAL; 573 574 ctx->va = rte_zmalloc(type, size, 0); 575 if (ctx->va == NULL) 576 return -ENOMEM; 577 rte_mem_lock_page(ctx->va); 578 ctx->size = size; 579 ctx->dma = rte_mem_virt2iova(ctx->va); 580 if (ctx->dma == RTE_BAD_IOVA) 581 return -ENOMEM; 582 583 return 0; 584 } 585 586 static int bnxt_init_fc_ctx_mem(struct bnxt *bp) 587 { 588 struct rte_pci_device *pdev = bp->pdev; 589 char type[RTE_MEMZONE_NAMESIZE]; 590 uint16_t max_fc; 591 int rc = 0; 592 593 max_fc = bp->flow_stat->max_fc; 594 595 sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 596 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 597 /* 4 bytes for each counter-id */ 598 rc = bnxt_alloc_ctx_mem_buf(type, 599 max_fc * 4, 600 &bp->flow_stat->rx_fc_in_tbl); 601 if (rc) 602 return rc; 603 604 sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 605 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 606 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 607 rc = bnxt_alloc_ctx_mem_buf(type, 608 max_fc * 16, 609 &bp->flow_stat->rx_fc_out_tbl); 610 if (rc) 611 return rc; 612 613 sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 614 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 615 /* 4 bytes for each counter-id */ 616 rc = bnxt_alloc_ctx_mem_buf(type, 617 max_fc * 4, 618 &bp->flow_stat->tx_fc_in_tbl); 619 if (rc) 620 return rc; 621 622 sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 623 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 624 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 625 rc = bnxt_alloc_ctx_mem_buf(type, 626 max_fc * 16, 627 &bp->flow_stat->tx_fc_out_tbl); 628 if (rc) 629 return rc; 630 631 rc = bnxt_register_fc_ctx_mem(bp); 632 633 return rc; 634 } 635 636 static int bnxt_init_ctx_mem(struct bnxt *bp) 637 { 638 int rc = 0; 639 640 if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) || 641 !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) || 642 !BNXT_FLOW_XSTATS_EN(bp)) 643 return 0; 644 645 rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc); 646 if (rc) 647 return rc; 648 649 rc = bnxt_init_fc_ctx_mem(bp); 650 651 return rc; 652 } 653 654 static int bnxt_update_phy_setting(struct bnxt *bp) 655 { 656 struct rte_eth_link new; 657 int rc; 658 659 rc = bnxt_get_hwrm_link_config(bp, &new); 660 if (rc) { 661 PMD_DRV_LOG(ERR, "Failed to get link settings\n"); 662 return rc; 663 } 664 665 /* 666 * On BCM957508-N2100 adapters, FW will not allow any user other 667 * than BMC to shutdown the port. bnxt_get_hwrm_link_config() call 668 * always returns link up. Force phy update always in that case. 
669 */ 670 if (!new.link_status || IS_BNXT_DEV_957508_N2100(bp)) { 671 rc = bnxt_set_hwrm_link_config(bp, true); 672 if (rc) { 673 PMD_DRV_LOG(ERR, "Failed to update PHY settings\n"); 674 return rc; 675 } 676 } 677 678 return rc; 679 } 680 681 static int bnxt_init_chip(struct bnxt *bp) 682 { 683 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); 684 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 685 uint32_t intr_vector = 0; 686 uint32_t queue_id, base = BNXT_MISC_VEC_ID; 687 uint32_t vec = BNXT_MISC_VEC_ID; 688 unsigned int i, j; 689 int rc; 690 691 if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) { 692 bp->eth_dev->data->dev_conf.rxmode.offloads |= 693 DEV_RX_OFFLOAD_JUMBO_FRAME; 694 bp->flags |= BNXT_FLAG_JUMBO; 695 } else { 696 bp->eth_dev->data->dev_conf.rxmode.offloads &= 697 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 698 bp->flags &= ~BNXT_FLAG_JUMBO; 699 } 700 701 /* THOR does not support ring groups. 702 * But we will use the array to save RSS context IDs. 703 */ 704 if (BNXT_CHIP_THOR(bp)) 705 bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR; 706 707 rc = bnxt_alloc_all_hwrm_stat_ctxs(bp); 708 if (rc) { 709 PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc); 710 goto err_out; 711 } 712 713 rc = bnxt_alloc_hwrm_rings(bp); 714 if (rc) { 715 PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc); 716 goto err_out; 717 } 718 719 rc = bnxt_alloc_all_hwrm_ring_grps(bp); 720 if (rc) { 721 PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc); 722 goto err_out; 723 } 724 725 if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)) 726 goto skip_cosq_cfg; 727 728 for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) { 729 if (bp->rx_cos_queue[i].id != 0xff) { 730 struct bnxt_vnic_info *vnic = &bp->vnic_info[j++]; 731 732 if (!vnic) { 733 PMD_DRV_LOG(ERR, 734 "Num pools more than FW profile\n"); 735 rc = -EINVAL; 736 goto err_out; 737 } 738 vnic->cos_queue_id = bp->rx_cos_queue[i].id; 739 bp->rx_cosq_cnt++; 740 } 741 } 742 743 skip_cosq_cfg: 744 rc = bnxt_mq_rx_configure(bp); 745 if (rc) { 746 PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc); 747 goto err_out; 748 } 749 750 /* VNIC configuration */ 751 for (i = 0; i < bp->nr_vnics; i++) { 752 rc = bnxt_setup_one_vnic(bp, i); 753 if (rc) 754 goto err_out; 755 } 756 757 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); 758 if (rc) { 759 PMD_DRV_LOG(ERR, 760 "HWRM cfa l2 rx mask failure rc: %x\n", rc); 761 goto err_out; 762 } 763 764 /* check and configure queue intr-vector mapping */ 765 if ((rte_intr_cap_multiple(intr_handle) || 766 !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && 767 bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { 768 intr_vector = bp->eth_dev->data->nb_rx_queues; 769 PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector); 770 if (intr_vector > bp->rx_cp_nr_rings) { 771 PMD_DRV_LOG(ERR, "At most %d intr queues supported", 772 bp->rx_cp_nr_rings); 773 return -ENOTSUP; 774 } 775 rc = rte_intr_efd_enable(intr_handle, intr_vector); 776 if (rc) 777 return rc; 778 } 779 780 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 781 intr_handle->intr_vec = 782 rte_zmalloc("intr_vec", 783 bp->eth_dev->data->nb_rx_queues * 784 sizeof(int), 0); 785 if (intr_handle->intr_vec == NULL) { 786 PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" 787 " intr_vec", bp->eth_dev->data->nb_rx_queues); 788 rc = -ENOMEM; 789 goto err_disable; 790 } 791 PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p " 792 "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n", 793 intr_handle->intr_vec, 
intr_handle->nb_efd, 794 intr_handle->max_intr); 795 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; 796 queue_id++) { 797 intr_handle->intr_vec[queue_id] = 798 vec + BNXT_RX_VEC_START; 799 if (vec < base + intr_handle->nb_efd - 1) 800 vec++; 801 } 802 } 803 804 /* enable uio/vfio intr/eventfd mapping */ 805 rc = rte_intr_enable(intr_handle); 806 #ifndef RTE_EXEC_ENV_FREEBSD 807 /* In FreeBSD OS, nic_uio driver does not support interrupts */ 808 if (rc) 809 goto err_free; 810 #endif 811 812 rc = bnxt_update_phy_setting(bp); 813 if (rc) 814 goto err_free; 815 816 bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0); 817 if (!bp->mark_table) 818 PMD_DRV_LOG(ERR, "Allocation of mark table failed\n"); 819 820 return 0; 821 822 err_free: 823 rte_free(intr_handle->intr_vec); 824 err_disable: 825 rte_intr_efd_disable(intr_handle); 826 err_out: 827 /* Some of the error status returned by FW may not be from errno.h */ 828 if (rc > 0) 829 rc = -EIO; 830 831 return rc; 832 } 833 834 static int bnxt_shutdown_nic(struct bnxt *bp) 835 { 836 bnxt_free_all_hwrm_resources(bp); 837 bnxt_free_all_filters(bp); 838 bnxt_free_all_vnics(bp); 839 return 0; 840 } 841 842 /* 843 * Device configuration and status function 844 */ 845 846 uint32_t bnxt_get_speed_capabilities(struct bnxt *bp) 847 { 848 uint32_t link_speed = bp->link_info->support_speeds; 849 uint32_t speed_capa = 0; 850 851 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB) 852 speed_capa |= ETH_LINK_SPEED_100M; 853 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD) 854 speed_capa |= ETH_LINK_SPEED_100M_HD; 855 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) 856 speed_capa |= ETH_LINK_SPEED_1G; 857 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB) 858 speed_capa |= ETH_LINK_SPEED_2_5G; 859 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB) 860 speed_capa |= ETH_LINK_SPEED_10G; 861 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB) 862 speed_capa |= ETH_LINK_SPEED_20G; 863 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB) 864 speed_capa |= ETH_LINK_SPEED_25G; 865 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB) 866 speed_capa |= ETH_LINK_SPEED_40G; 867 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) 868 speed_capa |= ETH_LINK_SPEED_50G; 869 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) 870 speed_capa |= ETH_LINK_SPEED_100G; 871 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) 872 speed_capa |= ETH_LINK_SPEED_50G; 873 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) 874 speed_capa |= ETH_LINK_SPEED_100G; 875 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G) 876 speed_capa |= ETH_LINK_SPEED_200G; 877 878 if (bp->link_info->auto_mode == 879 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE) 880 speed_capa |= ETH_LINK_SPEED_FIXED; 881 else 882 speed_capa |= ETH_LINK_SPEED_AUTONEG; 883 884 return speed_capa; 885 } 886 887 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, 888 struct rte_eth_dev_info *dev_info) 889 { 890 struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device); 891 struct bnxt *bp = eth_dev->data->dev_private; 892 uint16_t max_vnics, i, j, vpool, vrxq; 893 unsigned int max_rx_rings; 894 int rc; 895 896 rc = is_bnxt_in_error(bp); 897 if (rc) 898 return rc; 899 900 /* MAC Specifics */ 901 dev_info->max_mac_addrs = bp->max_l2_ctx; 902 dev_info->max_hash_mac_addrs = 0; 903 904 /* PF/VF specifics */ 905 if 
(BNXT_PF(bp)) 906 dev_info->max_vfs = pdev->max_vfs; 907 908 max_rx_rings = BNXT_MAX_RINGS(bp); 909 /* For the sake of symmetry, max_rx_queues = max_tx_queues */ 910 dev_info->max_rx_queues = max_rx_rings; 911 dev_info->max_tx_queues = max_rx_rings; 912 dev_info->reta_size = bnxt_rss_hash_tbl_size(bp); 913 dev_info->hash_key_size = 40; 914 max_vnics = bp->max_vnics; 915 916 /* MTU specifics */ 917 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 918 dev_info->max_mtu = BNXT_MAX_MTU; 919 920 /* Fast path specifics */ 921 dev_info->min_rx_bufsize = 1; 922 dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN; 923 924 dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT; 925 if (bp->flags & BNXT_FLAG_PTP_SUPPORTED) 926 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; 927 dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; 928 dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT | 929 dev_info->tx_queue_offload_capa; 930 dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT; 931 932 dev_info->speed_capa = bnxt_get_speed_capabilities(bp); 933 934 /* *INDENT-OFF* */ 935 dev_info->default_rxconf = (struct rte_eth_rxconf) { 936 .rx_thresh = { 937 .pthresh = 8, 938 .hthresh = 8, 939 .wthresh = 0, 940 }, 941 .rx_free_thresh = 32, 942 .rx_drop_en = BNXT_DEFAULT_RX_DROP_EN, 943 }; 944 945 dev_info->default_txconf = (struct rte_eth_txconf) { 946 .tx_thresh = { 947 .pthresh = 32, 948 .hthresh = 0, 949 .wthresh = 0, 950 }, 951 .tx_free_thresh = 32, 952 .tx_rs_thresh = 32, 953 }; 954 eth_dev->data->dev_conf.intr_conf.lsc = 1; 955 956 eth_dev->data->dev_conf.intr_conf.rxq = 1; 957 dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 958 dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; 959 dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 960 dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC; 961 962 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 963 dev_info->switch_info.name = eth_dev->device->name; 964 dev_info->switch_info.domain_id = bp->switch_domain_id; 965 dev_info->switch_info.port_id = 966 BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF : 967 BNXT_SWITCH_PORT_ID_TRUSTED_VF; 968 } 969 970 /* *INDENT-ON* */ 971 972 /* 973 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim 974 * need further investigation. 
975 */ 976 977 /* VMDq resources */ 978 vpool = 64; /* ETH_64_POOLS */ 979 vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */ 980 for (i = 0; i < 4; vpool >>= 1, i++) { 981 if (max_vnics > vpool) { 982 for (j = 0; j < 5; vrxq >>= 1, j++) { 983 if (dev_info->max_rx_queues > vrxq) { 984 if (vpool > vrxq) 985 vpool = vrxq; 986 goto found; 987 } 988 } 989 /* Not enough resources to support VMDq */ 990 break; 991 } 992 } 993 /* Not enough resources to support VMDq */ 994 vpool = 0; 995 vrxq = 0; 996 found: 997 dev_info->max_vmdq_pools = vpool; 998 dev_info->vmdq_queue_num = vrxq; 999 1000 dev_info->vmdq_pool_base = 0; 1001 dev_info->vmdq_queue_base = 0; 1002 1003 return 0; 1004 } 1005 1006 /* Configure the device based on the configuration provided */ 1007 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) 1008 { 1009 struct bnxt *bp = eth_dev->data->dev_private; 1010 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1011 int rc; 1012 1013 bp->rx_queues = (void *)eth_dev->data->rx_queues; 1014 bp->tx_queues = (void *)eth_dev->data->tx_queues; 1015 bp->tx_nr_rings = eth_dev->data->nb_tx_queues; 1016 bp->rx_nr_rings = eth_dev->data->nb_rx_queues; 1017 1018 rc = is_bnxt_in_error(bp); 1019 if (rc) 1020 return rc; 1021 1022 if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) { 1023 rc = bnxt_hwrm_check_vf_rings(bp); 1024 if (rc) { 1025 PMD_DRV_LOG(ERR, "HWRM insufficient resources\n"); 1026 return -ENOSPC; 1027 } 1028 1029 /* If a resource has already been allocated - in this case 1030 * it is the async completion ring, free it. Reallocate it after 1031 * resource reservation. This will ensure the resource counts 1032 * are calculated correctly. 1033 */ 1034 1035 pthread_mutex_lock(&bp->def_cp_lock); 1036 1037 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1038 bnxt_disable_int(bp); 1039 bnxt_free_cp_ring(bp, bp->async_cp_ring); 1040 } 1041 1042 rc = bnxt_hwrm_func_reserve_vf_resc(bp, false); 1043 if (rc) { 1044 PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc); 1045 pthread_mutex_unlock(&bp->def_cp_lock); 1046 return -ENOSPC; 1047 } 1048 1049 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1050 rc = bnxt_alloc_async_cp_ring(bp); 1051 if (rc) { 1052 pthread_mutex_unlock(&bp->def_cp_lock); 1053 return rc; 1054 } 1055 bnxt_enable_int(bp); 1056 } 1057 1058 pthread_mutex_unlock(&bp->def_cp_lock); 1059 } else { 1060 /* legacy driver needs to get updated values */ 1061 rc = bnxt_hwrm_func_qcaps(bp); 1062 if (rc) { 1063 PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc); 1064 return rc; 1065 } 1066 } 1067 1068 /* Inherit new configurations */ 1069 if (eth_dev->data->nb_rx_queues > bp->max_rx_rings || 1070 eth_dev->data->nb_tx_queues > bp->max_tx_rings || 1071 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues 1072 + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings || 1073 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues > 1074 bp->max_stat_ctx) 1075 goto resource_error; 1076 1077 if (BNXT_HAS_RING_GRPS(bp) && 1078 (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps) 1079 goto resource_error; 1080 1081 if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) && 1082 bp->max_vnics < eth_dev->data->nb_rx_queues) 1083 goto resource_error; 1084 1085 bp->rx_cp_nr_rings = bp->rx_nr_rings; 1086 bp->tx_cp_nr_rings = bp->tx_nr_rings; 1087 1088 if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 1089 rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH; 1090 eth_dev->data->dev_conf.rxmode.offloads = rx_offloads; 1091 1092 if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 1093 
eth_dev->data->mtu = 1094 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - 1095 RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE * 1096 BNXT_NUM_VLANS; 1097 bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu); 1098 } 1099 return 0; 1100 1101 resource_error: 1102 PMD_DRV_LOG(ERR, 1103 "Insufficient resources to support requested config\n"); 1104 PMD_DRV_LOG(ERR, 1105 "Num Queues Requested: Tx %d, Rx %d\n", 1106 eth_dev->data->nb_tx_queues, 1107 eth_dev->data->nb_rx_queues); 1108 PMD_DRV_LOG(ERR, 1109 "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n", 1110 bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings, 1111 bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics); 1112 return -ENOSPC; 1113 } 1114 1115 void bnxt_print_link_info(struct rte_eth_dev *eth_dev) 1116 { 1117 struct rte_eth_link *link = ð_dev->data->dev_link; 1118 1119 if (link->link_status) 1120 PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n", 1121 eth_dev->data->port_id, 1122 (uint32_t)link->link_speed, 1123 (link->link_duplex == ETH_LINK_FULL_DUPLEX) ? 1124 ("full-duplex") : ("half-duplex\n")); 1125 else 1126 PMD_DRV_LOG(INFO, "Port %d Link Down\n", 1127 eth_dev->data->port_id); 1128 } 1129 1130 /* 1131 * Determine whether the current configuration requires support for scattered 1132 * receive; return 1 if scattered receive is required and 0 if not. 1133 */ 1134 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev) 1135 { 1136 uint16_t buf_size; 1137 int i; 1138 1139 if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) 1140 return 1; 1141 1142 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 1143 struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i]; 1144 1145 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - 1146 RTE_PKTMBUF_HEADROOM); 1147 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) 1148 return 1; 1149 } 1150 return 0; 1151 } 1152 1153 static eth_rx_burst_t 1154 bnxt_receive_function(struct rte_eth_dev *eth_dev) 1155 { 1156 struct bnxt *bp = eth_dev->data->dev_private; 1157 1158 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 1159 #ifndef RTE_LIBRTE_IEEE1588 1160 /* 1161 * Vector mode receive can be enabled only if scatter rx is not 1162 * in use and rx offloads are limited to VLAN stripping and 1163 * CRC stripping. 
1164 */ 1165 if (!eth_dev->data->scattered_rx && 1166 !(eth_dev->data->dev_conf.rxmode.offloads & 1167 ~(DEV_RX_OFFLOAD_VLAN_STRIP | 1168 DEV_RX_OFFLOAD_KEEP_CRC | 1169 DEV_RX_OFFLOAD_JUMBO_FRAME | 1170 DEV_RX_OFFLOAD_IPV4_CKSUM | 1171 DEV_RX_OFFLOAD_UDP_CKSUM | 1172 DEV_RX_OFFLOAD_TCP_CKSUM | 1173 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1174 DEV_RX_OFFLOAD_RSS_HASH | 1175 DEV_RX_OFFLOAD_VLAN_FILTER)) && 1176 !BNXT_TRUFLOW_EN(bp) && BNXT_NUM_ASYNC_CPR(bp) && 1177 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1178 PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n", 1179 eth_dev->data->port_id); 1180 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1181 return bnxt_recv_pkts_vec; 1182 } 1183 PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n", 1184 eth_dev->data->port_id); 1185 PMD_DRV_LOG(INFO, 1186 "Port %d scatter: %d rx offload: %" PRIX64 "\n", 1187 eth_dev->data->port_id, 1188 eth_dev->data->scattered_rx, 1189 eth_dev->data->dev_conf.rxmode.offloads); 1190 #endif 1191 #endif 1192 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1193 return bnxt_recv_pkts; 1194 } 1195 1196 static eth_tx_burst_t 1197 bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev) 1198 { 1199 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 1200 #ifndef RTE_LIBRTE_IEEE1588 1201 uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads; 1202 struct bnxt *bp = eth_dev->data->dev_private; 1203 1204 /* 1205 * Vector mode transmit can be enabled only if not using scatter rx 1206 * or tx offloads. 1207 */ 1208 if (!eth_dev->data->scattered_rx && 1209 !(offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) && 1210 !BNXT_TRUFLOW_EN(bp) && 1211 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1212 PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n", 1213 eth_dev->data->port_id); 1214 return bnxt_xmit_pkts_vec; 1215 } 1216 PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n", 1217 eth_dev->data->port_id); 1218 PMD_DRV_LOG(INFO, 1219 "Port %d scatter: %d tx offload: %" PRIX64 "\n", 1220 eth_dev->data->port_id, 1221 eth_dev->data->scattered_rx, 1222 offloads); 1223 #endif 1224 #endif 1225 return bnxt_xmit_pkts; 1226 } 1227 1228 static int bnxt_handle_if_change_status(struct bnxt *bp) 1229 { 1230 int rc; 1231 1232 /* Since fw has undergone a reset and lost all contexts, 1233 * set fatal flag to not issue hwrm during cleanup 1234 */ 1235 bp->flags |= BNXT_FLAG_FATAL_ERROR; 1236 bnxt_uninit_resources(bp, true); 1237 1238 /* clear fatal flag so that re-init happens */ 1239 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 1240 rc = bnxt_init_resources(bp, true); 1241 1242 bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE; 1243 1244 return rc; 1245 } 1246 1247 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) 1248 { 1249 struct bnxt *bp = eth_dev->data->dev_private; 1250 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1251 int vlan_mask = 0; 1252 int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT; 1253 1254 if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) { 1255 PMD_DRV_LOG(ERR, "Queues are not configured yet!\n"); 1256 return -EINVAL; 1257 } 1258 1259 if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) { 1260 PMD_DRV_LOG(ERR, 1261 "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", 1262 bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); 1263 } 1264 1265 do { 1266 rc = bnxt_hwrm_if_change(bp, true); 1267 if (rc == 0 || rc != -EAGAIN) 1268 break; 1269 1270 rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL); 1271 } while (retry_cnt--); 1272 1273 if (rc) 1274 return rc; 
1275 1276 if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) { 1277 rc = bnxt_handle_if_change_status(bp); 1278 if (rc) 1279 return rc; 1280 } 1281 1282 bnxt_enable_int(bp); 1283 1284 rc = bnxt_init_chip(bp); 1285 if (rc) 1286 goto error; 1287 1288 eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev); 1289 eth_dev->data->dev_started = 1; 1290 1291 bnxt_link_update_op(eth_dev, 1); 1292 1293 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 1294 vlan_mask |= ETH_VLAN_FILTER_MASK; 1295 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 1296 vlan_mask |= ETH_VLAN_STRIP_MASK; 1297 rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); 1298 if (rc) 1299 goto error; 1300 1301 /* Initialize bnxt ULP port details */ 1302 rc = bnxt_ulp_port_init(bp); 1303 if (rc) 1304 goto error; 1305 1306 eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev); 1307 eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev); 1308 1309 bnxt_schedule_fw_health_check(bp); 1310 1311 return 0; 1312 1313 error: 1314 bnxt_shutdown_nic(bp); 1315 bnxt_free_tx_mbufs(bp); 1316 bnxt_free_rx_mbufs(bp); 1317 bnxt_hwrm_if_change(bp, false); 1318 eth_dev->data->dev_started = 0; 1319 return rc; 1320 } 1321 1322 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) 1323 { 1324 struct bnxt *bp = eth_dev->data->dev_private; 1325 int rc = 0; 1326 1327 if (!bp->link_info->link_up) 1328 rc = bnxt_set_hwrm_link_config(bp, true); 1329 if (!rc) 1330 eth_dev->data->dev_link.link_status = 1; 1331 1332 bnxt_print_link_info(eth_dev); 1333 return rc; 1334 } 1335 1336 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) 1337 { 1338 struct bnxt *bp = eth_dev->data->dev_private; 1339 1340 eth_dev->data->dev_link.link_status = 0; 1341 bnxt_set_hwrm_link_config(bp, false); 1342 bp->link_info->link_up = 0; 1343 1344 return 0; 1345 } 1346 1347 static void bnxt_free_switch_domain(struct bnxt *bp) 1348 { 1349 if (bp->switch_domain_id) 1350 rte_eth_switch_domain_free(bp->switch_domain_id); 1351 } 1352 1353 /* Unload the driver, release resources */ 1354 static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) 1355 { 1356 struct bnxt *bp = eth_dev->data->dev_private; 1357 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1358 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1359 struct rte_eth_link link; 1360 int ret; 1361 1362 eth_dev->data->dev_started = 0; 1363 eth_dev->data->scattered_rx = 0; 1364 1365 /* Prevent crashes when queues are still in use */ 1366 eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts; 1367 eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts; 1368 1369 bnxt_disable_int(bp); 1370 1371 /* disable uio/vfio intr/eventfd mapping */ 1372 rte_intr_disable(intr_handle); 1373 1374 /* Stop the child representors for this device */ 1375 ret = bnxt_rep_stop_all(bp); 1376 if (ret != 0) 1377 return ret; 1378 1379 /* delete the bnxt ULP port details */ 1380 bnxt_ulp_port_deinit(bp); 1381 1382 bnxt_cancel_fw_health_check(bp); 1383 1384 /* Do not bring link down during reset recovery */ 1385 if (!is_bnxt_in_error(bp)) { 1386 bnxt_dev_set_link_down_op(eth_dev); 1387 /* Wait for link to be reset */ 1388 if (BNXT_SINGLE_PF(bp)) 1389 rte_delay_ms(500); 1390 /* clear the recorded link status */ 1391 memset(&link, 0, sizeof(link)); 1392 rte_eth_linkstatus_set(eth_dev, &link); 1393 } 1394 1395 /* Clean queue intr-vector mapping */ 1396 rte_intr_efd_disable(intr_handle); 1397 if (intr_handle->intr_vec != NULL) { 1398 rte_free(intr_handle->intr_vec); 1399 intr_handle->intr_vec = NULL; 1400 } 1401 1402 
bnxt_hwrm_port_clr_stats(bp); 1403 bnxt_free_tx_mbufs(bp); 1404 bnxt_free_rx_mbufs(bp); 1405 /* Process any remaining notifications in default completion queue */ 1406 bnxt_int_handler(eth_dev); 1407 bnxt_shutdown_nic(bp); 1408 bnxt_hwrm_if_change(bp, false); 1409 1410 rte_free(bp->mark_table); 1411 bp->mark_table = NULL; 1412 1413 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1414 bp->rx_cosq_cnt = 0; 1415 /* All filters are deleted on a port stop. */ 1416 if (BNXT_FLOW_XSTATS_EN(bp)) 1417 bp->flow_stat->flow_count = 0; 1418 1419 return 0; 1420 } 1421 1422 static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) 1423 { 1424 struct bnxt *bp = eth_dev->data->dev_private; 1425 int ret = 0; 1426 1427 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1428 return 0; 1429 1430 /* cancel the recovery handler before remove dev */ 1431 rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); 1432 rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); 1433 bnxt_cancel_fc_thread(bp); 1434 1435 if (eth_dev->data->dev_started) 1436 ret = bnxt_dev_stop_op(eth_dev); 1437 1438 bnxt_free_switch_domain(bp); 1439 1440 bnxt_uninit_resources(bp, false); 1441 1442 bnxt_free_leds_info(bp); 1443 bnxt_free_cos_queues(bp); 1444 bnxt_free_link_info(bp); 1445 bnxt_free_pf_info(bp); 1446 bnxt_free_parent_info(bp); 1447 1448 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); 1449 bp->tx_mem_zone = NULL; 1450 rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); 1451 bp->rx_mem_zone = NULL; 1452 1453 bnxt_hwrm_free_vf_info(bp); 1454 1455 rte_free(bp->grp_info); 1456 bp->grp_info = NULL; 1457 1458 return ret; 1459 } 1460 1461 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, 1462 uint32_t index) 1463 { 1464 struct bnxt *bp = eth_dev->data->dev_private; 1465 uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; 1466 struct bnxt_vnic_info *vnic; 1467 struct bnxt_filter_info *filter, *temp_filter; 1468 uint32_t i; 1469 1470 if (is_bnxt_in_error(bp)) 1471 return; 1472 1473 /* 1474 * Loop through all VNICs from the specified filter flow pools to 1475 * remove the corresponding MAC addr filter 1476 */ 1477 for (i = 0; i < bp->nr_vnics; i++) { 1478 if (!(pool_mask & (1ULL << i))) 1479 continue; 1480 1481 vnic = &bp->vnic_info[i]; 1482 filter = STAILQ_FIRST(&vnic->filter); 1483 while (filter) { 1484 temp_filter = STAILQ_NEXT(filter, next); 1485 if (filter->mac_index == index) { 1486 STAILQ_REMOVE(&vnic->filter, filter, 1487 bnxt_filter_info, next); 1488 bnxt_hwrm_clear_l2_filter(bp, filter); 1489 bnxt_free_filter(bp, filter); 1490 } 1491 filter = temp_filter; 1492 } 1493 } 1494 } 1495 1496 static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic, 1497 struct rte_ether_addr *mac_addr, uint32_t index, 1498 uint32_t pool) 1499 { 1500 struct bnxt_filter_info *filter; 1501 int rc = 0; 1502 1503 /* Attach requested MAC address to the new l2_filter */ 1504 STAILQ_FOREACH(filter, &vnic->filter, next) { 1505 if (filter->mac_index == index) { 1506 PMD_DRV_LOG(DEBUG, 1507 "MAC addr already existed for pool %d\n", 1508 pool); 1509 return 0; 1510 } 1511 } 1512 1513 filter = bnxt_alloc_filter(bp); 1514 if (!filter) { 1515 PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); 1516 return -ENODEV; 1517 } 1518 1519 /* bnxt_alloc_filter copies default MAC to filter->l2_addr. 
So, 1520 * if the MAC that's been programmed now is a different one, then, 1521 * copy that addr to filter->l2_addr 1522 */ 1523 if (mac_addr) 1524 memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN); 1525 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 1526 1527 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 1528 if (!rc) { 1529 filter->mac_index = index; 1530 if (filter->mac_index == 0) 1531 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 1532 else 1533 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 1534 } else { 1535 bnxt_free_filter(bp, filter); 1536 } 1537 1538 return rc; 1539 } 1540 1541 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, 1542 struct rte_ether_addr *mac_addr, 1543 uint32_t index, uint32_t pool) 1544 { 1545 struct bnxt *bp = eth_dev->data->dev_private; 1546 struct bnxt_vnic_info *vnic = &bp->vnic_info[pool]; 1547 int rc = 0; 1548 1549 rc = is_bnxt_in_error(bp); 1550 if (rc) 1551 return rc; 1552 1553 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { 1554 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n"); 1555 return -ENOTSUP; 1556 } 1557 1558 if (!vnic) { 1559 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool); 1560 return -EINVAL; 1561 } 1562 1563 /* Filter settings will get applied when port is started */ 1564 if (!eth_dev->data->dev_started) 1565 return 0; 1566 1567 rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool); 1568 1569 return rc; 1570 } 1571 1572 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) 1573 { 1574 int rc = 0; 1575 struct bnxt *bp = eth_dev->data->dev_private; 1576 struct rte_eth_link new; 1577 int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT : 1578 BNXT_MIN_LINK_WAIT_CNT; 1579 1580 rc = is_bnxt_in_error(bp); 1581 if (rc) 1582 return rc; 1583 1584 memset(&new, 0, sizeof(new)); 1585 do { 1586 /* Retrieve link info from hardware */ 1587 rc = bnxt_get_hwrm_link_config(bp, &new); 1588 if (rc) { 1589 new.link_speed = ETH_LINK_SPEED_100M; 1590 new.link_duplex = ETH_LINK_FULL_DUPLEX; 1591 PMD_DRV_LOG(ERR, 1592 "Failed to retrieve link rc = 0x%x!\n", rc); 1593 goto out; 1594 } 1595 1596 if (!wait_to_complete || new.link_status) 1597 break; 1598 1599 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); 1600 } while (cnt--); 1601 1602 /* Only single function PF can bring phy down. 1603 * When port is stopped, report link down for VF/MH/NPAR functions. 
1604 */ 1605 if (!BNXT_SINGLE_PF(bp) && !eth_dev->data->dev_started) 1606 memset(&new, 0, sizeof(new)); 1607 1608 out: 1609 /* Timed out or success */ 1610 if (new.link_status != eth_dev->data->dev_link.link_status || 1611 new.link_speed != eth_dev->data->dev_link.link_speed) { 1612 rte_eth_linkstatus_set(eth_dev, &new); 1613 1614 rte_eth_dev_callback_process(eth_dev, 1615 RTE_ETH_EVENT_INTR_LSC, 1616 NULL); 1617 1618 bnxt_print_link_info(eth_dev); 1619 } 1620 1621 return rc; 1622 } 1623 1624 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) 1625 { 1626 struct bnxt *bp = eth_dev->data->dev_private; 1627 struct bnxt_vnic_info *vnic; 1628 uint32_t old_flags; 1629 int rc; 1630 1631 rc = is_bnxt_in_error(bp); 1632 if (rc) 1633 return rc; 1634 1635 /* Filter settings will get applied when port is started */ 1636 if (!eth_dev->data->dev_started) 1637 return 0; 1638 1639 if (bp->vnic_info == NULL) 1640 return 0; 1641 1642 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1643 1644 old_flags = vnic->flags; 1645 vnic->flags |= BNXT_VNIC_INFO_PROMISC; 1646 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1647 if (rc != 0) 1648 vnic->flags = old_flags; 1649 1650 return rc; 1651 } 1652 1653 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) 1654 { 1655 struct bnxt *bp = eth_dev->data->dev_private; 1656 struct bnxt_vnic_info *vnic; 1657 uint32_t old_flags; 1658 int rc; 1659 1660 rc = is_bnxt_in_error(bp); 1661 if (rc) 1662 return rc; 1663 1664 /* Filter settings will get applied when port is started */ 1665 if (!eth_dev->data->dev_started) 1666 return 0; 1667 1668 if (bp->vnic_info == NULL) 1669 return 0; 1670 1671 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1672 1673 old_flags = vnic->flags; 1674 vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; 1675 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1676 if (rc != 0) 1677 vnic->flags = old_flags; 1678 1679 return rc; 1680 } 1681 1682 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) 1683 { 1684 struct bnxt *bp = eth_dev->data->dev_private; 1685 struct bnxt_vnic_info *vnic; 1686 uint32_t old_flags; 1687 int rc; 1688 1689 rc = is_bnxt_in_error(bp); 1690 if (rc) 1691 return rc; 1692 1693 /* Filter settings will get applied when port is started */ 1694 if (!eth_dev->data->dev_started) 1695 return 0; 1696 1697 if (bp->vnic_info == NULL) 1698 return 0; 1699 1700 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1701 1702 old_flags = vnic->flags; 1703 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 1704 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1705 if (rc != 0) 1706 vnic->flags = old_flags; 1707 1708 return rc; 1709 } 1710 1711 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) 1712 { 1713 struct bnxt *bp = eth_dev->data->dev_private; 1714 struct bnxt_vnic_info *vnic; 1715 uint32_t old_flags; 1716 int rc; 1717 1718 rc = is_bnxt_in_error(bp); 1719 if (rc) 1720 return rc; 1721 1722 /* Filter settings will get applied when port is started */ 1723 if (!eth_dev->data->dev_started) 1724 return 0; 1725 1726 if (bp->vnic_info == NULL) 1727 return 0; 1728 1729 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1730 1731 old_flags = vnic->flags; 1732 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 1733 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1734 if (rc != 0) 1735 vnic->flags = old_flags; 1736 1737 return rc; 1738 } 1739 1740 /* Return bnxt_rx_queue pointer corresponding to a given rxq. 
*/ 1741 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) 1742 { 1743 if (qid >= bp->rx_nr_rings) 1744 return NULL; 1745 1746 return bp->eth_dev->data->rx_queues[qid]; 1747 } 1748 1749 /* Return rxq corresponding to a given rss table ring/group ID. */ 1750 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) 1751 { 1752 struct bnxt_rx_queue *rxq; 1753 unsigned int i; 1754 1755 if (!BNXT_HAS_RING_GRPS(bp)) { 1756 for (i = 0; i < bp->rx_nr_rings; i++) { 1757 rxq = bp->eth_dev->data->rx_queues[i]; 1758 if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr) 1759 return rxq->index; 1760 } 1761 } else { 1762 for (i = 0; i < bp->rx_nr_rings; i++) { 1763 if (bp->grp_info[i].fw_grp_id == fwr) 1764 return i; 1765 } 1766 } 1767 1768 return INVALID_HW_RING_ID; 1769 } 1770 1771 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, 1772 struct rte_eth_rss_reta_entry64 *reta_conf, 1773 uint16_t reta_size) 1774 { 1775 struct bnxt *bp = eth_dev->data->dev_private; 1776 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 1777 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 1778 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 1779 uint16_t idx, sft; 1780 int i, rc; 1781 1782 rc = is_bnxt_in_error(bp); 1783 if (rc) 1784 return rc; 1785 1786 if (!vnic->rss_table) 1787 return -EINVAL; 1788 1789 if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) 1790 return -EINVAL; 1791 1792 if (reta_size != tbl_size) { 1793 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 1794 "(%d) must equal the size supported by the hardware " 1795 "(%d)\n", reta_size, tbl_size); 1796 return -EINVAL; 1797 } 1798 1799 for (i = 0; i < reta_size; i++) { 1800 struct bnxt_rx_queue *rxq; 1801 1802 idx = i / RTE_RETA_GROUP_SIZE; 1803 sft = i % RTE_RETA_GROUP_SIZE; 1804 1805 if (!(reta_conf[idx].mask & (1ULL << sft))) 1806 continue; 1807 1808 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); 1809 if (!rxq) { 1810 PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n"); 1811 return -EINVAL; 1812 } 1813 1814 if (BNXT_CHIP_THOR(bp)) { 1815 vnic->rss_table[i * 2] = 1816 rxq->rx_ring->rx_ring_struct->fw_ring_id; 1817 vnic->rss_table[i * 2 + 1] = 1818 rxq->cp_ring->cp_ring_struct->fw_ring_id; 1819 } else { 1820 vnic->rss_table[i] = 1821 vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; 1822 } 1823 } 1824 1825 bnxt_hwrm_vnic_rss_cfg(bp, vnic); 1826 return 0; 1827 } 1828 1829 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, 1830 struct rte_eth_rss_reta_entry64 *reta_conf, 1831 uint16_t reta_size) 1832 { 1833 struct bnxt *bp = eth_dev->data->dev_private; 1834 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 1835 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 1836 uint16_t idx, sft, i; 1837 int rc; 1838 1839 rc = is_bnxt_in_error(bp); 1840 if (rc) 1841 return rc; 1842 1843 /* Retrieve from the default VNIC */ 1844 if (!vnic) 1845 return -EINVAL; 1846 if (!vnic->rss_table) 1847 return -EINVAL; 1848 1849 if (reta_size != tbl_size) { 1850 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 1851 "(%d) must equal the size supported by the hardware " 1852 "(%d)\n", reta_size, tbl_size); 1853 return -EINVAL; 1854 } 1855 1856 for (idx = 0, i = 0; i < reta_size; i++) { 1857 idx = i / RTE_RETA_GROUP_SIZE; 1858 sft = i % RTE_RETA_GROUP_SIZE; 1859 1860 if (reta_conf[idx].mask & (1ULL << sft)) { 1861 uint16_t qid; 1862 1863 if (BNXT_CHIP_THOR(bp)) 1864 qid = bnxt_rss_to_qid(bp, 1865 vnic->rss_table[i * 2]); 1866 else 1867 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); 1868 1869 if (qid 
== INVALID_HW_RING_ID) { 1870 PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n"); 1871 return -EINVAL; 1872 } 1873 reta_conf[idx].reta[sft] = qid; 1874 } 1875 } 1876 1877 return 0; 1878 } 1879 1880 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, 1881 struct rte_eth_rss_conf *rss_conf) 1882 { 1883 struct bnxt *bp = eth_dev->data->dev_private; 1884 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 1885 struct bnxt_vnic_info *vnic; 1886 int rc; 1887 1888 rc = is_bnxt_in_error(bp); 1889 if (rc) 1890 return rc; 1891 1892 /* 1893 * If RSS enablement were different than dev_configure, 1894 * then return -EINVAL 1895 */ 1896 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { 1897 if (!rss_conf->rss_hf) 1898 PMD_DRV_LOG(ERR, "Hash type NONE\n"); 1899 } else { 1900 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) 1901 return -EINVAL; 1902 } 1903 1904 bp->flags |= BNXT_FLAG_UPDATE_HASH; 1905 memcpy(ð_dev->data->dev_conf.rx_adv_conf.rss_conf, 1906 rss_conf, 1907 sizeof(*rss_conf)); 1908 1909 /* Update the default RSS VNIC(s) */ 1910 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1911 vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf); 1912 vnic->hash_mode = 1913 bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf, 1914 ETH_RSS_LEVEL(rss_conf->rss_hf)); 1915 1916 /* 1917 * If hashkey is not specified, use the previously configured 1918 * hashkey 1919 */ 1920 if (!rss_conf->rss_key) 1921 goto rss_config; 1922 1923 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) { 1924 PMD_DRV_LOG(ERR, 1925 "Invalid hashkey length, should be 16 bytes\n"); 1926 return -EINVAL; 1927 } 1928 memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); 1929 1930 rss_config: 1931 bnxt_hwrm_vnic_rss_cfg(bp, vnic); 1932 return 0; 1933 } 1934 1935 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, 1936 struct rte_eth_rss_conf *rss_conf) 1937 { 1938 struct bnxt *bp = eth_dev->data->dev_private; 1939 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 1940 int len, rc; 1941 uint32_t hash_types; 1942 1943 rc = is_bnxt_in_error(bp); 1944 if (rc) 1945 return rc; 1946 1947 /* RSS configuration is the same for all VNICs */ 1948 if (vnic && vnic->rss_hash_key) { 1949 if (rss_conf->rss_key) { 1950 len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ? 
1951 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 1952 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 1953 } 1954 1955 hash_types = vnic->hash_type; 1956 rss_conf->rss_hf = 0; 1957 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { 1958 rss_conf->rss_hf |= ETH_RSS_IPV4; 1959 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 1960 } 1961 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { 1962 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; 1963 hash_types &= 1964 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 1965 } 1966 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { 1967 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; 1968 hash_types &= 1969 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 1970 } 1971 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { 1972 rss_conf->rss_hf |= ETH_RSS_IPV6; 1973 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 1974 } 1975 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { 1976 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; 1977 hash_types &= 1978 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 1979 } 1980 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { 1981 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; 1982 hash_types &= 1983 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 1984 } 1985 1986 rss_conf->rss_hf |= 1987 bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode); 1988 1989 if (hash_types) { 1990 PMD_DRV_LOG(ERR, 1991 "Unknown RSS config from firmware (%08x), RSS disabled", 1992 vnic->hash_type); 1993 return -ENOTSUP; 1994 } 1995 } else { 1996 rss_conf->rss_hf = 0; 1997 } 1998 return 0; 1999 } 2000 2001 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 2002 struct rte_eth_fc_conf *fc_conf) 2003 { 2004 struct bnxt *bp = dev->data->dev_private; 2005 struct rte_eth_link link_info; 2006 int rc; 2007 2008 rc = is_bnxt_in_error(bp); 2009 if (rc) 2010 return rc; 2011 2012 rc = bnxt_get_hwrm_link_config(bp, &link_info); 2013 if (rc) 2014 return rc; 2015 2016 memset(fc_conf, 0, sizeof(*fc_conf)); 2017 if (bp->link_info->auto_pause) 2018 fc_conf->autoneg = 1; 2019 switch (bp->link_info->pause) { 2020 case 0: 2021 fc_conf->mode = RTE_FC_NONE; 2022 break; 2023 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 2024 fc_conf->mode = RTE_FC_TX_PAUSE; 2025 break; 2026 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 2027 fc_conf->mode = RTE_FC_RX_PAUSE; 2028 break; 2029 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 2030 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 2031 fc_conf->mode = RTE_FC_FULL; 2032 break; 2033 } 2034 return 0; 2035 } 2036 2037 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 2038 struct rte_eth_fc_conf *fc_conf) 2039 { 2040 struct bnxt *bp = dev->data->dev_private; 2041 int rc; 2042 2043 rc = is_bnxt_in_error(bp); 2044 if (rc) 2045 return rc; 2046 2047 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { 2048 PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n"); 2049 return -ENOTSUP; 2050 } 2051 2052 switch (fc_conf->mode) { 2053 case RTE_FC_NONE: 2054 bp->link_info->auto_pause = 0; 2055 bp->link_info->force_pause = 0; 2056 break; 2057 case RTE_FC_RX_PAUSE: 2058 if (fc_conf->autoneg) { 2059 bp->link_info->auto_pause = 2060 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2061 bp->link_info->force_pause = 0; 2062 } else { 2063 bp->link_info->auto_pause = 0; 2064 bp->link_info->force_pause = 2065 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2066 } 2067 break; 2068 case RTE_FC_TX_PAUSE: 2069 if (fc_conf->autoneg) { 2070 bp->link_info->auto_pause = 2071 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 2072 
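/* Autoneg case: advertise Tx pause only; the forced pause setting is
 * cleared here and is used only in the non-autoneg branch below.
 */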
bp->link_info->force_pause = 0; 2073 } else { 2074 bp->link_info->auto_pause = 0; 2075 bp->link_info->force_pause = 2076 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 2077 } 2078 break; 2079 case RTE_FC_FULL: 2080 if (fc_conf->autoneg) { 2081 bp->link_info->auto_pause = 2082 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 2083 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2084 bp->link_info->force_pause = 0; 2085 } else { 2086 bp->link_info->auto_pause = 0; 2087 bp->link_info->force_pause = 2088 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 2089 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2090 } 2091 break; 2092 } 2093 return bnxt_set_hwrm_link_config(bp, true); 2094 } 2095 2096 /* Add UDP tunneling port */ 2097 static int 2098 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 2099 struct rte_eth_udp_tunnel *udp_tunnel) 2100 { 2101 struct bnxt *bp = eth_dev->data->dev_private; 2102 uint16_t tunnel_type = 0; 2103 int rc = 0; 2104 2105 rc = is_bnxt_in_error(bp); 2106 if (rc) 2107 return rc; 2108 2109 switch (udp_tunnel->prot_type) { 2110 case RTE_TUNNEL_TYPE_VXLAN: 2111 if (bp->vxlan_port_cnt) { 2112 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2113 udp_tunnel->udp_port); 2114 if (bp->vxlan_port != udp_tunnel->udp_port) { 2115 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2116 return -ENOSPC; 2117 } 2118 bp->vxlan_port_cnt++; 2119 return 0; 2120 } 2121 tunnel_type = 2122 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 2123 bp->vxlan_port_cnt++; 2124 break; 2125 case RTE_TUNNEL_TYPE_GENEVE: 2126 if (bp->geneve_port_cnt) { 2127 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2128 udp_tunnel->udp_port); 2129 if (bp->geneve_port != udp_tunnel->udp_port) { 2130 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2131 return -ENOSPC; 2132 } 2133 bp->geneve_port_cnt++; 2134 return 0; 2135 } 2136 tunnel_type = 2137 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 2138 bp->geneve_port_cnt++; 2139 break; 2140 default: 2141 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2142 return -ENOTSUP; 2143 } 2144 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 2145 tunnel_type); 2146 return rc; 2147 } 2148 2149 static int 2150 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 2151 struct rte_eth_udp_tunnel *udp_tunnel) 2152 { 2153 struct bnxt *bp = eth_dev->data->dev_private; 2154 uint16_t tunnel_type = 0; 2155 uint16_t port = 0; 2156 int rc = 0; 2157 2158 rc = is_bnxt_in_error(bp); 2159 if (rc) 2160 return rc; 2161 2162 switch (udp_tunnel->prot_type) { 2163 case RTE_TUNNEL_TYPE_VXLAN: 2164 if (!bp->vxlan_port_cnt) { 2165 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2166 return -EINVAL; 2167 } 2168 if (bp->vxlan_port != udp_tunnel->udp_port) { 2169 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2170 udp_tunnel->udp_port, bp->vxlan_port); 2171 return -EINVAL; 2172 } 2173 if (--bp->vxlan_port_cnt) 2174 return 0; 2175 2176 tunnel_type = 2177 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 2178 port = bp->vxlan_fw_dst_port_id; 2179 break; 2180 case RTE_TUNNEL_TYPE_GENEVE: 2181 if (!bp->geneve_port_cnt) { 2182 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2183 return -EINVAL; 2184 } 2185 if (bp->geneve_port != udp_tunnel->udp_port) { 2186 PMD_DRV_LOG(ERR, "Req Port: %d. 
Configured port: %d\n", 2187 udp_tunnel->udp_port, bp->geneve_port); 2188 return -EINVAL; 2189 } 2190 if (--bp->geneve_port_cnt) 2191 return 0; 2192 2193 tunnel_type = 2194 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 2195 port = bp->geneve_fw_dst_port_id; 2196 break; 2197 default: 2198 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2199 return -ENOTSUP; 2200 } 2201 2202 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 2203 return rc; 2204 } 2205 2206 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2207 { 2208 struct bnxt_filter_info *filter; 2209 struct bnxt_vnic_info *vnic; 2210 int rc = 0; 2211 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2212 2213 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2214 filter = STAILQ_FIRST(&vnic->filter); 2215 while (filter) { 2216 /* Search for this matching MAC+VLAN filter */ 2217 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) { 2218 /* Delete the filter */ 2219 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2220 if (rc) 2221 return rc; 2222 STAILQ_REMOVE(&vnic->filter, filter, 2223 bnxt_filter_info, next); 2224 bnxt_free_filter(bp, filter); 2225 PMD_DRV_LOG(INFO, 2226 "Deleted vlan filter for %d\n", 2227 vlan_id); 2228 return 0; 2229 } 2230 filter = STAILQ_NEXT(filter, next); 2231 } 2232 return -ENOENT; 2233 } 2234 2235 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2236 { 2237 struct bnxt_filter_info *filter; 2238 struct bnxt_vnic_info *vnic; 2239 int rc = 0; 2240 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 2241 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 2242 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2243 2244 /* Implementation notes on the use of VNIC in this command: 2245 * 2246 * By default, these filters belong to default vnic for the function. 2247 * Once these filters are set up, only destination VNIC can be modified. 2248 * If the destination VNIC is not specified in this command, 2249 * then the HWRM shall only create an l2 context id. 2250 */ 2251 2252 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2253 filter = STAILQ_FIRST(&vnic->filter); 2254 /* Check if the VLAN has already been added */ 2255 while (filter) { 2256 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) 2257 return -EEXIST; 2258 2259 filter = STAILQ_NEXT(filter, next); 2260 } 2261 2262 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 2263 * command to create MAC+VLAN filter with the right flags, enables set. 2264 */ 2265 filter = bnxt_alloc_filter(bp); 2266 if (!filter) { 2267 PMD_DRV_LOG(ERR, 2268 "MAC/VLAN filter alloc failed\n"); 2269 return -ENOMEM; 2270 } 2271 /* MAC + VLAN ID filter */ 2272 /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only 2273 * untagged packets are received 2274 * 2275 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged 2276 * packets and only the programmed vlan's packets are received 2277 */ 2278 filter->l2_ivlan = vlan_id; 2279 filter->l2_ivlan_mask = 0x0FFF; 2280 filter->enables |= en; 2281 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 2282 2283 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 2284 if (rc) { 2285 /* Free the newly allocated filter as we were 2286 * not able to create the filter in hardware. 
2287 */ 2288 bnxt_free_filter(bp, filter); 2289 return rc; 2290 } 2291 2292 filter->mac_index = 0; 2293 /* Add this new filter to the list */ 2294 if (vlan_id == 0) 2295 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 2296 else 2297 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2298 2299 PMD_DRV_LOG(INFO, 2300 "Added Vlan filter for %d\n", vlan_id); 2301 return rc; 2302 } 2303 2304 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 2305 uint16_t vlan_id, int on) 2306 { 2307 struct bnxt *bp = eth_dev->data->dev_private; 2308 int rc; 2309 2310 rc = is_bnxt_in_error(bp); 2311 if (rc) 2312 return rc; 2313 2314 if (!eth_dev->data->dev_started) { 2315 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); 2316 return -EINVAL; 2317 } 2318 2319 /* These operations apply to ALL existing MAC/VLAN filters */ 2320 if (on) 2321 return bnxt_add_vlan_filter(bp, vlan_id); 2322 else 2323 return bnxt_del_vlan_filter(bp, vlan_id); 2324 } 2325 2326 static int bnxt_del_dflt_mac_filter(struct bnxt *bp, 2327 struct bnxt_vnic_info *vnic) 2328 { 2329 struct bnxt_filter_info *filter; 2330 int rc; 2331 2332 filter = STAILQ_FIRST(&vnic->filter); 2333 while (filter) { 2334 if (filter->mac_index == 0 && 2335 !memcmp(filter->l2_addr, bp->mac_addr, 2336 RTE_ETHER_ADDR_LEN)) { 2337 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2338 if (!rc) { 2339 STAILQ_REMOVE(&vnic->filter, filter, 2340 bnxt_filter_info, next); 2341 bnxt_free_filter(bp, filter); 2342 } 2343 return rc; 2344 } 2345 filter = STAILQ_NEXT(filter, next); 2346 } 2347 return 0; 2348 } 2349 2350 static int 2351 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) 2352 { 2353 struct bnxt_vnic_info *vnic; 2354 unsigned int i; 2355 int rc; 2356 2357 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2358 if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { 2359 /* Remove any VLAN filters programmed */ 2360 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2361 bnxt_del_vlan_filter(bp, i); 2362 2363 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2364 if (rc) 2365 return rc; 2366 } else { 2367 /* Default filter will allow packets that match the 2368 * dest mac. 
So, it has to be deleted, otherwise, we 2369 * will endup receiving vlan packets for which the 2370 * filter is not programmed, when hw-vlan-filter 2371 * configuration is ON 2372 */ 2373 bnxt_del_dflt_mac_filter(bp, vnic); 2374 /* This filter will allow only untagged packets */ 2375 bnxt_add_vlan_filter(bp, 0); 2376 } 2377 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", 2378 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)); 2379 2380 return 0; 2381 } 2382 2383 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) 2384 { 2385 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 2386 unsigned int i; 2387 int rc; 2388 2389 /* Destroy vnic filters and vnic */ 2390 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2391 DEV_RX_OFFLOAD_VLAN_FILTER) { 2392 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2393 bnxt_del_vlan_filter(bp, i); 2394 } 2395 bnxt_del_dflt_mac_filter(bp, vnic); 2396 2397 rc = bnxt_hwrm_vnic_free(bp, vnic); 2398 if (rc) 2399 return rc; 2400 2401 rte_free(vnic->fw_grp_ids); 2402 vnic->fw_grp_ids = NULL; 2403 2404 vnic->rx_queue_cnt = 0; 2405 2406 return 0; 2407 } 2408 2409 static int 2410 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads) 2411 { 2412 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2413 int rc; 2414 2415 /* Destroy, recreate and reconfigure the default vnic */ 2416 rc = bnxt_free_one_vnic(bp, 0); 2417 if (rc) 2418 return rc; 2419 2420 /* default vnic 0 */ 2421 rc = bnxt_setup_one_vnic(bp, 0); 2422 if (rc) 2423 return rc; 2424 2425 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2426 DEV_RX_OFFLOAD_VLAN_FILTER) { 2427 rc = bnxt_add_vlan_filter(bp, 0); 2428 if (rc) 2429 return rc; 2430 rc = bnxt_restore_vlan_filters(bp); 2431 if (rc) 2432 return rc; 2433 } else { 2434 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2435 if (rc) 2436 return rc; 2437 } 2438 2439 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2440 if (rc) 2441 return rc; 2442 2443 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", 2444 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)); 2445 2446 return rc; 2447 } 2448 2449 static int 2450 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 2451 { 2452 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 2453 struct bnxt *bp = dev->data->dev_private; 2454 int rc; 2455 2456 rc = is_bnxt_in_error(bp); 2457 if (rc) 2458 return rc; 2459 2460 /* Filter settings will get applied when port is started */ 2461 if (!dev->data->dev_started) 2462 return 0; 2463 2464 if (mask & ETH_VLAN_FILTER_MASK) { 2465 /* Enable or disable VLAN filtering */ 2466 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads); 2467 if (rc) 2468 return rc; 2469 } 2470 2471 if (mask & ETH_VLAN_STRIP_MASK) { 2472 /* Enable or disable VLAN stripping */ 2473 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads); 2474 if (rc) 2475 return rc; 2476 } 2477 2478 if (mask & ETH_VLAN_EXTEND_MASK) { 2479 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2480 PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); 2481 else 2482 PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); 2483 } 2484 2485 return 0; 2486 } 2487 2488 static int 2489 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, 2490 uint16_t tpid) 2491 { 2492 struct bnxt *bp = dev->data->dev_private; 2493 int qinq = dev->data->dev_conf.rxmode.offloads & 2494 DEV_RX_OFFLOAD_VLAN_EXTEND; 2495 2496 if (vlan_type != ETH_VLAN_TYPE_INNER && 2497 vlan_type != ETH_VLAN_TYPE_OUTER) { 2498 PMD_DRV_LOG(ERR, 2499 "Unsupported vlan type."); 2500 return -EINVAL; 2501 } 2502 if (!qinq) { 2503 PMD_DRV_LOG(ERR, 
2504 "QinQ not enabled. Needs to be ON as we can " 2505 "accelerate only outer vlan\n"); 2506 return -EINVAL; 2507 } 2508 2509 if (vlan_type == ETH_VLAN_TYPE_OUTER) { 2510 switch (tpid) { 2511 case RTE_ETHER_TYPE_QINQ: 2512 bp->outer_tpid_bd = 2513 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; 2514 break; 2515 case RTE_ETHER_TYPE_VLAN: 2516 bp->outer_tpid_bd = 2517 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; 2518 break; 2519 case RTE_ETHER_TYPE_QINQ1: 2520 bp->outer_tpid_bd = 2521 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; 2522 break; 2523 case RTE_ETHER_TYPE_QINQ2: 2524 bp->outer_tpid_bd = 2525 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; 2526 break; 2527 case RTE_ETHER_TYPE_QINQ3: 2528 bp->outer_tpid_bd = 2529 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; 2530 break; 2531 default: 2532 PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); 2533 return -EINVAL; 2534 } 2535 bp->outer_tpid_bd |= tpid; 2536 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); 2537 } else if (vlan_type == ETH_VLAN_TYPE_INNER) { 2538 PMD_DRV_LOG(ERR, 2539 "Can accelerate only outer vlan in QinQ\n"); 2540 return -EINVAL; 2541 } 2542 2543 return 0; 2544 } 2545 2546 static int 2547 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 2548 struct rte_ether_addr *addr) 2549 { 2550 struct bnxt *bp = dev->data->dev_private; 2551 /* Default Filter is tied to VNIC 0 */ 2552 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2553 int rc; 2554 2555 rc = is_bnxt_in_error(bp); 2556 if (rc) 2557 return rc; 2558 2559 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 2560 return -EPERM; 2561 2562 if (rte_is_zero_ether_addr(addr)) 2563 return -EINVAL; 2564 2565 /* Filter settings will get applied when port is started */ 2566 if (!dev->data->dev_started) 2567 return 0; 2568 2569 /* Check if the requested MAC is already added */ 2570 if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0) 2571 return 0; 2572 2573 /* Destroy filter and re-create it */ 2574 bnxt_del_dflt_mac_filter(bp, vnic); 2575 2576 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 2577 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 2578 /* This filter will allow only untagged packets */ 2579 rc = bnxt_add_vlan_filter(bp, 0); 2580 } else { 2581 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0); 2582 } 2583 2584 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 2585 return rc; 2586 } 2587 2588 static int 2589 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 2590 struct rte_ether_addr *mc_addr_set, 2591 uint32_t nb_mc_addr) 2592 { 2593 struct bnxt *bp = eth_dev->data->dev_private; 2594 char *mc_addr_list = (char *)mc_addr_set; 2595 struct bnxt_vnic_info *vnic; 2596 uint32_t off = 0, i = 0; 2597 int rc; 2598 2599 rc = is_bnxt_in_error(bp); 2600 if (rc) 2601 return rc; 2602 2603 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2604 2605 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 2606 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 2607 goto allmulti; 2608 } 2609 2610 /* TODO Check for Duplicate mcast addresses */ 2611 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 2612 for (i = 0; i < nb_mc_addr; i++) { 2613 memcpy(vnic->mc_list + off, &mc_addr_list[i], 2614 RTE_ETHER_ADDR_LEN); 2615 off += RTE_ETHER_ADDR_LEN; 2616 } 2617 2618 vnic->mc_addr_cnt = i; 2619 if (vnic->mc_addr_cnt) 2620 vnic->flags |= BNXT_VNIC_INFO_MCAST; 2621 else 2622 vnic->flags &= ~BNXT_VNIC_INFO_MCAST; 2623 2624 allmulti: 2625 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2626 } 2627 2628 static int 2629 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 2630 { 2631 struct bnxt *bp = 
dev->data->dev_private; 2632 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 2633 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 2634 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 2635 uint8_t fw_rsvd = bp->fw_ver & 0xff; 2636 int ret; 2637 2638 ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", 2639 fw_major, fw_minor, fw_updt, fw_rsvd); 2640 2641 ret += 1; /* add the size of '\0' */ 2642 if (fw_size < (uint32_t)ret) 2643 return ret; 2644 else 2645 return 0; 2646 } 2647 2648 static void 2649 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2650 struct rte_eth_rxq_info *qinfo) 2651 { 2652 struct bnxt *bp = dev->data->dev_private; 2653 struct bnxt_rx_queue *rxq; 2654 2655 if (is_bnxt_in_error(bp)) 2656 return; 2657 2658 rxq = dev->data->rx_queues[queue_id]; 2659 2660 qinfo->mp = rxq->mb_pool; 2661 qinfo->scattered_rx = dev->data->scattered_rx; 2662 qinfo->nb_desc = rxq->nb_rx_desc; 2663 2664 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 2665 qinfo->conf.rx_drop_en = rxq->drop_en; 2666 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 2667 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 2668 } 2669 2670 static void 2671 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2672 struct rte_eth_txq_info *qinfo) 2673 { 2674 struct bnxt *bp = dev->data->dev_private; 2675 struct bnxt_tx_queue *txq; 2676 2677 if (is_bnxt_in_error(bp)) 2678 return; 2679 2680 txq = dev->data->tx_queues[queue_id]; 2681 2682 qinfo->nb_desc = txq->nb_tx_desc; 2683 2684 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 2685 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 2686 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 2687 2688 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 2689 qinfo->conf.tx_rs_thresh = 0; 2690 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 2691 qinfo->conf.offloads = txq->offloads; 2692 } 2693 2694 static const struct { 2695 eth_rx_burst_t pkt_burst; 2696 const char *info; 2697 } bnxt_rx_burst_info[] = { 2698 {bnxt_recv_pkts, "Scalar"}, 2699 #if defined(RTE_ARCH_X86) 2700 {bnxt_recv_pkts_vec, "Vector SSE"}, 2701 #elif defined(RTE_ARCH_ARM64) 2702 {bnxt_recv_pkts_vec, "Vector Neon"}, 2703 #endif 2704 }; 2705 2706 static int 2707 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2708 struct rte_eth_burst_mode *mode) 2709 { 2710 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 2711 size_t i; 2712 2713 for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) { 2714 if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) { 2715 snprintf(mode->info, sizeof(mode->info), "%s", 2716 bnxt_rx_burst_info[i].info); 2717 return 0; 2718 } 2719 } 2720 2721 return -EINVAL; 2722 } 2723 2724 static const struct { 2725 eth_tx_burst_t pkt_burst; 2726 const char *info; 2727 } bnxt_tx_burst_info[] = { 2728 {bnxt_xmit_pkts, "Scalar"}, 2729 #if defined(RTE_ARCH_X86) 2730 {bnxt_xmit_pkts_vec, "Vector SSE"}, 2731 #elif defined(RTE_ARCH_ARM64) 2732 {bnxt_xmit_pkts_vec, "Vector Neon"}, 2733 #endif 2734 }; 2735 2736 static int 2737 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2738 struct rte_eth_burst_mode *mode) 2739 { 2740 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 2741 size_t i; 2742 2743 for (i = 0; i < RTE_DIM(bnxt_tx_burst_info); i++) { 2744 if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) { 2745 snprintf(mode->info, sizeof(mode->info), "%s", 2746 bnxt_tx_burst_info[i].info); 2747 return 0; 2748 } 2749 } 2750 2751 return -EINVAL; 2752 } 2753 2754 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 2755 { 
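/* MTU update: the required frame size is the new MTU plus Ethernet
 * header, CRC and two VLAN tags. The change is rejected while the port
 * is started if it would need scattered Rx that is not enabled. The
 * JUMBO flag/offload is toggled around RTE_ETHER_MTU, and each VNIC's
 * MRU is reprogrammed, with placement-mode reconfiguration when the
 * mbuf data room is smaller than the new MTU.
 */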
2756 struct bnxt *bp = eth_dev->data->dev_private; 2757 uint32_t new_pkt_size; 2758 uint32_t rc = 0; 2759 uint32_t i; 2760 2761 rc = is_bnxt_in_error(bp); 2762 if (rc) 2763 return rc; 2764 2765 /* Exit if receive queues are not configured yet */ 2766 if (!eth_dev->data->nb_rx_queues) 2767 return rc; 2768 2769 new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 2770 VLAN_TAG_SIZE * BNXT_NUM_VLANS; 2771 2772 /* 2773 * Disallow any MTU change that would require scattered receive support 2774 * if it is not already enabled. 2775 */ 2776 if (eth_dev->data->dev_started && 2777 !eth_dev->data->scattered_rx && 2778 (new_pkt_size > 2779 eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 2780 PMD_DRV_LOG(ERR, 2781 "MTU change would require scattered rx support. "); 2782 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n"); 2783 return -EINVAL; 2784 } 2785 2786 if (new_mtu > RTE_ETHER_MTU) { 2787 bp->flags |= BNXT_FLAG_JUMBO; 2788 bp->eth_dev->data->dev_conf.rxmode.offloads |= 2789 DEV_RX_OFFLOAD_JUMBO_FRAME; 2790 } else { 2791 bp->eth_dev->data->dev_conf.rxmode.offloads &= 2792 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 2793 bp->flags &= ~BNXT_FLAG_JUMBO; 2794 } 2795 2796 /* Is there a change in mtu setting? */ 2797 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size) 2798 return rc; 2799 2800 for (i = 0; i < bp->nr_vnics; i++) { 2801 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2802 uint16_t size = 0; 2803 2804 vnic->mru = BNXT_VNIC_MRU(new_mtu); 2805 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 2806 if (rc) 2807 break; 2808 2809 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); 2810 size -= RTE_PKTMBUF_HEADROOM; 2811 2812 if (size < new_mtu) { 2813 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 2814 if (rc) 2815 return rc; 2816 } 2817 } 2818 2819 if (!rc) 2820 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size; 2821 2822 PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); 2823 2824 return rc; 2825 } 2826 2827 static int 2828 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 2829 { 2830 struct bnxt *bp = dev->data->dev_private; 2831 uint16_t vlan = bp->vlan; 2832 int rc; 2833 2834 rc = is_bnxt_in_error(bp); 2835 if (rc) 2836 return rc; 2837 2838 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { 2839 PMD_DRV_LOG(ERR, 2840 "PVID cannot be modified for this function\n"); 2841 return -ENOTSUP; 2842 } 2843 bp->vlan = on ? 
pvid : 0; 2844 2845 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 2846 if (rc) 2847 bp->vlan = vlan; 2848 return rc; 2849 } 2850 2851 static int 2852 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 2853 { 2854 struct bnxt *bp = dev->data->dev_private; 2855 int rc; 2856 2857 rc = is_bnxt_in_error(bp); 2858 if (rc) 2859 return rc; 2860 2861 return bnxt_hwrm_port_led_cfg(bp, true); 2862 } 2863 2864 static int 2865 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 2866 { 2867 struct bnxt *bp = dev->data->dev_private; 2868 int rc; 2869 2870 rc = is_bnxt_in_error(bp); 2871 if (rc) 2872 return rc; 2873 2874 return bnxt_hwrm_port_led_cfg(bp, false); 2875 } 2876 2877 static uint32_t 2878 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id) 2879 { 2880 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2881 uint32_t desc = 0, raw_cons = 0, cons; 2882 struct bnxt_cp_ring_info *cpr; 2883 struct bnxt_rx_queue *rxq; 2884 struct rx_pkt_cmpl *rxcmp; 2885 int rc; 2886 2887 rc = is_bnxt_in_error(bp); 2888 if (rc) 2889 return rc; 2890 2891 rxq = dev->data->rx_queues[rx_queue_id]; 2892 cpr = rxq->cp_ring; 2893 raw_cons = cpr->cp_raw_cons; 2894 2895 while (1) { 2896 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 2897 rte_prefetch0(&cpr->cp_desc_ring[cons]); 2898 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 2899 2900 if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) { 2901 break; 2902 } else { 2903 raw_cons++; 2904 desc++; 2905 } 2906 } 2907 2908 return desc; 2909 } 2910 2911 static int 2912 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 2913 { 2914 struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue; 2915 struct bnxt_rx_ring_info *rxr; 2916 struct bnxt_cp_ring_info *cpr; 2917 struct rte_mbuf *rx_buf; 2918 struct rx_pkt_cmpl *rxcmp; 2919 uint32_t cons, cp_cons; 2920 int rc; 2921 2922 if (!rxq) 2923 return -EINVAL; 2924 2925 rc = is_bnxt_in_error(rxq->bp); 2926 if (rc) 2927 return rc; 2928 2929 cpr = rxq->cp_ring; 2930 rxr = rxq->rx_ring; 2931 2932 if (offset >= rxq->nb_rx_desc) 2933 return -EINVAL; 2934 2935 cons = RING_CMP(cpr->cp_ring_struct, offset); 2936 cp_cons = cpr->cp_raw_cons; 2937 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 2938 2939 if (cons > cp_cons) { 2940 if (CMPL_VALID(rxcmp, cpr->valid)) 2941 return RTE_ETH_RX_DESC_DONE; 2942 } else { 2943 if (CMPL_VALID(rxcmp, !cpr->valid)) 2944 return RTE_ETH_RX_DESC_DONE; 2945 } 2946 rx_buf = rxr->rx_buf_ring[cons]; 2947 if (rx_buf == NULL || rx_buf == &rxq->fake_mbuf) 2948 return RTE_ETH_RX_DESC_UNAVAIL; 2949 2950 2951 return RTE_ETH_RX_DESC_AVAIL; 2952 } 2953 2954 static int 2955 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 2956 { 2957 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 2958 struct bnxt_tx_ring_info *txr; 2959 struct bnxt_cp_ring_info *cpr; 2960 struct bnxt_sw_tx_bd *tx_buf; 2961 struct tx_pkt_cmpl *txcmp; 2962 uint32_t cons, cp_cons; 2963 int rc; 2964 2965 if (!txq) 2966 return -EINVAL; 2967 2968 rc = is_bnxt_in_error(txq->bp); 2969 if (rc) 2970 return rc; 2971 2972 cpr = txq->cp_ring; 2973 txr = txq->tx_ring; 2974 2975 if (offset >= txq->nb_tx_desc) 2976 return -EINVAL; 2977 2978 cons = RING_CMP(cpr->cp_ring_struct, offset); 2979 txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 2980 cp_cons = cpr->cp_raw_cons; 2981 2982 if (cons > cp_cons) { 2983 if (CMPL_VALID(txcmp, cpr->valid)) 2984 return RTE_ETH_TX_DESC_UNAVAIL; 2985 } else { 2986 if (CMPL_VALID(txcmp, !cpr->valid)) 2987 return RTE_ETH_TX_DESC_UNAVAIL; 2988 } 2989 tx_buf = 
&txr->tx_buf_ring[cons]; 2990 if (tx_buf->mbuf == NULL) 2991 return RTE_ETH_TX_DESC_DONE; 2992 2993 return RTE_ETH_TX_DESC_FULL; 2994 } 2995 2996 static int 2997 bnxt_parse_fdir_filter(struct bnxt *bp, 2998 struct rte_eth_fdir_filter *fdir, 2999 struct bnxt_filter_info *filter) 3000 { 3001 enum rte_fdir_mode fdir_mode = 3002 bp->eth_dev->data->dev_conf.fdir_conf.mode; 3003 struct bnxt_vnic_info *vnic0, *vnic; 3004 struct bnxt_filter_info *filter1; 3005 uint32_t en = 0; 3006 int i; 3007 3008 if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3009 return -EINVAL; 3010 3011 filter->l2_ovlan = fdir->input.flow_ext.vlan_tci; 3012 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; 3013 3014 switch (fdir->input.flow_type) { 3015 case RTE_ETH_FLOW_IPV4: 3016 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 3017 /* FALLTHROUGH */ 3018 filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip; 3019 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3020 filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip; 3021 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3022 filter->ip_protocol = fdir->input.flow.ip4_flow.proto; 3023 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3024 filter->ip_addr_type = 3025 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 3026 filter->src_ipaddr_mask[0] = 0xffffffff; 3027 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3028 filter->dst_ipaddr_mask[0] = 0xffffffff; 3029 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3030 filter->ethertype = 0x800; 3031 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3032 break; 3033 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 3034 filter->src_port = fdir->input.flow.tcp4_flow.src_port; 3035 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 3036 filter->dst_port = fdir->input.flow.tcp4_flow.dst_port; 3037 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 3038 filter->dst_port_mask = 0xffff; 3039 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 3040 filter->src_port_mask = 0xffff; 3041 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 3042 filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip; 3043 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3044 filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip; 3045 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3046 filter->ip_protocol = 6; 3047 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3048 filter->ip_addr_type = 3049 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 3050 filter->src_ipaddr_mask[0] = 0xffffffff; 3051 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3052 filter->dst_ipaddr_mask[0] = 0xffffffff; 3053 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3054 filter->ethertype = 0x800; 3055 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3056 break; 3057 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 3058 filter->src_port = fdir->input.flow.udp4_flow.src_port; 3059 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 3060 filter->dst_port = fdir->input.flow.udp4_flow.dst_port; 3061 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 3062 filter->dst_port_mask = 0xffff; 3063 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 3064 filter->src_port_mask = 0xffff; 3065 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 3066 filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip; 3067 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3068 filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip; 3069 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3070 filter->ip_protocol = 17; 3071 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3072 filter->ip_addr_type = 3073 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 3074 filter->src_ipaddr_mask[0] = 0xffffffff; 3075 en |= 
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3076 filter->dst_ipaddr_mask[0] = 0xffffffff; 3077 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3078 filter->ethertype = 0x800; 3079 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3080 break; 3081 case RTE_ETH_FLOW_IPV6: 3082 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 3083 /* FALLTHROUGH */ 3084 filter->ip_addr_type = 3085 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 3086 filter->ip_protocol = fdir->input.flow.ipv6_flow.proto; 3087 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3088 rte_memcpy(filter->src_ipaddr, 3089 fdir->input.flow.ipv6_flow.src_ip, 16); 3090 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3091 rte_memcpy(filter->dst_ipaddr, 3092 fdir->input.flow.ipv6_flow.dst_ip, 16); 3093 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3094 memset(filter->dst_ipaddr_mask, 0xff, 16); 3095 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3096 memset(filter->src_ipaddr_mask, 0xff, 16); 3097 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3098 filter->ethertype = 0x86dd; 3099 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3100 break; 3101 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 3102 filter->src_port = fdir->input.flow.tcp6_flow.src_port; 3103 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 3104 filter->dst_port = fdir->input.flow.tcp6_flow.dst_port; 3105 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 3106 filter->dst_port_mask = 0xffff; 3107 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 3108 filter->src_port_mask = 0xffff; 3109 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 3110 filter->ip_addr_type = 3111 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 3112 filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto; 3113 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3114 rte_memcpy(filter->src_ipaddr, 3115 fdir->input.flow.tcp6_flow.ip.src_ip, 16); 3116 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3117 rte_memcpy(filter->dst_ipaddr, 3118 fdir->input.flow.tcp6_flow.ip.dst_ip, 16); 3119 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3120 memset(filter->dst_ipaddr_mask, 0xff, 16); 3121 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3122 memset(filter->src_ipaddr_mask, 0xff, 16); 3123 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3124 filter->ethertype = 0x86dd; 3125 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3126 break; 3127 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 3128 filter->src_port = fdir->input.flow.udp6_flow.src_port; 3129 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 3130 filter->dst_port = fdir->input.flow.udp6_flow.dst_port; 3131 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 3132 filter->dst_port_mask = 0xffff; 3133 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 3134 filter->src_port_mask = 0xffff; 3135 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 3136 filter->ip_addr_type = 3137 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 3138 filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto; 3139 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3140 rte_memcpy(filter->src_ipaddr, 3141 fdir->input.flow.udp6_flow.ip.src_ip, 16); 3142 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3143 rte_memcpy(filter->dst_ipaddr, 3144 fdir->input.flow.udp6_flow.ip.dst_ip, 16); 3145 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3146 memset(filter->dst_ipaddr_mask, 0xff, 16); 3147 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3148 memset(filter->src_ipaddr_mask, 0xff, 16); 3149 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3150 filter->ethertype = 0x86dd; 3151 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3152 break; 3153 case RTE_ETH_FLOW_L2_PAYLOAD: 3154 
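/* L2 payload flows: match on the Ethertype taken from the flow spec. */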
filter->ethertype = fdir->input.flow.l2_flow.ether_type; 3155 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3156 break; 3157 case RTE_ETH_FLOW_VXLAN: 3158 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 3159 return -EINVAL; 3160 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 3161 filter->tunnel_type = 3162 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 3163 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 3164 break; 3165 case RTE_ETH_FLOW_NVGRE: 3166 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 3167 return -EINVAL; 3168 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 3169 filter->tunnel_type = 3170 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 3171 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 3172 break; 3173 case RTE_ETH_FLOW_UNKNOWN: 3174 case RTE_ETH_FLOW_RAW: 3175 case RTE_ETH_FLOW_FRAG_IPV4: 3176 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: 3177 case RTE_ETH_FLOW_FRAG_IPV6: 3178 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: 3179 case RTE_ETH_FLOW_IPV6_EX: 3180 case RTE_ETH_FLOW_IPV6_TCP_EX: 3181 case RTE_ETH_FLOW_IPV6_UDP_EX: 3182 case RTE_ETH_FLOW_GENEVE: 3183 /* FALLTHROUGH */ 3184 default: 3185 return -EINVAL; 3186 } 3187 3188 vnic0 = BNXT_GET_DEFAULT_VNIC(bp); 3189 vnic = &bp->vnic_info[fdir->action.rx_queue]; 3190 if (vnic == NULL) { 3191 PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue); 3192 return -EINVAL; 3193 } 3194 3195 if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3196 rte_memcpy(filter->dst_macaddr, 3197 fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6); 3198 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; 3199 } 3200 3201 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) { 3202 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 3203 filter1 = STAILQ_FIRST(&vnic0->filter); 3204 //filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 3205 } else { 3206 filter->dst_id = vnic->fw_vnic_id; 3207 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 3208 if (filter->dst_macaddr[i] == 0x00) 3209 filter1 = STAILQ_FIRST(&vnic0->filter); 3210 else 3211 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 3212 } 3213 3214 if (filter1 == NULL) 3215 return -EINVAL; 3216 3217 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 3218 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 3219 3220 filter->enables = en; 3221 3222 return 0; 3223 } 3224 3225 static struct bnxt_filter_info * 3226 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf, 3227 struct bnxt_vnic_info **mvnic) 3228 { 3229 struct bnxt_filter_info *mf = NULL; 3230 int i; 3231 3232 for (i = bp->nr_vnics - 1; i >= 0; i--) { 3233 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3234 3235 STAILQ_FOREACH(mf, &vnic->filter, next) { 3236 if (mf->filter_type == nf->filter_type && 3237 mf->flags == nf->flags && 3238 mf->src_port == nf->src_port && 3239 mf->src_port_mask == nf->src_port_mask && 3240 mf->dst_port == nf->dst_port && 3241 mf->dst_port_mask == nf->dst_port_mask && 3242 mf->ip_protocol == nf->ip_protocol && 3243 mf->ip_addr_type == nf->ip_addr_type && 3244 mf->ethertype == nf->ethertype && 3245 mf->vni == nf->vni && 3246 mf->tunnel_type == nf->tunnel_type && 3247 mf->l2_ovlan == nf->l2_ovlan && 3248 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 3249 mf->l2_ivlan == nf->l2_ivlan && 3250 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 3251 !memcmp(mf->l2_addr, nf->l2_addr, 3252 RTE_ETHER_ADDR_LEN) && 3253 !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, 3254 RTE_ETHER_ADDR_LEN) && 3255 !memcmp(mf->src_macaddr, nf->src_macaddr, 3256 RTE_ETHER_ADDR_LEN) && 3257 
!memcmp(mf->dst_macaddr, nf->dst_macaddr, 3258 RTE_ETHER_ADDR_LEN) && 3259 !memcmp(mf->src_ipaddr, nf->src_ipaddr, 3260 sizeof(nf->src_ipaddr)) && 3261 !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, 3262 sizeof(nf->src_ipaddr_mask)) && 3263 !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, 3264 sizeof(nf->dst_ipaddr)) && 3265 !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, 3266 sizeof(nf->dst_ipaddr_mask))) { 3267 if (mvnic) 3268 *mvnic = vnic; 3269 return mf; 3270 } 3271 } 3272 } 3273 return NULL; 3274 } 3275 3276 static int 3277 bnxt_fdir_filter(struct rte_eth_dev *dev, 3278 enum rte_filter_op filter_op, 3279 void *arg) 3280 { 3281 struct bnxt *bp = dev->data->dev_private; 3282 struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg; 3283 struct bnxt_filter_info *filter, *match; 3284 struct bnxt_vnic_info *vnic, *mvnic; 3285 int ret = 0, i; 3286 3287 if (filter_op == RTE_ETH_FILTER_NOP) 3288 return 0; 3289 3290 if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) 3291 return -EINVAL; 3292 3293 switch (filter_op) { 3294 case RTE_ETH_FILTER_ADD: 3295 case RTE_ETH_FILTER_DELETE: 3296 /* FALLTHROUGH */ 3297 filter = bnxt_get_unused_filter(bp); 3298 if (filter == NULL) { 3299 PMD_DRV_LOG(ERR, 3300 "Not enough resources for a new flow.\n"); 3301 return -ENOMEM; 3302 } 3303 3304 ret = bnxt_parse_fdir_filter(bp, fdir, filter); 3305 if (ret != 0) 3306 goto free_filter; 3307 filter->filter_type = HWRM_CFA_NTUPLE_FILTER; 3308 3309 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 3310 vnic = &bp->vnic_info[0]; 3311 else 3312 vnic = &bp->vnic_info[fdir->action.rx_queue]; 3313 3314 match = bnxt_match_fdir(bp, filter, &mvnic); 3315 if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) { 3316 if (match->dst_id == vnic->fw_vnic_id) { 3317 PMD_DRV_LOG(ERR, "Flow already exists.\n"); 3318 ret = -EEXIST; 3319 goto free_filter; 3320 } else { 3321 match->dst_id = vnic->fw_vnic_id; 3322 ret = bnxt_hwrm_set_ntuple_filter(bp, 3323 match->dst_id, 3324 match); 3325 STAILQ_REMOVE(&mvnic->filter, match, 3326 bnxt_filter_info, next); 3327 STAILQ_INSERT_TAIL(&vnic->filter, match, next); 3328 PMD_DRV_LOG(ERR, 3329 "Filter with matching pattern exist\n"); 3330 PMD_DRV_LOG(ERR, 3331 "Updated it to new destination q\n"); 3332 goto free_filter; 3333 } 3334 } 3335 if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) { 3336 PMD_DRV_LOG(ERR, "Flow does not exist.\n"); 3337 ret = -ENOENT; 3338 goto free_filter; 3339 } 3340 3341 if (filter_op == RTE_ETH_FILTER_ADD) { 3342 ret = bnxt_hwrm_set_ntuple_filter(bp, 3343 filter->dst_id, 3344 filter); 3345 if (ret) 3346 goto free_filter; 3347 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 3348 } else { 3349 ret = bnxt_hwrm_clear_ntuple_filter(bp, match); 3350 STAILQ_REMOVE(&vnic->filter, match, 3351 bnxt_filter_info, next); 3352 bnxt_free_filter(bp, match); 3353 bnxt_free_filter(bp, filter); 3354 } 3355 break; 3356 case RTE_ETH_FILTER_FLUSH: 3357 for (i = bp->nr_vnics - 1; i >= 0; i--) { 3358 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3359 3360 STAILQ_FOREACH(filter, &vnic->filter, next) { 3361 if (filter->filter_type == 3362 HWRM_CFA_NTUPLE_FILTER) { 3363 ret = 3364 bnxt_hwrm_clear_ntuple_filter(bp, 3365 filter); 3366 STAILQ_REMOVE(&vnic->filter, filter, 3367 bnxt_filter_info, next); 3368 } 3369 } 3370 } 3371 return ret; 3372 case RTE_ETH_FILTER_UPDATE: 3373 case RTE_ETH_FILTER_STATS: 3374 case RTE_ETH_FILTER_INFO: 3375 PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op); 3376 break; 3377 default: 3378 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); 
3379 ret = -EINVAL; 3380 break; 3381 } 3382 return ret; 3383 3384 free_filter: 3385 bnxt_free_filter(bp, filter); 3386 return ret; 3387 } 3388 3389 int 3390 bnxt_filter_ctrl_op(struct rte_eth_dev *dev, 3391 enum rte_filter_type filter_type, 3392 enum rte_filter_op filter_op, void *arg) 3393 { 3394 struct bnxt *bp = dev->data->dev_private; 3395 int ret = 0; 3396 3397 if (!bp) 3398 return -EIO; 3399 3400 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) { 3401 struct bnxt_representor *vfr = dev->data->dev_private; 3402 bp = vfr->parent_dev->data->dev_private; 3403 /* parent is deleted while children are still valid */ 3404 if (!bp) { 3405 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error %d:%d\n", 3406 dev->data->port_id, 3407 filter_type, 3408 filter_op); 3409 return -EIO; 3410 } 3411 } 3412 3413 ret = is_bnxt_in_error(bp); 3414 if (ret) 3415 return ret; 3416 3417 switch (filter_type) { 3418 case RTE_ETH_FILTER_FDIR: 3419 ret = bnxt_fdir_filter(dev, filter_op, arg); 3420 break; 3421 case RTE_ETH_FILTER_GENERIC: 3422 if (filter_op != RTE_ETH_FILTER_GET) 3423 return -EINVAL; 3424 3425 /* PMD supports thread-safe flow operations. rte_flow API 3426 * functions can avoid mutex for multi-thread safety. 3427 */ 3428 dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; 3429 3430 if (BNXT_TRUFLOW_EN(bp)) 3431 *(const void **)arg = &bnxt_ulp_rte_flow_ops; 3432 else 3433 *(const void **)arg = &bnxt_flow_ops; 3434 break; 3435 default: 3436 PMD_DRV_LOG(ERR, 3437 "Filter type (%d) not supported", filter_type); 3438 ret = -EINVAL; 3439 break; 3440 } 3441 return ret; 3442 } 3443 3444 static const uint32_t * 3445 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 3446 { 3447 static const uint32_t ptypes[] = { 3448 RTE_PTYPE_L2_ETHER_VLAN, 3449 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 3450 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 3451 RTE_PTYPE_L4_ICMP, 3452 RTE_PTYPE_L4_TCP, 3453 RTE_PTYPE_L4_UDP, 3454 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 3455 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 3456 RTE_PTYPE_INNER_L4_ICMP, 3457 RTE_PTYPE_INNER_L4_TCP, 3458 RTE_PTYPE_INNER_L4_UDP, 3459 RTE_PTYPE_UNKNOWN 3460 }; 3461 3462 if (!dev->rx_pkt_burst) 3463 return NULL; 3464 3465 return ptypes; 3466 } 3467 3468 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 3469 int reg_win) 3470 { 3471 uint32_t reg_base = *reg_arr & 0xfffff000; 3472 uint32_t win_off; 3473 int i; 3474 3475 for (i = 0; i < count; i++) { 3476 if ((reg_arr[i] & 0xfffff000) != reg_base) 3477 return -ERANGE; 3478 } 3479 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 3480 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 3481 return 0; 3482 } 3483 3484 static int bnxt_map_ptp_regs(struct bnxt *bp) 3485 { 3486 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3487 uint32_t *reg_arr; 3488 int rc, i; 3489 3490 reg_arr = ptp->rx_regs; 3491 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 3492 if (rc) 3493 return rc; 3494 3495 reg_arr = ptp->tx_regs; 3496 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 3497 if (rc) 3498 return rc; 3499 3500 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 3501 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 3502 3503 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 3504 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 3505 3506 return 0; 3507 } 3508 3509 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 3510 { 3511 rte_write32(0, (uint8_t *)bp->bar0 + 3512 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 3513 rte_write32(0, (uint8_t *)bp->bar0 + 3514 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 3515 } 3516 3517 static uint64_t 
bnxt_cc_read(struct bnxt *bp) 3518 { 3519 uint64_t ns; 3520 3521 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3522 BNXT_GRCPF_REG_SYNC_TIME)); 3523 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3524 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 3525 return ns; 3526 } 3527 3528 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 3529 { 3530 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3531 uint32_t fifo; 3532 3533 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3534 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3535 if (fifo & BNXT_PTP_TX_FIFO_EMPTY) 3536 return -EAGAIN; 3537 3538 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3539 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3540 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3541 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 3542 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3543 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 3544 3545 return 0; 3546 } 3547 3548 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 3549 { 3550 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3551 struct bnxt_pf_info *pf = bp->pf; 3552 uint16_t port_id; 3553 uint32_t fifo; 3554 3555 if (!ptp) 3556 return -ENODEV; 3557 3558 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3559 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3560 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 3561 return -EAGAIN; 3562 3563 port_id = pf->port_id; 3564 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3565 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3566 3567 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3568 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3569 if (fifo & BNXT_PTP_RX_FIFO_PENDING) { 3570 /* bnxt_clr_rx_ts(bp); TBD */ 3571 return -EBUSY; 3572 } 3573 3574 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3575 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3576 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3577 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3578 3579 return 0; 3580 } 3581 3582 static int 3583 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 3584 { 3585 uint64_t ns; 3586 struct bnxt *bp = dev->data->dev_private; 3587 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3588 3589 if (!ptp) 3590 return 0; 3591 3592 ns = rte_timespec_to_ns(ts); 3593 /* Set the timecounters to a new value. 
*/ 3594 ptp->tc.nsec = ns; 3595 3596 return 0; 3597 } 3598 3599 static int 3600 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 3601 { 3602 struct bnxt *bp = dev->data->dev_private; 3603 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3604 uint64_t ns, systime_cycles = 0; 3605 int rc = 0; 3606 3607 if (!ptp) 3608 return 0; 3609 3610 if (BNXT_CHIP_THOR(bp)) 3611 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 3612 &systime_cycles); 3613 else 3614 systime_cycles = bnxt_cc_read(bp); 3615 3616 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3617 *ts = rte_ns_to_timespec(ns); 3618 3619 return rc; 3620 } 3621 static int 3622 bnxt_timesync_enable(struct rte_eth_dev *dev) 3623 { 3624 struct bnxt *bp = dev->data->dev_private; 3625 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3626 uint32_t shift = 0; 3627 int rc; 3628 3629 if (!ptp) 3630 return 0; 3631 3632 ptp->rx_filter = 1; 3633 ptp->tx_tstamp_en = 1; 3634 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3635 3636 rc = bnxt_hwrm_ptp_cfg(bp); 3637 if (rc) 3638 return rc; 3639 3640 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 3641 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3642 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3643 3644 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3645 ptp->tc.cc_shift = shift; 3646 ptp->tc.nsec_mask = (1ULL << shift) - 1; 3647 3648 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3649 ptp->rx_tstamp_tc.cc_shift = shift; 3650 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3651 3652 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3653 ptp->tx_tstamp_tc.cc_shift = shift; 3654 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3655 3656 if (!BNXT_CHIP_THOR(bp)) 3657 bnxt_map_ptp_regs(bp); 3658 3659 return 0; 3660 } 3661 3662 static int 3663 bnxt_timesync_disable(struct rte_eth_dev *dev) 3664 { 3665 struct bnxt *bp = dev->data->dev_private; 3666 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3667 3668 if (!ptp) 3669 return 0; 3670 3671 ptp->rx_filter = 0; 3672 ptp->tx_tstamp_en = 0; 3673 ptp->rxctl = 0; 3674 3675 bnxt_hwrm_ptp_cfg(bp); 3676 3677 if (!BNXT_CHIP_THOR(bp)) 3678 bnxt_unmap_ptp_regs(bp); 3679 3680 return 0; 3681 } 3682 3683 static int 3684 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3685 struct timespec *timestamp, 3686 uint32_t flags __rte_unused) 3687 { 3688 struct bnxt *bp = dev->data->dev_private; 3689 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3690 uint64_t rx_tstamp_cycles = 0; 3691 uint64_t ns; 3692 3693 if (!ptp) 3694 return 0; 3695 3696 if (BNXT_CHIP_THOR(bp)) 3697 rx_tstamp_cycles = ptp->rx_timestamp; 3698 else 3699 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 3700 3701 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 3702 *timestamp = rte_ns_to_timespec(ns); 3703 return 0; 3704 } 3705 3706 static int 3707 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3708 struct timespec *timestamp) 3709 { 3710 struct bnxt *bp = dev->data->dev_private; 3711 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3712 uint64_t tx_tstamp_cycles = 0; 3713 uint64_t ns; 3714 int rc = 0; 3715 3716 if (!ptp) 3717 return 0; 3718 3719 if (BNXT_CHIP_THOR(bp)) 3720 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, 3721 &tx_tstamp_cycles); 3722 else 3723 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 3724 3725 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 3726 *timestamp = rte_ns_to_timespec(ns); 3727 3728 return rc; 3729 } 3730 3731 static int 3732 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 3733 { 3734 
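/* Clock adjustment is applied to the software timecounter only:
 * the signed delta (in nanoseconds) is added to ptp->tc.nsec.
 */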
struct bnxt *bp = dev->data->dev_private; 3735 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3736 3737 if (!ptp) 3738 return 0; 3739 3740 ptp->tc.nsec += delta; 3741 3742 return 0; 3743 } 3744 3745 static int 3746 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 3747 { 3748 struct bnxt *bp = dev->data->dev_private; 3749 int rc; 3750 uint32_t dir_entries; 3751 uint32_t entry_length; 3752 3753 rc = is_bnxt_in_error(bp); 3754 if (rc) 3755 return rc; 3756 3757 PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n", 3758 bp->pdev->addr.domain, bp->pdev->addr.bus, 3759 bp->pdev->addr.devid, bp->pdev->addr.function); 3760 3761 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 3762 if (rc != 0) 3763 return rc; 3764 3765 return dir_entries * entry_length; 3766 } 3767 3768 static int 3769 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 3770 struct rte_dev_eeprom_info *in_eeprom) 3771 { 3772 struct bnxt *bp = dev->data->dev_private; 3773 uint32_t index; 3774 uint32_t offset; 3775 int rc; 3776 3777 rc = is_bnxt_in_error(bp); 3778 if (rc) 3779 return rc; 3780 3781 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3782 bp->pdev->addr.domain, bp->pdev->addr.bus, 3783 bp->pdev->addr.devid, bp->pdev->addr.function, 3784 in_eeprom->offset, in_eeprom->length); 3785 3786 if (in_eeprom->offset == 0) /* special offset value to get directory */ 3787 return bnxt_get_nvram_directory(bp, in_eeprom->length, 3788 in_eeprom->data); 3789 3790 index = in_eeprom->offset >> 24; 3791 offset = in_eeprom->offset & 0xffffff; 3792 3793 if (index != 0) 3794 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 3795 in_eeprom->length, in_eeprom->data); 3796 3797 return 0; 3798 } 3799 3800 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 3801 { 3802 switch (dir_type) { 3803 case BNX_DIR_TYPE_CHIMP_PATCH: 3804 case BNX_DIR_TYPE_BOOTCODE: 3805 case BNX_DIR_TYPE_BOOTCODE_2: 3806 case BNX_DIR_TYPE_APE_FW: 3807 case BNX_DIR_TYPE_APE_PATCH: 3808 case BNX_DIR_TYPE_KONG_FW: 3809 case BNX_DIR_TYPE_KONG_PATCH: 3810 case BNX_DIR_TYPE_BONO_FW: 3811 case BNX_DIR_TYPE_BONO_PATCH: 3812 /* FALLTHROUGH */ 3813 return true; 3814 } 3815 3816 return false; 3817 } 3818 3819 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 3820 { 3821 switch (dir_type) { 3822 case BNX_DIR_TYPE_AVS: 3823 case BNX_DIR_TYPE_EXP_ROM_MBA: 3824 case BNX_DIR_TYPE_PCIE: 3825 case BNX_DIR_TYPE_TSCF_UCODE: 3826 case BNX_DIR_TYPE_EXT_PHY: 3827 case BNX_DIR_TYPE_CCM: 3828 case BNX_DIR_TYPE_ISCSI_BOOT: 3829 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3830 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3831 /* FALLTHROUGH */ 3832 return true; 3833 } 3834 3835 return false; 3836 } 3837 3838 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 3839 { 3840 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3841 bnxt_dir_type_is_other_exec_format(dir_type); 3842 } 3843 3844 static int 3845 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 3846 struct rte_dev_eeprom_info *in_eeprom) 3847 { 3848 struct bnxt *bp = dev->data->dev_private; 3849 uint8_t index, dir_op; 3850 uint16_t type, ext, ordinal, attr; 3851 int rc; 3852 3853 rc = is_bnxt_in_error(bp); 3854 if (rc) 3855 return rc; 3856 3857 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3858 bp->pdev->addr.domain, bp->pdev->addr.bus, 3859 bp->pdev->addr.devid, bp->pdev->addr.function, 3860 in_eeprom->offset, in_eeprom->length); 3861 3862 if (!BNXT_PF(bp)) { 3863 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 3864 return -EINVAL; 3865 } 3866 3867 type = in_eeprom->magic >> 16; 3868 3869 
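/* The upper 16 bits of 'magic' select the directory entry type.
 * A type of 0xffff requests a directory operation: the low byte of
 * 'magic' is the entry index and the next byte is the operation
 * (0x0e == erase). Otherwise an NVM item is created or rewritten,
 * with the low 16 bits of 'magic' as the extension and 'offset'
 * carrying the ordinal (high 16 bits) and attributes (low 16 bits).
 */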
if (type == 0xffff) { /* special value for directory operations */ 3870 index = in_eeprom->magic & 0xff; 3871 dir_op = in_eeprom->magic >> 8; 3872 if (index == 0) 3873 return -EINVAL; 3874 switch (dir_op) { 3875 case 0x0e: /* erase */ 3876 if (in_eeprom->offset != ~in_eeprom->magic) 3877 return -EINVAL; 3878 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 3879 default: 3880 return -EINVAL; 3881 } 3882 } 3883 3884 /* Create or re-write an NVM item: */ 3885 if (bnxt_dir_type_is_executable(type) == true) 3886 return -EOPNOTSUPP; 3887 ext = in_eeprom->magic & 0xffff; 3888 ordinal = in_eeprom->offset >> 16; 3889 attr = in_eeprom->offset & 0xffff; 3890 3891 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 3892 in_eeprom->data, in_eeprom->length); 3893 } 3894 3895 /* 3896 * Initialization 3897 */ 3898 3899 static const struct eth_dev_ops bnxt_dev_ops = { 3900 .dev_infos_get = bnxt_dev_info_get_op, 3901 .dev_close = bnxt_dev_close_op, 3902 .dev_configure = bnxt_dev_configure_op, 3903 .dev_start = bnxt_dev_start_op, 3904 .dev_stop = bnxt_dev_stop_op, 3905 .dev_set_link_up = bnxt_dev_set_link_up_op, 3906 .dev_set_link_down = bnxt_dev_set_link_down_op, 3907 .stats_get = bnxt_stats_get_op, 3908 .stats_reset = bnxt_stats_reset_op, 3909 .rx_queue_setup = bnxt_rx_queue_setup_op, 3910 .rx_queue_release = bnxt_rx_queue_release_op, 3911 .tx_queue_setup = bnxt_tx_queue_setup_op, 3912 .tx_queue_release = bnxt_tx_queue_release_op, 3913 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 3914 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 3915 .reta_update = bnxt_reta_update_op, 3916 .reta_query = bnxt_reta_query_op, 3917 .rss_hash_update = bnxt_rss_hash_update_op, 3918 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 3919 .link_update = bnxt_link_update_op, 3920 .promiscuous_enable = bnxt_promiscuous_enable_op, 3921 .promiscuous_disable = bnxt_promiscuous_disable_op, 3922 .allmulticast_enable = bnxt_allmulticast_enable_op, 3923 .allmulticast_disable = bnxt_allmulticast_disable_op, 3924 .mac_addr_add = bnxt_mac_addr_add_op, 3925 .mac_addr_remove = bnxt_mac_addr_remove_op, 3926 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 3927 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 3928 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 3929 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 3930 .vlan_filter_set = bnxt_vlan_filter_set_op, 3931 .vlan_offload_set = bnxt_vlan_offload_set_op, 3932 .vlan_tpid_set = bnxt_vlan_tpid_set_op, 3933 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 3934 .mtu_set = bnxt_mtu_set_op, 3935 .mac_addr_set = bnxt_set_default_mac_addr_op, 3936 .xstats_get = bnxt_dev_xstats_get_op, 3937 .xstats_get_names = bnxt_dev_xstats_get_names_op, 3938 .xstats_reset = bnxt_dev_xstats_reset_op, 3939 .fw_version_get = bnxt_fw_version_get, 3940 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 3941 .rxq_info_get = bnxt_rxq_info_get_op, 3942 .txq_info_get = bnxt_txq_info_get_op, 3943 .rx_burst_mode_get = bnxt_rx_burst_mode_get, 3944 .tx_burst_mode_get = bnxt_tx_burst_mode_get, 3945 .dev_led_on = bnxt_dev_led_on_op, 3946 .dev_led_off = bnxt_dev_led_off_op, 3947 .rx_queue_start = bnxt_rx_queue_start, 3948 .rx_queue_stop = bnxt_rx_queue_stop, 3949 .tx_queue_start = bnxt_tx_queue_start, 3950 .tx_queue_stop = bnxt_tx_queue_stop, 3951 .filter_ctrl = bnxt_filter_ctrl_op, 3952 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 3953 .get_eeprom_length = bnxt_get_eeprom_length_op, 3954 .get_eeprom = bnxt_get_eeprom_op, 3955 .set_eeprom = bnxt_set_eeprom_op, 3956 .timesync_enable = 
bnxt_timesync_enable, 3957 .timesync_disable = bnxt_timesync_disable, 3958 .timesync_read_time = bnxt_timesync_read_time, 3959 .timesync_write_time = bnxt_timesync_write_time, 3960 .timesync_adjust_time = bnxt_timesync_adjust_time, 3961 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 3962 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 3963 }; 3964 3965 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) 3966 { 3967 uint32_t offset; 3968 3969 /* Only pre-map the reset GRC registers using window 3 */ 3970 rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + 3971 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); 3972 3973 offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); 3974 3975 return offset; 3976 } 3977 3978 int bnxt_map_fw_health_status_regs(struct bnxt *bp) 3979 { 3980 struct bnxt_error_recovery_info *info = bp->recovery_info; 3981 uint32_t reg_base = 0xffffffff; 3982 int i; 3983 3984 /* Only pre-map the monitoring GRC registers using window 2 */ 3985 for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { 3986 uint32_t reg = info->status_regs[i]; 3987 3988 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) 3989 continue; 3990 3991 if (reg_base == 0xffffffff) 3992 reg_base = reg & 0xfffff000; 3993 if ((reg & 0xfffff000) != reg_base) 3994 return -ERANGE; 3995 3996 /* Use mask 0xffc as the Lower 2 bits indicates 3997 * address space location 3998 */ 3999 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + 4000 (reg & 0xffc); 4001 } 4002 4003 if (reg_base == 0xffffffff) 4004 return 0; 4005 4006 rte_write32(reg_base, (uint8_t *)bp->bar0 + 4007 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 4008 4009 return 0; 4010 } 4011 4012 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) 4013 { 4014 struct bnxt_error_recovery_info *info = bp->recovery_info; 4015 uint32_t delay = info->delay_after_reset[index]; 4016 uint32_t val = info->reset_reg_val[index]; 4017 uint32_t reg = info->reset_reg[index]; 4018 uint32_t type, offset; 4019 4020 type = BNXT_FW_STATUS_REG_TYPE(reg); 4021 offset = BNXT_FW_STATUS_REG_OFF(reg); 4022 4023 switch (type) { 4024 case BNXT_FW_STATUS_REG_TYPE_CFG: 4025 rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); 4026 break; 4027 case BNXT_FW_STATUS_REG_TYPE_GRC: 4028 offset = bnxt_map_reset_regs(bp, offset); 4029 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4030 break; 4031 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4032 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4033 break; 4034 } 4035 /* wait on a specific interval of time until core reset is complete */ 4036 if (delay) 4037 rte_delay_ms(delay); 4038 } 4039 4040 static void bnxt_dev_cleanup(struct bnxt *bp) 4041 { 4042 bp->eth_dev->data->dev_link.link_status = 0; 4043 bp->link_info->link_up = 0; 4044 if (bp->eth_dev->data->dev_started) 4045 bnxt_dev_stop_op(bp->eth_dev); 4046 4047 bnxt_uninit_resources(bp, true); 4048 } 4049 4050 static int bnxt_restore_vlan_filters(struct bnxt *bp) 4051 { 4052 struct rte_eth_dev *dev = bp->eth_dev; 4053 struct rte_vlan_filter_conf *vfc; 4054 int vidx, vbit, rc; 4055 uint16_t vlan_id; 4056 4057 for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) { 4058 vfc = &dev->data->vlan_filter_conf; 4059 vidx = vlan_id / 64; 4060 vbit = vlan_id % 64; 4061 4062 /* Each bit corresponds to a VLAN id */ 4063 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) { 4064 rc = bnxt_add_vlan_filter(bp, vlan_id); 4065 if (rc) 4066 return rc; 4067 } 4068 } 4069 4070 return 0; 4071 } 4072 4073 static int bnxt_restore_mac_filters(struct bnxt *bp) 4074 { 4075 
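	/*
	 * Replay the unicast MAC filters recorded in eth_dev->data: every
	 * non-zero entry in mac_addrs[] is re-added for each pool selected
	 * in mac_pool_sel[] after a FW reset.
	 */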
struct rte_eth_dev *dev = bp->eth_dev; 4076 struct rte_eth_dev_info dev_info; 4077 struct rte_ether_addr *addr; 4078 uint64_t pool_mask; 4079 uint32_t pool = 0; 4080 uint16_t i; 4081 int rc; 4082 4083 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4084 return 0; 4085 4086 rc = bnxt_dev_info_get_op(dev, &dev_info); 4087 if (rc) 4088 return rc; 4089 4090 /* replay MAC address configuration */ 4091 for (i = 1; i < dev_info.max_mac_addrs; i++) { 4092 addr = &dev->data->mac_addrs[i]; 4093 4094 /* skip zero address */ 4095 if (rte_is_zero_ether_addr(addr)) 4096 continue; 4097 4098 pool = 0; 4099 pool_mask = dev->data->mac_pool_sel[i]; 4100 4101 do { 4102 if (pool_mask & 1ULL) { 4103 rc = bnxt_mac_addr_add_op(dev, addr, i, pool); 4104 if (rc) 4105 return rc; 4106 } 4107 pool_mask >>= 1; 4108 pool++; 4109 } while (pool_mask); 4110 } 4111 4112 return 0; 4113 } 4114 4115 static int bnxt_restore_filters(struct bnxt *bp) 4116 { 4117 struct rte_eth_dev *dev = bp->eth_dev; 4118 int ret = 0; 4119 4120 if (dev->data->all_multicast) { 4121 ret = bnxt_allmulticast_enable_op(dev); 4122 if (ret) 4123 return ret; 4124 } 4125 if (dev->data->promiscuous) { 4126 ret = bnxt_promiscuous_enable_op(dev); 4127 if (ret) 4128 return ret; 4129 } 4130 4131 ret = bnxt_restore_mac_filters(bp); 4132 if (ret) 4133 return ret; 4134 4135 ret = bnxt_restore_vlan_filters(bp); 4136 /* TODO restore other filters as well */ 4137 return ret; 4138 } 4139 4140 static void bnxt_dev_recover(void *arg) 4141 { 4142 struct bnxt *bp = arg; 4143 int timeout = bp->fw_reset_max_msecs; 4144 int rc = 0; 4145 4146 /* Clear Error flag so that device re-init should happen */ 4147 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 4148 4149 do { 4150 rc = bnxt_hwrm_ver_get(bp, SHORT_HWRM_CMD_TIMEOUT); 4151 if (rc == 0) 4152 break; 4153 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); 4154 timeout -= BNXT_FW_READY_WAIT_INTERVAL; 4155 } while (rc && timeout); 4156 4157 if (rc) { 4158 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); 4159 goto err; 4160 } 4161 4162 rc = bnxt_init_resources(bp, true); 4163 if (rc) { 4164 PMD_DRV_LOG(ERR, 4165 "Failed to initialize resources after reset\n"); 4166 goto err; 4167 } 4168 /* clear reset flag as the device is initialized now */ 4169 bp->flags &= ~BNXT_FLAG_FW_RESET; 4170 4171 rc = bnxt_dev_start_op(bp->eth_dev); 4172 if (rc) { 4173 PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); 4174 goto err_start; 4175 } 4176 4177 rc = bnxt_restore_filters(bp); 4178 if (rc) 4179 goto err_start; 4180 4181 PMD_DRV_LOG(INFO, "Recovered from FW reset\n"); 4182 return; 4183 err_start: 4184 bnxt_dev_stop_op(bp->eth_dev); 4185 err: 4186 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4187 bnxt_uninit_resources(bp, false); 4188 PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n"); 4189 } 4190 4191 void bnxt_dev_reset_and_resume(void *arg) 4192 { 4193 struct bnxt *bp = arg; 4194 int rc; 4195 4196 bnxt_dev_cleanup(bp); 4197 4198 bnxt_wait_for_device_shutdown(bp); 4199 4200 rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs, 4201 bnxt_dev_recover, (void *)bp); 4202 if (rc) 4203 PMD_DRV_LOG(ERR, "Error setting recovery alarm"); 4204 } 4205 4206 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) 4207 { 4208 struct bnxt_error_recovery_info *info = bp->recovery_info; 4209 uint32_t reg = info->status_regs[index]; 4210 uint32_t type, offset, val = 0; 4211 4212 type = BNXT_FW_STATUS_REG_TYPE(reg); 4213 offset = BNXT_FW_STATUS_REG_OFF(reg); 4214 4215 switch (type) { 4216 case BNXT_FW_STATUS_REG_TYPE_CFG: 4217 rte_pci_read_config(bp->pdev, &val, 
sizeof(val), offset); 4218 break; 4219 case BNXT_FW_STATUS_REG_TYPE_GRC: 4220 offset = info->mapped_status_regs[index]; 4221 /* FALLTHROUGH */ 4222 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4223 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 4224 offset)); 4225 break; 4226 } 4227 4228 return val; 4229 } 4230 4231 static int bnxt_fw_reset_all(struct bnxt *bp) 4232 { 4233 struct bnxt_error_recovery_info *info = bp->recovery_info; 4234 uint32_t i; 4235 int rc = 0; 4236 4237 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4238 /* Reset through master function driver */ 4239 for (i = 0; i < info->reg_array_cnt; i++) 4240 bnxt_write_fw_reset_reg(bp, i); 4241 /* Wait for time specified by FW after triggering reset */ 4242 rte_delay_ms(info->master_func_wait_period_after_reset); 4243 } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) { 4244 /* Reset with the help of Kong processor */ 4245 rc = bnxt_hwrm_fw_reset(bp); 4246 if (rc) 4247 PMD_DRV_LOG(ERR, "Failed to reset FW\n"); 4248 } 4249 4250 return rc; 4251 } 4252 4253 static void bnxt_fw_reset_cb(void *arg) 4254 { 4255 struct bnxt *bp = arg; 4256 struct bnxt_error_recovery_info *info = bp->recovery_info; 4257 int rc = 0; 4258 4259 /* Only Master function can do FW reset */ 4260 if (bnxt_is_master_func(bp) && 4261 bnxt_is_recovery_enabled(bp)) { 4262 rc = bnxt_fw_reset_all(bp); 4263 if (rc) { 4264 PMD_DRV_LOG(ERR, "Adapter recovery failed\n"); 4265 return; 4266 } 4267 } 4268 4269 /* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send 4270 * EXCEPTION_FATAL_ASYNC event to all the functions 4271 * (including MASTER FUNC). After receiving this Async, all the active 4272 * drivers should treat this case as FW initiated recovery 4273 */ 4274 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4275 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT; 4276 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT; 4277 4278 /* To recover from error */ 4279 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, 4280 (void *)bp); 4281 } 4282 } 4283 4284 /* Driver should poll FW heartbeat, reset_counter with the frequency 4285 * advertised by FW in HWRM_ERROR_RECOVERY_QCFG. 4286 * When the driver detects heartbeat stop or change in reset_counter, 4287 * it has to trigger a reset to recover from the error condition. 4288 * A “master PF” is the function who will have the privilege to 4289 * initiate the chimp reset. The master PF will be elected by the 4290 * firmware and will be notified through async message. 
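 *
 * The polling is implemented by bnxt_check_fw_health() below, which
 * re-arms itself through rte_eal_alarm_set() every driver_polling_freq
 * milliseconds and schedules bnxt_fw_reset_cb() once a dead FW
 * condition is detected.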
4291 */ 4292 static void bnxt_check_fw_health(void *arg) 4293 { 4294 struct bnxt *bp = arg; 4295 struct bnxt_error_recovery_info *info = bp->recovery_info; 4296 uint32_t val = 0, wait_msec; 4297 4298 if (!info || !bnxt_is_recovery_enabled(bp) || 4299 is_bnxt_in_error(bp)) 4300 return; 4301 4302 val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); 4303 if (val == info->last_heart_beat) 4304 goto reset; 4305 4306 info->last_heart_beat = val; 4307 4308 val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); 4309 if (val != info->last_reset_counter) 4310 goto reset; 4311 4312 info->last_reset_counter = val; 4313 4314 rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq, 4315 bnxt_check_fw_health, (void *)bp); 4316 4317 return; 4318 reset: 4319 /* Stop DMA to/from device */ 4320 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4321 bp->flags |= BNXT_FLAG_FW_RESET; 4322 4323 PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); 4324 4325 if (bnxt_is_master_func(bp)) 4326 wait_msec = info->master_func_wait_period; 4327 else 4328 wait_msec = info->normal_func_wait_period; 4329 4330 rte_eal_alarm_set(US_PER_MS * wait_msec, 4331 bnxt_fw_reset_cb, (void *)bp); 4332 } 4333 4334 void bnxt_schedule_fw_health_check(struct bnxt *bp) 4335 { 4336 uint32_t polling_freq; 4337 4338 pthread_mutex_lock(&bp->health_check_lock); 4339 4340 if (!bnxt_is_recovery_enabled(bp)) 4341 goto done; 4342 4343 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) 4344 goto done; 4345 4346 polling_freq = bp->recovery_info->driver_polling_freq; 4347 4348 rte_eal_alarm_set(US_PER_MS * polling_freq, 4349 bnxt_check_fw_health, (void *)bp); 4350 bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4351 4352 done: 4353 pthread_mutex_unlock(&bp->health_check_lock); 4354 } 4355 4356 static void bnxt_cancel_fw_health_check(struct bnxt *bp) 4357 { 4358 if (!bnxt_is_recovery_enabled(bp)) 4359 return; 4360 4361 rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp); 4362 bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4363 } 4364 4365 static bool bnxt_vf_pciid(uint16_t device_id) 4366 { 4367 switch (device_id) { 4368 case BROADCOM_DEV_ID_57304_VF: 4369 case BROADCOM_DEV_ID_57406_VF: 4370 case BROADCOM_DEV_ID_5731X_VF: 4371 case BROADCOM_DEV_ID_5741X_VF: 4372 case BROADCOM_DEV_ID_57414_VF: 4373 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4374 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4375 case BROADCOM_DEV_ID_58802_VF: 4376 case BROADCOM_DEV_ID_57500_VF1: 4377 case BROADCOM_DEV_ID_57500_VF2: 4378 /* FALLTHROUGH */ 4379 return true; 4380 default: 4381 return false; 4382 } 4383 } 4384 4385 static bool bnxt_thor_device(uint16_t device_id) 4386 { 4387 switch (device_id) { 4388 case BROADCOM_DEV_ID_57508: 4389 case BROADCOM_DEV_ID_57504: 4390 case BROADCOM_DEV_ID_57502: 4391 case BROADCOM_DEV_ID_57508_MF1: 4392 case BROADCOM_DEV_ID_57504_MF1: 4393 case BROADCOM_DEV_ID_57502_MF1: 4394 case BROADCOM_DEV_ID_57508_MF2: 4395 case BROADCOM_DEV_ID_57504_MF2: 4396 case BROADCOM_DEV_ID_57502_MF2: 4397 case BROADCOM_DEV_ID_57500_VF1: 4398 case BROADCOM_DEV_ID_57500_VF2: 4399 /* FALLTHROUGH */ 4400 return true; 4401 default: 4402 return false; 4403 } 4404 } 4405 4406 bool bnxt_stratus_device(struct bnxt *bp) 4407 { 4408 uint16_t device_id = bp->pdev->id.device_id; 4409 4410 switch (device_id) { 4411 case BROADCOM_DEV_ID_STRATUS_NIC: 4412 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4413 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4414 /* FALLTHROUGH */ 4415 return true; 4416 default: 4417 return false; 4418 } 4419 } 4420 4421 static int bnxt_init_board(struct rte_eth_dev 
*eth_dev) 4422 { 4423 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 4424 struct bnxt *bp = eth_dev->data->dev_private; 4425 4426 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 4427 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 4428 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 4429 if (!bp->bar0 || !bp->doorbell_base) { 4430 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 4431 return -ENODEV; 4432 } 4433 4434 bp->eth_dev = eth_dev; 4435 bp->pdev = pci_dev; 4436 4437 return 0; 4438 } 4439 4440 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 4441 struct bnxt_ctx_pg_info *ctx_pg, 4442 uint32_t mem_size, 4443 const char *suffix, 4444 uint16_t idx) 4445 { 4446 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 4447 const struct rte_memzone *mz = NULL; 4448 char mz_name[RTE_MEMZONE_NAMESIZE]; 4449 rte_iova_t mz_phys_addr; 4450 uint64_t valid_bits = 0; 4451 uint32_t sz; 4452 int i; 4453 4454 if (!mem_size) 4455 return 0; 4456 4457 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 4458 BNXT_PAGE_SIZE; 4459 rmem->page_size = BNXT_PAGE_SIZE; 4460 rmem->pg_arr = ctx_pg->ctx_pg_arr; 4461 rmem->dma_arr = ctx_pg->ctx_dma_arr; 4462 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 4463 4464 valid_bits = PTU_PTE_VALID; 4465 4466 if (rmem->nr_pages > 1) { 4467 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4468 "bnxt_ctx_pg_tbl%s_%x_%d", 4469 suffix, idx, bp->eth_dev->data->port_id); 4470 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4471 mz = rte_memzone_lookup(mz_name); 4472 if (!mz) { 4473 mz = rte_memzone_reserve_aligned(mz_name, 4474 rmem->nr_pages * 8, 4475 SOCKET_ID_ANY, 4476 RTE_MEMZONE_2MB | 4477 RTE_MEMZONE_SIZE_HINT_ONLY | 4478 RTE_MEMZONE_IOVA_CONTIG, 4479 BNXT_PAGE_SIZE); 4480 if (mz == NULL) 4481 return -ENOMEM; 4482 } 4483 4484 memset(mz->addr, 0, mz->len); 4485 mz_phys_addr = mz->iova; 4486 4487 rmem->pg_tbl = mz->addr; 4488 rmem->pg_tbl_map = mz_phys_addr; 4489 rmem->pg_tbl_mz = mz; 4490 } 4491 4492 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", 4493 suffix, idx, bp->eth_dev->data->port_id); 4494 mz = rte_memzone_lookup(mz_name); 4495 if (!mz) { 4496 mz = rte_memzone_reserve_aligned(mz_name, 4497 mem_size, 4498 SOCKET_ID_ANY, 4499 RTE_MEMZONE_1GB | 4500 RTE_MEMZONE_SIZE_HINT_ONLY | 4501 RTE_MEMZONE_IOVA_CONTIG, 4502 BNXT_PAGE_SIZE); 4503 if (mz == NULL) 4504 return -ENOMEM; 4505 } 4506 4507 memset(mz->addr, 0, mz->len); 4508 mz_phys_addr = mz->iova; 4509 4510 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 4511 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 4512 rmem->dma_arr[i] = mz_phys_addr + sz; 4513 4514 if (rmem->nr_pages > 1) { 4515 if (i == rmem->nr_pages - 2 && 4516 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4517 valid_bits |= PTU_PTE_NEXT_TO_LAST; 4518 else if (i == rmem->nr_pages - 1 && 4519 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4520 valid_bits |= PTU_PTE_LAST; 4521 4522 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 4523 valid_bits); 4524 } 4525 } 4526 4527 rmem->mz = mz; 4528 if (rmem->vmem_size) 4529 rmem->vmem = (void **)mz->addr; 4530 rmem->dma_arr[0] = mz_phys_addr; 4531 return 0; 4532 } 4533 4534 static void bnxt_free_ctx_mem(struct bnxt *bp) 4535 { 4536 int i; 4537 4538 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 4539 return; 4540 4541 bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; 4542 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 4543 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 4544 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 4545 
rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 4546 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 4547 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 4548 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 4549 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 4550 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 4551 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 4552 4553 for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { 4554 if (bp->ctx->tqm_mem[i]) 4555 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 4556 } 4557 4558 rte_free(bp->ctx); 4559 bp->ctx = NULL; 4560 } 4561 4562 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 4563 4564 #define min_t(type, x, y) ({ \ 4565 type __min1 = (x); \ 4566 type __min2 = (y); \ 4567 __min1 < __min2 ? __min1 : __min2; }) 4568 4569 #define max_t(type, x, y) ({ \ 4570 type __max1 = (x); \ 4571 type __max2 = (y); \ 4572 __max1 > __max2 ? __max1 : __max2; }) 4573 4574 #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) 4575 4576 int bnxt_alloc_ctx_mem(struct bnxt *bp) 4577 { 4578 struct bnxt_ctx_pg_info *ctx_pg; 4579 struct bnxt_ctx_mem_info *ctx; 4580 uint32_t mem_size, ena, entries; 4581 uint32_t entries_sp, min; 4582 int i, rc; 4583 4584 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 4585 if (rc) { 4586 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 4587 return rc; 4588 } 4589 ctx = bp->ctx; 4590 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 4591 return 0; 4592 4593 ctx_pg = &ctx->qp_mem; 4594 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 4595 mem_size = ctx->qp_entry_size * ctx_pg->entries; 4596 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 4597 if (rc) 4598 return rc; 4599 4600 ctx_pg = &ctx->srq_mem; 4601 ctx_pg->entries = ctx->srq_max_l2_entries; 4602 mem_size = ctx->srq_entry_size * ctx_pg->entries; 4603 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 4604 if (rc) 4605 return rc; 4606 4607 ctx_pg = &ctx->cq_mem; 4608 ctx_pg->entries = ctx->cq_max_l2_entries; 4609 mem_size = ctx->cq_entry_size * ctx_pg->entries; 4610 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 4611 if (rc) 4612 return rc; 4613 4614 ctx_pg = &ctx->vnic_mem; 4615 ctx_pg->entries = ctx->vnic_max_vnic_entries + 4616 ctx->vnic_max_ring_table_entries; 4617 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 4618 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 4619 if (rc) 4620 return rc; 4621 4622 ctx_pg = &ctx->stat_mem; 4623 ctx_pg->entries = ctx->stat_max_entries; 4624 mem_size = ctx->stat_entry_size * ctx_pg->entries; 4625 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 4626 if (rc) 4627 return rc; 4628 4629 min = ctx->tqm_min_entries_per_ring; 4630 4631 entries_sp = ctx->qp_max_l2_entries + 4632 ctx->vnic_max_vnic_entries + 4633 2 * ctx->qp_min_qp1_entries + min; 4634 entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple); 4635 4636 entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries; 4637 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 4638 entries = clamp_t(uint32_t, entries, min, 4639 ctx->tqm_max_entries_per_ring); 4640 for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 4641 ctx_pg = ctx->tqm_mem[i]; 4642 ctx_pg->entries = i ? 
entries : entries_sp; 4643 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 4644 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i); 4645 if (rc) 4646 return rc; 4647 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 4648 } 4649 4650 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 4651 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 4652 if (rc) 4653 PMD_DRV_LOG(ERR, 4654 "Failed to configure context mem: rc = %d\n", rc); 4655 else 4656 ctx->flags |= BNXT_CTX_FLAG_INITED; 4657 4658 return rc; 4659 } 4660 4661 static int bnxt_alloc_stats_mem(struct bnxt *bp) 4662 { 4663 struct rte_pci_device *pci_dev = bp->pdev; 4664 char mz_name[RTE_MEMZONE_NAMESIZE]; 4665 const struct rte_memzone *mz = NULL; 4666 uint32_t total_alloc_len; 4667 rte_iova_t mz_phys_addr; 4668 4669 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 4670 return 0; 4671 4672 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4673 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4674 pci_dev->addr.bus, pci_dev->addr.devid, 4675 pci_dev->addr.function, "rx_port_stats"); 4676 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4677 mz = rte_memzone_lookup(mz_name); 4678 total_alloc_len = 4679 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 4680 sizeof(struct rx_port_stats_ext) + 512); 4681 if (!mz) { 4682 mz = rte_memzone_reserve(mz_name, total_alloc_len, 4683 SOCKET_ID_ANY, 4684 RTE_MEMZONE_2MB | 4685 RTE_MEMZONE_SIZE_HINT_ONLY | 4686 RTE_MEMZONE_IOVA_CONTIG); 4687 if (mz == NULL) 4688 return -ENOMEM; 4689 } 4690 memset(mz->addr, 0, mz->len); 4691 mz_phys_addr = mz->iova; 4692 4693 bp->rx_mem_zone = (const void *)mz; 4694 bp->hw_rx_port_stats = mz->addr; 4695 bp->hw_rx_port_stats_map = mz_phys_addr; 4696 4697 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4698 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4699 pci_dev->addr.bus, pci_dev->addr.devid, 4700 pci_dev->addr.function, "tx_port_stats"); 4701 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4702 mz = rte_memzone_lookup(mz_name); 4703 total_alloc_len = 4704 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 4705 sizeof(struct tx_port_stats_ext) + 512); 4706 if (!mz) { 4707 mz = rte_memzone_reserve(mz_name, 4708 total_alloc_len, 4709 SOCKET_ID_ANY, 4710 RTE_MEMZONE_2MB | 4711 RTE_MEMZONE_SIZE_HINT_ONLY | 4712 RTE_MEMZONE_IOVA_CONTIG); 4713 if (mz == NULL) 4714 return -ENOMEM; 4715 } 4716 memset(mz->addr, 0, mz->len); 4717 mz_phys_addr = mz->iova; 4718 4719 bp->tx_mem_zone = (const void *)mz; 4720 bp->hw_tx_port_stats = mz->addr; 4721 bp->hw_tx_port_stats_map = mz_phys_addr; 4722 bp->flags |= BNXT_FLAG_PORT_STATS; 4723 4724 /* Display extended statistics if FW supports it */ 4725 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 4726 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 4727 !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED)) 4728 return 0; 4729 4730 bp->hw_rx_port_stats_ext = (void *) 4731 ((uint8_t *)bp->hw_rx_port_stats + 4732 sizeof(struct rx_port_stats)); 4733 bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + 4734 sizeof(struct rx_port_stats); 4735 bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; 4736 4737 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 || 4738 bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) { 4739 bp->hw_tx_port_stats_ext = (void *) 4740 ((uint8_t *)bp->hw_tx_port_stats + 4741 sizeof(struct tx_port_stats)); 4742 bp->hw_tx_port_stats_ext_map = 4743 bp->hw_tx_port_stats_map + 4744 sizeof(struct tx_port_stats); 4745 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS; 4746 } 4747 4748 return 0; 4749 } 4750 4751 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) 
4752 { 4753 struct bnxt *bp = eth_dev->data->dev_private; 4754 int rc = 0; 4755 4756 eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl", 4757 RTE_ETHER_ADDR_LEN * 4758 bp->max_l2_ctx, 4759 0); 4760 if (eth_dev->data->mac_addrs == NULL) { 4761 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n"); 4762 return -ENOMEM; 4763 } 4764 4765 if (!BNXT_HAS_DFLT_MAC_SET(bp)) { 4766 if (BNXT_PF(bp)) 4767 return -EINVAL; 4768 4769 /* Generate a random MAC address, if none was assigned by PF */ 4770 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n"); 4771 bnxt_eth_hw_addr_random(bp->mac_addr); 4772 PMD_DRV_LOG(INFO, 4773 "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n", 4774 bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2], 4775 bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]); 4776 4777 rc = bnxt_hwrm_set_mac(bp); 4778 if (rc) 4779 return rc; 4780 } 4781 4782 /* Copy the permanent MAC from the FUNC_QCAPS response */ 4783 memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN); 4784 4785 return rc; 4786 } 4787 4788 static int bnxt_restore_dflt_mac(struct bnxt *bp) 4789 { 4790 int rc = 0; 4791 4792 /* MAC is already configured in FW */ 4793 if (BNXT_HAS_DFLT_MAC_SET(bp)) 4794 return 0; 4795 4796 /* Restore the old MAC configured */ 4797 rc = bnxt_hwrm_set_mac(bp); 4798 if (rc) 4799 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n"); 4800 4801 return rc; 4802 } 4803 4804 static void bnxt_config_vf_req_fwd(struct bnxt *bp) 4805 { 4806 if (!BNXT_PF(bp)) 4807 return; 4808 4809 memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd)); 4810 4811 if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)) 4812 BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG); 4813 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG); 4814 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG); 4815 BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC); 4816 BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD); 4817 } 4818 4819 uint16_t 4820 bnxt_get_svif(uint16_t port_id, bool func_svif, 4821 enum bnxt_ulp_intf_type type) 4822 { 4823 struct rte_eth_dev *eth_dev; 4824 struct bnxt *bp; 4825 4826 eth_dev = &rte_eth_devices[port_id]; 4827 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 4828 struct bnxt_representor *vfr = eth_dev->data->dev_private; 4829 if (!vfr) 4830 return 0; 4831 4832 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 4833 return vfr->svif; 4834 4835 eth_dev = vfr->parent_dev; 4836 } 4837 4838 bp = eth_dev->data->dev_private; 4839 4840 return func_svif ?
bp->func_svif : bp->port_svif; 4841 } 4842 4843 uint16_t 4844 bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type) 4845 { 4846 struct rte_eth_dev *eth_dev; 4847 struct bnxt_vnic_info *vnic; 4848 struct bnxt *bp; 4849 4850 eth_dev = &rte_eth_devices[port]; 4851 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 4852 struct bnxt_representor *vfr = eth_dev->data->dev_private; 4853 if (!vfr) 4854 return 0; 4855 4856 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 4857 return vfr->dflt_vnic_id; 4858 4859 eth_dev = vfr->parent_dev; 4860 } 4861 4862 bp = eth_dev->data->dev_private; 4863 4864 vnic = BNXT_GET_DEFAULT_VNIC(bp); 4865 4866 return vnic->fw_vnic_id; 4867 } 4868 4869 uint16_t 4870 bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type) 4871 { 4872 struct rte_eth_dev *eth_dev; 4873 struct bnxt *bp; 4874 4875 eth_dev = &rte_eth_devices[port]; 4876 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 4877 struct bnxt_representor *vfr = eth_dev->data->dev_private; 4878 if (!vfr) 4879 return 0; 4880 4881 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 4882 return vfr->fw_fid; 4883 4884 eth_dev = vfr->parent_dev; 4885 } 4886 4887 bp = eth_dev->data->dev_private; 4888 4889 return bp->fw_fid; 4890 } 4891 4892 enum bnxt_ulp_intf_type 4893 bnxt_get_interface_type(uint16_t port) 4894 { 4895 struct rte_eth_dev *eth_dev; 4896 struct bnxt *bp; 4897 4898 eth_dev = &rte_eth_devices[port]; 4899 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) 4900 return BNXT_ULP_INTF_TYPE_VF_REP; 4901 4902 bp = eth_dev->data->dev_private; 4903 if (BNXT_PF(bp)) 4904 return BNXT_ULP_INTF_TYPE_PF; 4905 else if (BNXT_VF_IS_TRUSTED(bp)) 4906 return BNXT_ULP_INTF_TYPE_TRUSTED_VF; 4907 else if (BNXT_VF(bp)) 4908 return BNXT_ULP_INTF_TYPE_VF; 4909 4910 return BNXT_ULP_INTF_TYPE_INVALID; 4911 } 4912 4913 uint16_t 4914 bnxt_get_phy_port_id(uint16_t port_id) 4915 { 4916 struct bnxt_representor *vfr; 4917 struct rte_eth_dev *eth_dev; 4918 struct bnxt *bp; 4919 4920 eth_dev = &rte_eth_devices[port_id]; 4921 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 4922 vfr = eth_dev->data->dev_private; 4923 if (!vfr) 4924 return 0; 4925 4926 eth_dev = vfr->parent_dev; 4927 } 4928 4929 bp = eth_dev->data->dev_private; 4930 4931 return BNXT_PF(bp) ? bp->pf->port_id : bp->parent->port_id; 4932 } 4933 4934 uint16_t 4935 bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type) 4936 { 4937 struct rte_eth_dev *eth_dev; 4938 struct bnxt *bp; 4939 4940 eth_dev = &rte_eth_devices[port_id]; 4941 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 4942 struct bnxt_representor *vfr = eth_dev->data->dev_private; 4943 if (!vfr) 4944 return 0; 4945 4946 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 4947 return vfr->fw_fid - 1; 4948 4949 eth_dev = vfr->parent_dev; 4950 } 4951 4952 bp = eth_dev->data->dev_private; 4953 4954 return BNXT_PF(bp) ? 
bp->fw_fid - 1 : bp->parent->fid - 1; 4955 } 4956 4957 uint16_t 4958 bnxt_get_vport(uint16_t port_id) 4959 { 4960 return (1 << bnxt_get_phy_port_id(port_id)); 4961 } 4962 4963 static void bnxt_alloc_error_recovery_info(struct bnxt *bp) 4964 { 4965 struct bnxt_error_recovery_info *info = bp->recovery_info; 4966 4967 if (info) { 4968 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)) 4969 memset(info, 0, sizeof(*info)); 4970 return; 4971 } 4972 4973 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 4974 return; 4975 4976 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 4977 sizeof(*info), 0); 4978 if (!info) 4979 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 4980 4981 bp->recovery_info = info; 4982 } 4983 4984 static void bnxt_check_fw_status(struct bnxt *bp) 4985 { 4986 uint32_t fw_status; 4987 4988 if (!(bp->recovery_info && 4989 (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) 4990 return; 4991 4992 fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); 4993 if (fw_status != BNXT_FW_STATUS_HEALTHY) 4994 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", 4995 fw_status); 4996 } 4997 4998 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) 4999 { 5000 struct bnxt_error_recovery_info *info = bp->recovery_info; 5001 uint32_t status_loc; 5002 uint32_t sig_ver; 5003 5004 rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + 5005 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5006 sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5007 BNXT_GRCP_WINDOW_2_BASE + 5008 offsetof(struct hcomm_status, 5009 sig_ver))); 5010 /* If the signature is absent, then FW does not support this feature */ 5011 if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != 5012 HCOMM_STATUS_SIGNATURE_VAL) 5013 return 0; 5014 5015 if (!info) { 5016 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5017 sizeof(*info), 0); 5018 if (!info) 5019 return -ENOMEM; 5020 bp->recovery_info = info; 5021 } else { 5022 memset(info, 0, sizeof(*info)); 5023 } 5024 5025 status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5026 BNXT_GRCP_WINDOW_2_BASE + 5027 offsetof(struct hcomm_status, 5028 fw_status_loc))); 5029 5030 /* Only pre-map the FW health status GRC register */ 5031 if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) 5032 return 0; 5033 5034 info->status_regs[BNXT_FW_STATUS_REG] = status_loc; 5035 info->mapped_status_regs[BNXT_FW_STATUS_REG] = 5036 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); 5037 5038 rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + 5039 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5040 5041 bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; 5042 5043 return 0; 5044 } 5045 5046 static int bnxt_init_fw(struct bnxt *bp) 5047 { 5048 uint16_t mtu; 5049 int rc = 0; 5050 5051 bp->fw_cap = 0; 5052 5053 rc = bnxt_map_hcomm_fw_status_reg(bp); 5054 if (rc) 5055 return rc; 5056 5057 rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); 5058 if (rc) { 5059 bnxt_check_fw_status(bp); 5060 return rc; 5061 } 5062 5063 rc = bnxt_hwrm_func_reset(bp); 5064 if (rc) 5065 return -EIO; 5066 5067 rc = bnxt_hwrm_vnic_qcaps(bp); 5068 if (rc) 5069 return rc; 5070 5071 rc = bnxt_hwrm_queue_qportcfg(bp); 5072 if (rc) 5073 return rc; 5074 5075 /* Get the MAX capabilities for this function. 5076 * This function also allocates context memory for TQM rings and 5077 * informs the firmware about this allocated backing store memory. 
5078 */ 5079 rc = bnxt_hwrm_func_qcaps(bp); 5080 if (rc) 5081 return rc; 5082 5083 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 5084 if (rc) 5085 return rc; 5086 5087 bnxt_hwrm_port_mac_qcfg(bp); 5088 5089 bnxt_hwrm_parent_pf_qcfg(bp); 5090 5091 bnxt_hwrm_port_phy_qcaps(bp); 5092 5093 bnxt_alloc_error_recovery_info(bp); 5094 /* Get the adapter error recovery support info */ 5095 rc = bnxt_hwrm_error_recovery_qcfg(bp); 5096 if (rc) 5097 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5098 5099 bnxt_hwrm_port_led_qcaps(bp); 5100 5101 return 0; 5102 } 5103 5104 static int 5105 bnxt_init_locks(struct bnxt *bp) 5106 { 5107 int err; 5108 5109 err = pthread_mutex_init(&bp->flow_lock, NULL); 5110 if (err) { 5111 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); 5112 return err; 5113 } 5114 5115 err = pthread_mutex_init(&bp->def_cp_lock, NULL); 5116 if (err) 5117 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); 5118 5119 err = pthread_mutex_init(&bp->health_check_lock, NULL); 5120 if (err) 5121 PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n"); 5122 return err; 5123 } 5124 5125 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) 5126 { 5127 int rc = 0; 5128 5129 rc = bnxt_init_fw(bp); 5130 if (rc) 5131 return rc; 5132 5133 if (!reconfig_dev) { 5134 rc = bnxt_setup_mac_addr(bp->eth_dev); 5135 if (rc) 5136 return rc; 5137 } else { 5138 rc = bnxt_restore_dflt_mac(bp); 5139 if (rc) 5140 return rc; 5141 } 5142 5143 bnxt_config_vf_req_fwd(bp); 5144 5145 rc = bnxt_hwrm_func_driver_register(bp); 5146 if (rc) { 5147 PMD_DRV_LOG(ERR, "Failed to register driver"); 5148 return -EBUSY; 5149 } 5150 5151 if (BNXT_PF(bp)) { 5152 if (bp->pdev->max_vfs) { 5153 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 5154 if (rc) { 5155 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 5156 return rc; 5157 } 5158 } else { 5159 rc = bnxt_hwrm_allocate_pf_only(bp); 5160 if (rc) { 5161 PMD_DRV_LOG(ERR, 5162 "Failed to allocate PF resources"); 5163 return rc; 5164 } 5165 } 5166 } 5167 5168 rc = bnxt_alloc_mem(bp, reconfig_dev); 5169 if (rc) 5170 return rc; 5171 5172 rc = bnxt_setup_int(bp); 5173 if (rc) 5174 return rc; 5175 5176 rc = bnxt_request_int(bp); 5177 if (rc) 5178 return rc; 5179 5180 rc = bnxt_init_ctx_mem(bp); 5181 if (rc) { 5182 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n"); 5183 return rc; 5184 } 5185 5186 rc = bnxt_init_locks(bp); 5187 if (rc) 5188 return rc; 5189 5190 return 0; 5191 } 5192 5193 static int 5194 bnxt_parse_devarg_truflow(__rte_unused const char *key, 5195 const char *value, void *opaque_arg) 5196 { 5197 struct bnxt *bp = opaque_arg; 5198 unsigned long truflow; 5199 char *end = NULL; 5200 5201 if (!value || !opaque_arg) { 5202 PMD_DRV_LOG(ERR, 5203 "Invalid parameter passed to truflow devargs.\n"); 5204 return -EINVAL; 5205 } 5206 5207 truflow = strtoul(value, &end, 10); 5208 if (end == NULL || *end != '\0' || 5209 (truflow == ULONG_MAX && errno == ERANGE)) { 5210 PMD_DRV_LOG(ERR, 5211 "Invalid parameter passed to truflow devargs.\n"); 5212 return -EINVAL; 5213 } 5214 5215 if (BNXT_DEVARG_TRUFLOW_INVALID(truflow)) { 5216 PMD_DRV_LOG(ERR, 5217 "Invalid value passed to truflow devargs.\n"); 5218 return -EINVAL; 5219 } 5220 5221 if (truflow) { 5222 bp->flags |= BNXT_FLAG_TRUFLOW_EN; 5223 PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n"); 5224 } else { 5225 bp->flags &= ~BNXT_FLAG_TRUFLOW_EN; 5226 PMD_DRV_LOG(INFO, "Host-based truflow feature disabled.\n"); 5227 } 5228 5229 return 0; 5230 } 5231 5232 static int 5233 bnxt_parse_devarg_flow_xstat(__rte_unused const 
char *key, 5234 const char *value, void *opaque_arg) 5235 { 5236 struct bnxt *bp = opaque_arg; 5237 unsigned long flow_xstat; 5238 char *end = NULL; 5239 5240 if (!value || !opaque_arg) { 5241 PMD_DRV_LOG(ERR, 5242 "Invalid parameter passed to flow_xstat devarg.\n"); 5243 return -EINVAL; 5244 } 5245 5246 flow_xstat = strtoul(value, &end, 10); 5247 if (end == NULL || *end != '\0' || 5248 (flow_xstat == ULONG_MAX && errno == ERANGE)) { 5249 PMD_DRV_LOG(ERR, 5250 "Invalid parameter passed to flow_xstat devarg.\n"); 5251 return -EINVAL; 5252 } 5253 5254 if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) { 5255 PMD_DRV_LOG(ERR, 5256 "Invalid value passed to flow_xstat devarg.\n"); 5257 return -EINVAL; 5258 } 5259 5260 bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; 5261 if (BNXT_FLOW_XSTATS_EN(bp)) 5262 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n"); 5263 5264 return 0; 5265 } 5266 5267 static int 5268 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key, 5269 const char *value, void *opaque_arg) 5270 { 5271 struct bnxt *bp = opaque_arg; 5272 unsigned long max_num_kflows; 5273 char *end = NULL; 5274 5275 if (!value || !opaque_arg) { 5276 PMD_DRV_LOG(ERR, 5277 "Invalid parameter passed to max_num_kflows devarg.\n"); 5278 return -EINVAL; 5279 } 5280 5281 max_num_kflows = strtoul(value, &end, 10); 5282 if (end == NULL || *end != '\0' || 5283 (max_num_kflows == ULONG_MAX && errno == ERANGE)) { 5284 PMD_DRV_LOG(ERR, 5285 "Invalid parameter passed to max_num_kflows devarg.\n"); 5286 return -EINVAL; 5287 } 5288 5289 if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) { 5290 PMD_DRV_LOG(ERR, 5291 "Invalid value passed to max_num_kflows devarg.\n"); 5292 return -EINVAL; 5293 } 5294 5295 bp->max_num_kflows = max_num_kflows; 5296 if (bp->max_num_kflows) 5297 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n", 5298 max_num_kflows); 5299 5300 return 0; 5301 } 5302 5303 static int 5304 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key, 5305 const char *value, void *opaque_arg) 5306 { 5307 struct bnxt_representor *vfr_bp = opaque_arg; 5308 unsigned long rep_is_pf; 5309 char *end = NULL; 5310 5311 if (!value || !opaque_arg) { 5312 PMD_DRV_LOG(ERR, 5313 "Invalid parameter passed to rep_is_pf devargs.\n"); 5314 return -EINVAL; 5315 } 5316 5317 rep_is_pf = strtoul(value, &end, 10); 5318 if (end == NULL || *end != '\0' || 5319 (rep_is_pf == ULONG_MAX && errno == ERANGE)) { 5320 PMD_DRV_LOG(ERR, 5321 "Invalid parameter passed to rep_is_pf devargs.\n"); 5322 return -EINVAL; 5323 } 5324 5325 if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) { 5326 PMD_DRV_LOG(ERR, 5327 "Invalid value passed to rep_is_pf devargs.\n"); 5328 return -EINVAL; 5329 } 5330 5331 vfr_bp->flags |= rep_is_pf; 5332 if (BNXT_REP_PF(vfr_bp)) 5333 PMD_DRV_LOG(INFO, "PF representor\n"); 5334 else 5335 PMD_DRV_LOG(INFO, "VF representor\n"); 5336 5337 return 0; 5338 } 5339 5340 static int 5341 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key, 5342 const char *value, void *opaque_arg) 5343 { 5344 struct bnxt_representor *vfr_bp = opaque_arg; 5345 unsigned long rep_based_pf; 5346 char *end = NULL; 5347 5348 if (!value || !opaque_arg) { 5349 PMD_DRV_LOG(ERR, 5350 "Invalid parameter passed to rep_based_pf " 5351 "devargs.\n"); 5352 return -EINVAL; 5353 } 5354 5355 rep_based_pf = strtoul(value, &end, 10); 5356 if (end == NULL || *end != '\0' || 5357 (rep_based_pf == ULONG_MAX && errno == ERANGE)) { 5358 PMD_DRV_LOG(ERR, 5359 "Invalid parameter passed to rep_based_pf " 5360 "devargs.\n"); 5361 return -EINVAL; 5362 } 5363 5364 if 
(BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) { 5365 PMD_DRV_LOG(ERR, 5366 "Invalid value passed to rep_based_pf devargs.\n"); 5367 return -EINVAL; 5368 } 5369 5370 vfr_bp->rep_based_pf = rep_based_pf; 5371 vfr_bp->flags |= BNXT_REP_BASED_PF_VALID; 5372 5373 PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf); 5374 5375 return 0; 5376 } 5377 5378 static int 5379 bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key, 5380 const char *value, void *opaque_arg) 5381 { 5382 struct bnxt_representor *vfr_bp = opaque_arg; 5383 unsigned long rep_q_r2f; 5384 char *end = NULL; 5385 5386 if (!value || !opaque_arg) { 5387 PMD_DRV_LOG(ERR, 5388 "Invalid parameter passed to rep_q_r2f " 5389 "devargs.\n"); 5390 return -EINVAL; 5391 } 5392 5393 rep_q_r2f = strtoul(value, &end, 10); 5394 if (end == NULL || *end != '\0' || 5395 (rep_q_r2f == ULONG_MAX && errno == ERANGE)) { 5396 PMD_DRV_LOG(ERR, 5397 "Invalid parameter passed to rep_q_r2f " 5398 "devargs.\n"); 5399 return -EINVAL; 5400 } 5401 5402 if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) { 5403 PMD_DRV_LOG(ERR, 5404 "Invalid value passed to rep_q_r2f devargs.\n"); 5405 return -EINVAL; 5406 } 5407 5408 vfr_bp->rep_q_r2f = rep_q_r2f; 5409 vfr_bp->flags |= BNXT_REP_Q_R2F_VALID; 5410 PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f); 5411 5412 return 0; 5413 } 5414 5415 static int 5416 bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key, 5417 const char *value, void *opaque_arg) 5418 { 5419 struct bnxt_representor *vfr_bp = opaque_arg; 5420 unsigned long rep_q_f2r; 5421 char *end = NULL; 5422 5423 if (!value || !opaque_arg) { 5424 PMD_DRV_LOG(ERR, 5425 "Invalid parameter passed to rep_q_f2r " 5426 "devargs.\n"); 5427 return -EINVAL; 5428 } 5429 5430 rep_q_f2r = strtoul(value, &end, 10); 5431 if (end == NULL || *end != '\0' || 5432 (rep_q_f2r == ULONG_MAX && errno == ERANGE)) { 5433 PMD_DRV_LOG(ERR, 5434 "Invalid parameter passed to rep_q_f2r " 5435 "devargs.\n"); 5436 return -EINVAL; 5437 } 5438 5439 if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) { 5440 PMD_DRV_LOG(ERR, 5441 "Invalid value passed to rep_q_f2r devargs.\n"); 5442 return -EINVAL; 5443 } 5444 5445 vfr_bp->rep_q_f2r = rep_q_f2r; 5446 vfr_bp->flags |= BNXT_REP_Q_F2R_VALID; 5447 PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r); 5448 5449 return 0; 5450 } 5451 5452 static int 5453 bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key, 5454 const char *value, void *opaque_arg) 5455 { 5456 struct bnxt_representor *vfr_bp = opaque_arg; 5457 unsigned long rep_fc_r2f; 5458 char *end = NULL; 5459 5460 if (!value || !opaque_arg) { 5461 PMD_DRV_LOG(ERR, 5462 "Invalid parameter passed to rep_fc_r2f " 5463 "devargs.\n"); 5464 return -EINVAL; 5465 } 5466 5467 rep_fc_r2f = strtoul(value, &end, 10); 5468 if (end == NULL || *end != '\0' || 5469 (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) { 5470 PMD_DRV_LOG(ERR, 5471 "Invalid parameter passed to rep_fc_r2f " 5472 "devargs.\n"); 5473 return -EINVAL; 5474 } 5475 5476 if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) { 5477 PMD_DRV_LOG(ERR, 5478 "Invalid value passed to rep_fc_r2f devargs.\n"); 5479 return -EINVAL; 5480 } 5481 5482 vfr_bp->flags |= BNXT_REP_FC_R2F_VALID; 5483 vfr_bp->rep_fc_r2f = rep_fc_r2f; 5484 PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f); 5485 5486 return 0; 5487 } 5488 5489 static int 5490 bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key, 5491 const char *value, void *opaque_arg) 5492 { 5493 struct bnxt_representor *vfr_bp = opaque_arg; 5494 unsigned long rep_fc_f2r; 5495 char *end = NULL; 
5496 5497 if (!value || !opaque_arg) { 5498 PMD_DRV_LOG(ERR, 5499 "Invalid parameter passed to rep_fc_f2r " 5500 "devargs.\n"); 5501 return -EINVAL; 5502 } 5503 5504 rep_fc_f2r = strtoul(value, &end, 10); 5505 if (end == NULL || *end != '\0' || 5506 (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) { 5507 PMD_DRV_LOG(ERR, 5508 "Invalid parameter passed to rep_fc_f2r " 5509 "devargs.\n"); 5510 return -EINVAL; 5511 } 5512 5513 if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) { 5514 PMD_DRV_LOG(ERR, 5515 "Invalid value passed to rep_fc_f2r devargs.\n"); 5516 return -EINVAL; 5517 } 5518 5519 vfr_bp->flags |= BNXT_REP_FC_F2R_VALID; 5520 vfr_bp->rep_fc_f2r = rep_fc_f2r; 5521 PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r); 5522 5523 return 0; 5524 } 5525 5526 static void 5527 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) 5528 { 5529 struct rte_kvargs *kvlist; 5530 5531 if (devargs == NULL) 5532 return; 5533 5534 kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args); 5535 if (kvlist == NULL) 5536 return; 5537 5538 /* 5539 * Handler for "truflow" devarg. 5540 * Invoked as for ex: "-w 0000:00:0d.0,host-based-truflow=1" 5541 */ 5542 rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW, 5543 bnxt_parse_devarg_truflow, bp); 5544 5545 /* 5546 * Handler for "flow_xstat" devarg. 5547 * Invoked as for ex: "-w 0000:00:0d.0,flow_xstat=1" 5548 */ 5549 rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT, 5550 bnxt_parse_devarg_flow_xstat, bp); 5551 5552 /* 5553 * Handler for "max_num_kflows" devarg. 5554 * Invoked as for ex: "-w 000:00:0d.0,max_num_kflows=32" 5555 */ 5556 rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS, 5557 bnxt_parse_devarg_max_num_kflows, bp); 5558 5559 rte_kvargs_free(kvlist); 5560 } 5561 5562 static int bnxt_alloc_switch_domain(struct bnxt *bp) 5563 { 5564 int rc = 0; 5565 5566 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 5567 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id); 5568 if (rc) 5569 PMD_DRV_LOG(ERR, 5570 "Failed to alloc switch domain: %d\n", rc); 5571 else 5572 PMD_DRV_LOG(INFO, 5573 "Switch domain allocated %d\n", 5574 bp->switch_domain_id); 5575 } 5576 5577 return rc; 5578 } 5579 5580 static int 5581 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) 5582 { 5583 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5584 static int version_printed; 5585 struct bnxt *bp; 5586 int rc; 5587 5588 if (version_printed++ == 0) 5589 PMD_DRV_LOG(INFO, "%s\n", bnxt_version); 5590 5591 eth_dev->dev_ops = &bnxt_dev_ops; 5592 eth_dev->rx_queue_count = bnxt_rx_queue_count_op; 5593 eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op; 5594 eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op; 5595 eth_dev->rx_pkt_burst = &bnxt_recv_pkts; 5596 eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; 5597 5598 /* 5599 * For secondary processes, we don't initialise any further 5600 * as primary has already done this work. 5601 */ 5602 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5603 return 0; 5604 5605 rte_eth_copy_pci_info(eth_dev, pci_dev); 5606 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 5607 5608 bp = eth_dev->data->dev_private; 5609 5610 /* Parse dev arguments passed on when starting the DPDK application. 
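 * e.g. "-w 0000:00:0d.0,host-based-truflow=1"; each recognized key is
 * range-checked by its handler below before it is applied to the port.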
*/ 5611 bnxt_parse_dev_args(bp, pci_dev->device.devargs); 5612 5613 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 5614 5615 if (bnxt_vf_pciid(pci_dev->id.device_id)) 5616 bp->flags |= BNXT_FLAG_VF; 5617 5618 if (bnxt_thor_device(pci_dev->id.device_id)) 5619 bp->flags |= BNXT_FLAG_THOR_CHIP; 5620 5621 if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 || 5622 pci_dev->id.device_id == BROADCOM_DEV_ID_58804 || 5623 pci_dev->id.device_id == BROADCOM_DEV_ID_58808 || 5624 pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF) 5625 bp->flags |= BNXT_FLAG_STINGRAY; 5626 5627 if (BNXT_TRUFLOW_EN(bp)) { 5628 /* extra mbuf field is required to store CFA code from mark */ 5629 static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = { 5630 .name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME, 5631 .size = sizeof(bnxt_cfa_code_dynfield_t), 5632 .align = __alignof__(bnxt_cfa_code_dynfield_t), 5633 }; 5634 bnxt_cfa_code_dynfield_offset = 5635 rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc); 5636 if (bnxt_cfa_code_dynfield_offset < 0) { 5637 PMD_DRV_LOG(ERR, 5638 "Failed to register mbuf field for TruFlow mark\n"); 5639 return -rte_errno; 5640 } 5641 } 5642 5643 rc = bnxt_init_board(eth_dev); 5644 if (rc) { 5645 PMD_DRV_LOG(ERR, 5646 "Failed to initialize board rc: %x\n", rc); 5647 return rc; 5648 } 5649 5650 rc = bnxt_alloc_pf_info(bp); 5651 if (rc) 5652 goto error_free; 5653 5654 rc = bnxt_alloc_link_info(bp); 5655 if (rc) 5656 goto error_free; 5657 5658 rc = bnxt_alloc_parent_info(bp); 5659 if (rc) 5660 goto error_free; 5661 5662 rc = bnxt_alloc_hwrm_resources(bp); 5663 if (rc) { 5664 PMD_DRV_LOG(ERR, 5665 "Failed to allocate hwrm resource rc: %x\n", rc); 5666 goto error_free; 5667 } 5668 rc = bnxt_alloc_leds_info(bp); 5669 if (rc) 5670 goto error_free; 5671 5672 rc = bnxt_alloc_cos_queues(bp); 5673 if (rc) 5674 goto error_free; 5675 5676 rc = bnxt_init_resources(bp, false); 5677 if (rc) 5678 goto error_free; 5679 5680 rc = bnxt_alloc_stats_mem(bp); 5681 if (rc) 5682 goto error_free; 5683 5684 bnxt_alloc_switch_domain(bp); 5685 5686 PMD_DRV_LOG(INFO, 5687 DRV_MODULE_NAME "found at mem %" PRIX64 ", node addr %pM\n", 5688 pci_dev->mem_resource[0].phys_addr, 5689 pci_dev->mem_resource[0].addr); 5690 5691 return 0; 5692 5693 error_free: 5694 bnxt_dev_uninit(eth_dev); 5695 return rc; 5696 } 5697 5698 5699 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx) 5700 { 5701 if (!ctx) 5702 return; 5703 5704 if (ctx->va) 5705 rte_free(ctx->va); 5706 5707 ctx->va = NULL; 5708 ctx->dma = RTE_BAD_IOVA; 5709 ctx->ctx_id = BNXT_CTX_VAL_INVAL; 5710 } 5711 5712 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp) 5713 { 5714 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 5715 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 5716 bp->flow_stat->rx_fc_out_tbl.ctx_id, 5717 bp->flow_stat->max_fc, 5718 false); 5719 5720 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 5721 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 5722 bp->flow_stat->tx_fc_out_tbl.ctx_id, 5723 bp->flow_stat->max_fc, 5724 false); 5725 5726 if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5727 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id); 5728 bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5729 5730 if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5731 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id); 5732 bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5733 5734 if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5735 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id); 
5736 bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5737 5738 if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5739 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id); 5740 bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5741 } 5742 5743 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp) 5744 { 5745 bnxt_unregister_fc_ctx_mem(bp); 5746 5747 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl); 5748 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl); 5749 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl); 5750 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl); 5751 } 5752 5753 static void bnxt_uninit_ctx_mem(struct bnxt *bp) 5754 { 5755 if (BNXT_FLOW_XSTATS_EN(bp)) 5756 bnxt_uninit_fc_ctx_mem(bp); 5757 } 5758 5759 static void 5760 bnxt_free_error_recovery_info(struct bnxt *bp) 5761 { 5762 rte_free(bp->recovery_info); 5763 bp->recovery_info = NULL; 5764 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5765 } 5766 5767 static void 5768 bnxt_uninit_locks(struct bnxt *bp) 5769 { 5770 pthread_mutex_destroy(&bp->flow_lock); 5771 pthread_mutex_destroy(&bp->def_cp_lock); 5772 pthread_mutex_destroy(&bp->health_check_lock); 5773 if (bp->rep_info) { 5774 pthread_mutex_destroy(&bp->rep_info->vfr_lock); 5775 pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); 5776 } 5777 } 5778 5779 static int 5780 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) 5781 { 5782 int rc; 5783 5784 bnxt_free_int(bp); 5785 bnxt_free_mem(bp, reconfig_dev); 5786 5787 bnxt_hwrm_func_buf_unrgtr(bp); 5788 rte_free(bp->pf->vf_req_buf); 5789 5790 rc = bnxt_hwrm_func_driver_unregister(bp, 0); 5791 bp->flags &= ~BNXT_FLAG_REGISTERED; 5792 bnxt_free_ctx_mem(bp); 5793 if (!reconfig_dev) { 5794 bnxt_free_hwrm_resources(bp); 5795 bnxt_free_error_recovery_info(bp); 5796 } 5797 5798 bnxt_uninit_ctx_mem(bp); 5799 5800 bnxt_uninit_locks(bp); 5801 bnxt_free_flow_stats_info(bp); 5802 bnxt_free_rep_info(bp); 5803 rte_free(bp->ptp_cfg); 5804 bp->ptp_cfg = NULL; 5805 return rc; 5806 } 5807 5808 static int 5809 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) 5810 { 5811 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5812 return -EPERM; 5813 5814 PMD_DRV_LOG(DEBUG, "Calling Device uninit\n"); 5815 5816 if (eth_dev->state != RTE_ETH_DEV_UNUSED) 5817 bnxt_dev_close_op(eth_dev); 5818 5819 return 0; 5820 } 5821 5822 static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev) 5823 { 5824 struct bnxt *bp = eth_dev->data->dev_private; 5825 struct rte_eth_dev *vf_rep_eth_dev; 5826 int ret = 0, i; 5827 5828 if (!bp) 5829 return -EINVAL; 5830 5831 for (i = 0; i < bp->num_reps; i++) { 5832 vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev; 5833 if (!vf_rep_eth_dev) 5834 continue; 5835 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n", 5836 vf_rep_eth_dev->data->port_id); 5837 rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit); 5838 } 5839 PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", 5840 eth_dev->data->port_id); 5841 ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit); 5842 5843 return ret; 5844 } 5845 5846 static void bnxt_free_rep_info(struct bnxt *bp) 5847 { 5848 rte_free(bp->rep_info); 5849 bp->rep_info = NULL; 5850 rte_free(bp->cfa_code_map); 5851 bp->cfa_code_map = NULL; 5852 } 5853 5854 static int bnxt_init_rep_info(struct bnxt *bp) 5855 { 5856 int i = 0, rc; 5857 5858 if (bp->rep_info) 5859 return 0; 5860 5861 bp->rep_info = rte_zmalloc("bnxt_rep_info", 5862 sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS, 5863 0); 5864 if (!bp->rep_info) { 5865 PMD_DRV_LOG(ERR, "Failed to alloc memory 
for rep info\n"); 5866 return -ENOMEM; 5867 } 5868 bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map", 5869 sizeof(*bp->cfa_code_map) * 5870 BNXT_MAX_CFA_CODE, 0); 5871 if (!bp->cfa_code_map) { 5872 PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n"); 5873 bnxt_free_rep_info(bp); 5874 return -ENOMEM; 5875 } 5876 5877 for (i = 0; i < BNXT_MAX_CFA_CODE; i++) 5878 bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID; 5879 5880 rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL); 5881 if (rc) { 5882 PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n"); 5883 bnxt_free_rep_info(bp); 5884 return rc; 5885 } 5886 5887 rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL); 5888 if (rc) { 5889 PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n"); 5890 bnxt_free_rep_info(bp); 5891 return rc; 5892 } 5893 5894 return rc; 5895 } 5896 5897 static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev, 5898 struct rte_eth_devargs eth_da, 5899 struct rte_eth_dev *backing_eth_dev, 5900 const char *dev_args) 5901 { 5902 struct rte_eth_dev *vf_rep_eth_dev; 5903 char name[RTE_ETH_NAME_MAX_LEN]; 5904 struct bnxt *backing_bp; 5905 uint16_t num_rep; 5906 int i, ret = 0; 5907 struct rte_kvargs *kvlist = NULL; 5908 5909 num_rep = eth_da.nb_representor_ports; 5910 if (num_rep > BNXT_MAX_VF_REPS) { 5911 PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n", 5912 num_rep, BNXT_MAX_VF_REPS); 5913 return -EINVAL; 5914 } 5915 5916 if (num_rep >= RTE_MAX_ETHPORTS) { 5917 PMD_DRV_LOG(ERR, 5918 "nb_representor_ports = %d > %d MAX ETHPORTS\n", 5919 num_rep, RTE_MAX_ETHPORTS); 5920 return -EINVAL; 5921 } 5922 5923 backing_bp = backing_eth_dev->data->dev_private; 5924 5925 if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) { 5926 PMD_DRV_LOG(ERR, 5927 "Not a PF or trusted VF. No Representor support\n"); 5928 /* Returning an error is not an option. 5929 * Applications are not handling this correctly 5930 */ 5931 return 0; 5932 } 5933 5934 if (bnxt_init_rep_info(backing_bp)) 5935 return 0; 5936 5937 for (i = 0; i < num_rep; i++) { 5938 struct bnxt_representor representor = { 5939 .vf_id = eth_da.representor_ports[i], 5940 .switch_domain_id = backing_bp->switch_domain_id, 5941 .parent_dev = backing_eth_dev 5942 }; 5943 5944 if (representor.vf_id >= BNXT_MAX_VF_REPS) { 5945 PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n", 5946 representor.vf_id, BNXT_MAX_VF_REPS); 5947 continue; 5948 } 5949 5950 /* representor port net_bdf_port */ 5951 snprintf(name, sizeof(name), "net_%s_representor_%d", 5952 pci_dev->device.name, eth_da.representor_ports[i]); 5953 5954 kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args); 5955 if (kvlist) { 5956 /* 5957 * Handler for "rep_is_pf" devarg. 5958 * Invoked as for ex: "-w 000:00:0d.0, 5959 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 5960 */ 5961 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF, 5962 bnxt_parse_devarg_rep_is_pf, 5963 (void *)&representor); 5964 if (ret) { 5965 ret = -EINVAL; 5966 goto err; 5967 } 5968 /* 5969 * Handler for "rep_based_pf" devarg. 5970 * Invoked as for ex: "-w 000:00:0d.0, 5971 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 5972 */ 5973 ret = rte_kvargs_process(kvlist, 5974 BNXT_DEVARG_REP_BASED_PF, 5975 bnxt_parse_devarg_rep_based_pf, 5976 (void *)&representor); 5977 if (ret) { 5978 ret = -EINVAL; 5979 goto err; 5980 } 5981 /* 5982 * Handler for "rep_based_pf" devarg. 
5983 * Invoked as for ex: "-w 000:00:0d.0, 5984 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 5985 */ 5986 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F, 5987 bnxt_parse_devarg_rep_q_r2f, 5988 (void *)&representor); 5989 if (ret) { 5990 ret = -EINVAL; 5991 goto err; 5992 } 5993 /* 5994 * Handler for "rep_based_pf" devarg. 5995 * Invoked as for ex: "-w 000:00:0d.0, 5996 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 5997 */ 5998 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R, 5999 bnxt_parse_devarg_rep_q_f2r, 6000 (void *)&representor); 6001 if (ret) { 6002 ret = -EINVAL; 6003 goto err; 6004 } 6005 /* 6006 * Handler for "rep_based_pf" devarg. 6007 * Invoked as for ex: "-w 000:00:0d.0, 6008 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6009 */ 6010 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F, 6011 bnxt_parse_devarg_rep_fc_r2f, 6012 (void *)&representor); 6013 if (ret) { 6014 ret = -EINVAL; 6015 goto err; 6016 } 6017 /* 6018 * Handler for "rep_based_pf" devarg. 6019 * Invoked as for ex: "-w 000:00:0d.0, 6020 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6021 */ 6022 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R, 6023 bnxt_parse_devarg_rep_fc_f2r, 6024 (void *)&representor); 6025 if (ret) { 6026 ret = -EINVAL; 6027 goto err; 6028 } 6029 } 6030 6031 ret = rte_eth_dev_create(&pci_dev->device, name, 6032 sizeof(struct bnxt_representor), 6033 NULL, NULL, 6034 bnxt_representor_init, 6035 &representor); 6036 if (ret) { 6037 PMD_DRV_LOG(ERR, "failed to create bnxt vf " 6038 "representor %s.", name); 6039 goto err; 6040 } 6041 6042 vf_rep_eth_dev = rte_eth_dev_allocated(name); 6043 if (!vf_rep_eth_dev) { 6044 PMD_DRV_LOG(ERR, "Failed to find the eth_dev" 6045 " for VF-Rep: %s.", name); 6046 ret = -ENODEV; 6047 goto err; 6048 } 6049 6050 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n", 6051 backing_eth_dev->data->port_id); 6052 backing_bp->rep_info[representor.vf_id].vfr_eth_dev = 6053 vf_rep_eth_dev; 6054 backing_bp->num_reps++; 6055 6056 } 6057 6058 rte_kvargs_free(kvlist); 6059 return 0; 6060 6061 err: 6062 /* If num_rep > 1, then rollback already created 6063 * ports, since we'll be failing the probe anyway 6064 */ 6065 if (num_rep > 1) 6066 bnxt_pci_remove_dev_with_reps(backing_eth_dev); 6067 rte_errno = -ret; 6068 rte_kvargs_free(kvlist); 6069 6070 return ret; 6071 } 6072 6073 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 6074 struct rte_pci_device *pci_dev) 6075 { 6076 struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 }; 6077 struct rte_eth_dev *backing_eth_dev; 6078 uint16_t num_rep; 6079 int ret = 0; 6080 6081 if (pci_dev->device.devargs) { 6082 ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, 6083 &eth_da); 6084 if (ret) 6085 return ret; 6086 } 6087 6088 num_rep = eth_da.nb_representor_ports; 6089 PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n", 6090 num_rep); 6091 6092 /* We could come here after first level of probe is already invoked 6093 * as part of an application bringup(OVS-DPDK vswitchd), so first check 6094 * for already allocated eth_dev for the backing device (PF/Trusted VF) 6095 */ 6096 backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 6097 if (backing_eth_dev == NULL) { 6098 ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, 6099 sizeof(struct bnxt), 6100 eth_dev_pci_specific_init, pci_dev, 6101 bnxt_dev_init, NULL); 6102 6103 if (ret || !num_rep) 6104 return ret; 6105 6106 backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 6107 } 6108
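	/*
	 * At this point the backing device's eth_dev exists (created above if
	 * it was not already allocated); representor ports requested on the
	 * devargs are probed next.
	 */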
PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n", 6109 backing_eth_dev->data->port_id); 6110 6111 if (!num_rep) 6112 return ret; 6113 6114 /* probe representor ports now */ 6115 ret = bnxt_rep_port_probe(pci_dev, eth_da, backing_eth_dev, 6116 pci_dev->device.devargs->args); 6117 6118 return ret; 6119 } 6120 6121 static int bnxt_pci_remove(struct rte_pci_device *pci_dev) 6122 { 6123 struct rte_eth_dev *eth_dev; 6124 6125 eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 6126 if (!eth_dev) 6127 return 0; /* Invoked typically only by OVS-DPDK, by the 6128 * time it comes here the eth_dev is already 6129 * deleted by rte_eth_dev_close(), so returning 6130 * +ve value will at least help in proper cleanup 6131 */ 6132 6133 PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id); 6134 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 6135 if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) 6136 return rte_eth_dev_destroy(eth_dev, 6137 bnxt_representor_uninit); 6138 else 6139 return rte_eth_dev_destroy(eth_dev, 6140 bnxt_dev_uninit); 6141 } else { 6142 return rte_eth_dev_pci_generic_remove(pci_dev, NULL); 6143 } 6144 } 6145 6146 static struct rte_pci_driver bnxt_rte_pmd = { 6147 .id_table = bnxt_pci_id_map, 6148 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 6149 RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs 6150 * and OVS-DPDK 6151 */ 6152 .probe = bnxt_pci_probe, 6153 .remove = bnxt_pci_remove, 6154 }; 6155 6156 static bool 6157 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 6158 { 6159 if (strcmp(dev->device->driver->name, drv->driver.name)) 6160 return false; 6161 6162 return true; 6163 } 6164 6165 bool is_bnxt_supported(struct rte_eth_dev *dev) 6166 { 6167 return is_device_supported(dev, &bnxt_rte_pmd); 6168 } 6169 6170 RTE_LOG_REGISTER(bnxt_logtype_driver, pmd.net.bnxt.driver, NOTICE); 6171 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd); 6172 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map); 6173 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci"); 6174