1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2014-2018 Broadcom 3 * All rights reserved. 4 */ 5 6 #include <inttypes.h> 7 #include <stdbool.h> 8 9 #include <rte_dev.h> 10 #include <rte_ethdev_driver.h> 11 #include <rte_ethdev_pci.h> 12 #include <rte_malloc.h> 13 #include <rte_cycles.h> 14 #include <rte_alarm.h> 15 #include <rte_kvargs.h> 16 #include <rte_vect.h> 17 18 #include "bnxt.h" 19 #include "bnxt_filter.h" 20 #include "bnxt_hwrm.h" 21 #include "bnxt_irq.h" 22 #include "bnxt_reps.h" 23 #include "bnxt_ring.h" 24 #include "bnxt_rxq.h" 25 #include "bnxt_rxr.h" 26 #include "bnxt_stats.h" 27 #include "bnxt_txq.h" 28 #include "bnxt_txr.h" 29 #include "bnxt_vnic.h" 30 #include "hsi_struct_def_dpdk.h" 31 #include "bnxt_nvm_defs.h" 32 #include "bnxt_tf_common.h" 33 #include "ulp_flow_db.h" 34 #include "rte_pmd_bnxt.h" 35 36 #define DRV_MODULE_NAME "bnxt" 37 static const char bnxt_version[] = 38 "Broadcom NetXtreme driver " DRV_MODULE_NAME; 39 40 /* 41 * The set of PCI devices this driver supports 42 */ 43 static const struct rte_pci_id bnxt_pci_id_map[] = { 44 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 45 BROADCOM_DEV_ID_STRATUS_NIC_VF1) }, 46 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 47 BROADCOM_DEV_ID_STRATUS_NIC_VF2) }, 48 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) }, 49 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) }, 50 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) }, 51 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) }, 52 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) }, 53 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) }, 54 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) }, 55 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) }, 56 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) }, 57 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) }, 58 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) }, 59 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) }, 60 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) }, 61 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) }, 62 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) }, 63 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) }, 64 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) }, 65 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) }, 66 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) }, 67 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) }, 68 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) }, 69 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) }, 70 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) }, 71 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) }, 72 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) }, 73 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) }, 74 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) }, 75 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) }, 76 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) }, 77 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) }, 78 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) }, 79 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 
BROADCOM_DEV_ID_57317_SFP) }, 80 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) }, 81 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) }, 82 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) }, 83 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) }, 84 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) }, 85 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) }, 86 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) }, 87 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) }, 88 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) }, 89 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) }, 90 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) }, 91 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) }, 92 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) }, 93 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) }, 94 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) }, 95 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) }, 96 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) }, 97 { .vendor_id = 0, /* sentinel */ }, 98 }; 99 100 #define BNXT_DEVARG_TRUFLOW "host-based-truflow" 101 #define BNXT_DEVARG_FLOW_XSTAT "flow-xstat" 102 #define BNXT_DEVARG_MAX_NUM_KFLOWS "max-num-kflows" 103 #define BNXT_DEVARG_REPRESENTOR "representor" 104 #define BNXT_DEVARG_REP_BASED_PF "rep-based-pf" 105 #define BNXT_DEVARG_REP_IS_PF "rep-is-pf" 106 #define BNXT_DEVARG_REP_Q_R2F "rep-q-r2f" 107 #define BNXT_DEVARG_REP_Q_F2R "rep-q-f2r" 108 #define BNXT_DEVARG_REP_FC_R2F "rep-fc-r2f" 109 #define BNXT_DEVARG_REP_FC_F2R "rep-fc-f2r" 110 111 static const char *const bnxt_dev_args[] = { 112 BNXT_DEVARG_REPRESENTOR, 113 BNXT_DEVARG_TRUFLOW, 114 BNXT_DEVARG_FLOW_XSTAT, 115 BNXT_DEVARG_MAX_NUM_KFLOWS, 116 BNXT_DEVARG_REP_BASED_PF, 117 BNXT_DEVARG_REP_IS_PF, 118 BNXT_DEVARG_REP_Q_R2F, 119 BNXT_DEVARG_REP_Q_F2R, 120 BNXT_DEVARG_REP_FC_R2F, 121 BNXT_DEVARG_REP_FC_F2R, 122 NULL 123 }; 124 125 /* 126 * truflow == false to disable the feature 127 * truflow == true to enable the feature 128 */ 129 #define BNXT_DEVARG_TRUFLOW_INVALID(truflow) ((truflow) > 1) 130 131 /* 132 * flow_xstat == false to disable the feature 133 * flow_xstat == true to enable the feature 134 */ 135 #define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat) ((flow_xstat) > 1) 136 137 /* 138 * rep_is_pf == false to indicate VF representor 139 * rep_is_pf == true to indicate PF representor 140 */ 141 #define BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf) ((rep_is_pf) > 1) 142 143 /* 144 * rep_based_pf == Physical index of the PF 145 */ 146 #define BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf) ((rep_based_pf) > 15) 147 /* 148 * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction 149 */ 150 #define BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f) ((rep_q_r2f) > 3) 151 152 /* 153 * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction 154 */ 155 #define BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r) ((rep_q_f2r) > 3) 156 157 /* 158 * rep_fc_r2f == Flow control for the representor to endpoint direction 159 */ 160 #define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f) ((rep_fc_r2f) > 1) 161 162 /* 163 * rep_fc_f2r == Flow control for the endpoint to representor direction 164 */ 165 #define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r) ((rep_fc_f2r) > 1) 166 167 int bnxt_cfa_code_dynfield_offset 
= -1; 168 169 /* 170 * max_num_kflows must be >= 32 171 * and must be a power-of-2 supported value 172 * return: 1 -> invalid 173 * 0 -> valid 174 */ 175 static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows) 176 { 177 if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows)) 178 return 1; 179 return 0; 180 } 181 182 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); 183 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); 184 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev); 185 static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev); 186 static void bnxt_cancel_fw_health_check(struct bnxt *bp); 187 static int bnxt_restore_vlan_filters(struct bnxt *bp); 188 static void bnxt_dev_recover(void *arg); 189 static void bnxt_free_error_recovery_info(struct bnxt *bp); 190 static void bnxt_free_rep_info(struct bnxt *bp); 191 192 int is_bnxt_in_error(struct bnxt *bp) 193 { 194 if (bp->flags & BNXT_FLAG_FATAL_ERROR) 195 return -EIO; 196 if (bp->flags & BNXT_FLAG_FW_RESET) 197 return -EBUSY; 198 199 return 0; 200 } 201 202 /***********************/ 203 204 /* 205 * High level utility functions 206 */ 207 208 static uint16_t bnxt_rss_ctxts(const struct bnxt *bp) 209 { 210 if (!BNXT_CHIP_THOR(bp)) 211 return 1; 212 213 return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings, 214 BNXT_RSS_ENTRIES_PER_CTX_THOR) / 215 BNXT_RSS_ENTRIES_PER_CTX_THOR; 216 } 217 218 uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp) 219 { 220 if (!BNXT_CHIP_THOR(bp)) 221 return HW_HASH_INDEX_SIZE; 222 223 return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR; 224 } 225 226 static void bnxt_free_parent_info(struct bnxt *bp) 227 { 228 rte_free(bp->parent); 229 } 230 231 static void bnxt_free_pf_info(struct bnxt *bp) 232 { 233 rte_free(bp->pf); 234 } 235 236 static void bnxt_free_link_info(struct bnxt *bp) 237 { 238 rte_free(bp->link_info); 239 } 240 241 static void bnxt_free_leds_info(struct bnxt *bp) 242 { 243 if (BNXT_VF(bp)) 244 return; 245 246 rte_free(bp->leds); 247 bp->leds = NULL; 248 } 249 250 static void bnxt_free_flow_stats_info(struct bnxt *bp) 251 { 252 rte_free(bp->flow_stat); 253 bp->flow_stat = NULL; 254 } 255 256 static void bnxt_free_cos_queues(struct bnxt *bp) 257 { 258 rte_free(bp->rx_cos_queue); 259 rte_free(bp->tx_cos_queue); 260 } 261 262 static void bnxt_free_mem(struct bnxt *bp, bool reconfig) 263 { 264 bnxt_free_filter_mem(bp); 265 bnxt_free_vnic_attributes(bp); 266 bnxt_free_vnic_mem(bp); 267 268 /* tx/rx rings are configured as part of *_queue_setup callbacks. 269 * If the number of rings change across fw update, 270 * we don't have much choice except to warn the user. 
271 */ 272 if (!reconfig) { 273 bnxt_free_stats(bp); 274 bnxt_free_tx_rings(bp); 275 bnxt_free_rx_rings(bp); 276 } 277 bnxt_free_async_cp_ring(bp); 278 bnxt_free_rxtx_nq_ring(bp); 279 280 rte_free(bp->grp_info); 281 bp->grp_info = NULL; 282 } 283 284 static int bnxt_alloc_parent_info(struct bnxt *bp) 285 { 286 bp->parent = rte_zmalloc("bnxt_parent_info", 287 sizeof(struct bnxt_parent_info), 0); 288 if (bp->parent == NULL) 289 return -ENOMEM; 290 291 return 0; 292 } 293 294 static int bnxt_alloc_pf_info(struct bnxt *bp) 295 { 296 bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0); 297 if (bp->pf == NULL) 298 return -ENOMEM; 299 300 return 0; 301 } 302 303 static int bnxt_alloc_link_info(struct bnxt *bp) 304 { 305 bp->link_info = 306 rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0); 307 if (bp->link_info == NULL) 308 return -ENOMEM; 309 310 return 0; 311 } 312 313 static int bnxt_alloc_leds_info(struct bnxt *bp) 314 { 315 if (BNXT_VF(bp)) 316 return 0; 317 318 bp->leds = rte_zmalloc("bnxt_leds", 319 BNXT_MAX_LED * sizeof(struct bnxt_led_info), 320 0); 321 if (bp->leds == NULL) 322 return -ENOMEM; 323 324 return 0; 325 } 326 327 static int bnxt_alloc_cos_queues(struct bnxt *bp) 328 { 329 bp->rx_cos_queue = 330 rte_zmalloc("bnxt_rx_cosq", 331 BNXT_COS_QUEUE_COUNT * 332 sizeof(struct bnxt_cos_queue_info), 333 0); 334 if (bp->rx_cos_queue == NULL) 335 return -ENOMEM; 336 337 bp->tx_cos_queue = 338 rte_zmalloc("bnxt_tx_cosq", 339 BNXT_COS_QUEUE_COUNT * 340 sizeof(struct bnxt_cos_queue_info), 341 0); 342 if (bp->tx_cos_queue == NULL) 343 return -ENOMEM; 344 345 return 0; 346 } 347 348 static int bnxt_alloc_flow_stats_info(struct bnxt *bp) 349 { 350 bp->flow_stat = rte_zmalloc("bnxt_flow_xstat", 351 sizeof(struct bnxt_flow_stat_info), 0); 352 if (bp->flow_stat == NULL) 353 return -ENOMEM; 354 355 return 0; 356 } 357 358 static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) 359 { 360 int rc; 361 362 rc = bnxt_alloc_ring_grps(bp); 363 if (rc) 364 goto alloc_mem_err; 365 366 rc = bnxt_alloc_async_ring_struct(bp); 367 if (rc) 368 goto alloc_mem_err; 369 370 rc = bnxt_alloc_vnic_mem(bp); 371 if (rc) 372 goto alloc_mem_err; 373 374 rc = bnxt_alloc_vnic_attributes(bp); 375 if (rc) 376 goto alloc_mem_err; 377 378 rc = bnxt_alloc_filter_mem(bp); 379 if (rc) 380 goto alloc_mem_err; 381 382 rc = bnxt_alloc_async_cp_ring(bp); 383 if (rc) 384 goto alloc_mem_err; 385 386 rc = bnxt_alloc_rxtx_nq_ring(bp); 387 if (rc) 388 goto alloc_mem_err; 389 390 if (BNXT_FLOW_XSTATS_EN(bp)) { 391 rc = bnxt_alloc_flow_stats_info(bp); 392 if (rc) 393 goto alloc_mem_err; 394 } 395 396 return 0; 397 398 alloc_mem_err: 399 bnxt_free_mem(bp, reconfig); 400 return rc; 401 } 402 403 static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id) 404 { 405 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 406 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 407 uint64_t rx_offloads = dev_conf->rxmode.offloads; 408 struct bnxt_rx_queue *rxq; 409 unsigned int j; 410 int rc; 411 412 rc = bnxt_vnic_grp_alloc(bp, vnic); 413 if (rc) 414 goto err_out; 415 416 PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 417 vnic_id, vnic, vnic->fw_grp_ids); 418 419 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 420 if (rc) 421 goto err_out; 422 423 /* Alloc RSS context only if RSS mode is enabled */ 424 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) { 425 int j, nr_ctxs = bnxt_rss_ctxts(bp); 426 427 rc = 0; 428 for (j = 0; j < nr_ctxs; j++) { 429 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j); 430 if 
(rc) 431 break; 432 } 433 if (rc) { 434 PMD_DRV_LOG(ERR, 435 "HWRM vnic %d ctx %d alloc failure rc: %x\n", 436 vnic_id, j, rc); 437 goto err_out; 438 } 439 vnic->num_lb_ctxts = nr_ctxs; 440 } 441 442 /* 443 * Firmware sets pf pair in default vnic cfg. If the VLAN strip 444 * setting is not available at this time, it will not be 445 * configured correctly in the CFA. 446 */ 447 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 448 vnic->vlan_strip = true; 449 else 450 vnic->vlan_strip = false; 451 452 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 453 if (rc) 454 goto err_out; 455 456 rc = bnxt_set_hwrm_vnic_filters(bp, vnic); 457 if (rc) 458 goto err_out; 459 460 for (j = 0; j < bp->rx_num_qs_per_vnic; j++) { 461 rxq = bp->eth_dev->data->rx_queues[j]; 462 463 PMD_DRV_LOG(DEBUG, 464 "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n", 465 j, rxq->vnic, rxq->vnic->fw_grp_ids); 466 467 if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start) 468 rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; 469 else 470 vnic->rx_queue_cnt++; 471 } 472 473 PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt); 474 475 rc = bnxt_vnic_rss_configure(bp, vnic); 476 if (rc) 477 goto err_out; 478 479 bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 480 481 if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) 482 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1); 483 else 484 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0); 485 486 return 0; 487 err_out: 488 PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n", 489 vnic_id, rc); 490 return rc; 491 } 492 493 static int bnxt_register_fc_ctx_mem(struct bnxt *bp) 494 { 495 int rc = 0; 496 497 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma, 498 &bp->flow_stat->rx_fc_in_tbl.ctx_id); 499 if (rc) 500 return rc; 501 502 PMD_DRV_LOG(DEBUG, 503 "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p" 504 " rx_fc_in_tbl.ctx_id = %d\n", 505 bp->flow_stat->rx_fc_in_tbl.va, 506 (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma), 507 bp->flow_stat->rx_fc_in_tbl.ctx_id); 508 509 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma, 510 &bp->flow_stat->rx_fc_out_tbl.ctx_id); 511 if (rc) 512 return rc; 513 514 PMD_DRV_LOG(DEBUG, 515 "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p" 516 " rx_fc_out_tbl.ctx_id = %d\n", 517 bp->flow_stat->rx_fc_out_tbl.va, 518 (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma), 519 bp->flow_stat->rx_fc_out_tbl.ctx_id); 520 521 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma, 522 &bp->flow_stat->tx_fc_in_tbl.ctx_id); 523 if (rc) 524 return rc; 525 526 PMD_DRV_LOG(DEBUG, 527 "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p" 528 " tx_fc_in_tbl.ctx_id = %d\n", 529 bp->flow_stat->tx_fc_in_tbl.va, 530 (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma), 531 bp->flow_stat->tx_fc_in_tbl.ctx_id); 532 533 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma, 534 &bp->flow_stat->tx_fc_out_tbl.ctx_id); 535 if (rc) 536 return rc; 537 538 PMD_DRV_LOG(DEBUG, 539 "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p" 540 " tx_fc_out_tbl.ctx_id = %d\n", 541 bp->flow_stat->tx_fc_out_tbl.va, 542 (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma), 543 bp->flow_stat->tx_fc_out_tbl.ctx_id); 544 545 memset(bp->flow_stat->rx_fc_out_tbl.va, 546 0, 547 bp->flow_stat->rx_fc_out_tbl.size); 548 rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 549 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 550 bp->flow_stat->rx_fc_out_tbl.ctx_id, 551 bp->flow_stat->max_fc, 552 true); 553 if (rc) 554 return rc; 555 556 memset(bp->flow_stat->tx_fc_out_tbl.va, 557 0, 558 bp->flow_stat->tx_fc_out_tbl.size); 559 rc = bnxt_hwrm_cfa_counter_cfg(bp, 
BNXT_DIR_TX, 560 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 561 bp->flow_stat->tx_fc_out_tbl.ctx_id, 562 bp->flow_stat->max_fc, 563 true); 564 565 return rc; 566 } 567 568 static int bnxt_alloc_ctx_mem_buf(char *type, size_t size, 569 struct bnxt_ctx_mem_buf_info *ctx) 570 { 571 if (!ctx) 572 return -EINVAL; 573 574 ctx->va = rte_zmalloc(type, size, 0); 575 if (ctx->va == NULL) 576 return -ENOMEM; 577 rte_mem_lock_page(ctx->va); 578 ctx->size = size; 579 ctx->dma = rte_mem_virt2iova(ctx->va); 580 if (ctx->dma == RTE_BAD_IOVA) 581 return -ENOMEM; 582 583 return 0; 584 } 585 586 static int bnxt_init_fc_ctx_mem(struct bnxt *bp) 587 { 588 struct rte_pci_device *pdev = bp->pdev; 589 char type[RTE_MEMZONE_NAMESIZE]; 590 uint16_t max_fc; 591 int rc = 0; 592 593 max_fc = bp->flow_stat->max_fc; 594 595 sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 596 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 597 /* 4 bytes for each counter-id */ 598 rc = bnxt_alloc_ctx_mem_buf(type, 599 max_fc * 4, 600 &bp->flow_stat->rx_fc_in_tbl); 601 if (rc) 602 return rc; 603 604 sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 605 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 606 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 607 rc = bnxt_alloc_ctx_mem_buf(type, 608 max_fc * 16, 609 &bp->flow_stat->rx_fc_out_tbl); 610 if (rc) 611 return rc; 612 613 sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 614 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 615 /* 4 bytes for each counter-id */ 616 rc = bnxt_alloc_ctx_mem_buf(type, 617 max_fc * 4, 618 &bp->flow_stat->tx_fc_in_tbl); 619 if (rc) 620 return rc; 621 622 sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 623 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 624 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 625 rc = bnxt_alloc_ctx_mem_buf(type, 626 max_fc * 16, 627 &bp->flow_stat->tx_fc_out_tbl); 628 if (rc) 629 return rc; 630 631 rc = bnxt_register_fc_ctx_mem(bp); 632 633 return rc; 634 } 635 636 static int bnxt_init_ctx_mem(struct bnxt *bp) 637 { 638 int rc = 0; 639 640 if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) || 641 !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) || 642 !BNXT_FLOW_XSTATS_EN(bp)) 643 return 0; 644 645 rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc); 646 if (rc) 647 return rc; 648 649 rc = bnxt_init_fc_ctx_mem(bp); 650 651 return rc; 652 } 653 654 static int bnxt_update_phy_setting(struct bnxt *bp) 655 { 656 struct rte_eth_link new; 657 int rc; 658 659 rc = bnxt_get_hwrm_link_config(bp, &new); 660 if (rc) { 661 PMD_DRV_LOG(ERR, "Failed to get link settings\n"); 662 return rc; 663 } 664 665 /* 666 * On BCM957508-N2100 adapters, FW will not allow any user other 667 * than BMC to shutdown the port. bnxt_get_hwrm_link_config() call 668 * always returns link up. Force phy update always in that case. 
669 */ 670 if (!new.link_status || IS_BNXT_DEV_957508_N2100(bp)) { 671 rc = bnxt_set_hwrm_link_config(bp, true); 672 if (rc) { 673 PMD_DRV_LOG(ERR, "Failed to update PHY settings\n"); 674 return rc; 675 } 676 } 677 678 return rc; 679 } 680 681 static int bnxt_init_chip(struct bnxt *bp) 682 { 683 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); 684 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 685 uint32_t intr_vector = 0; 686 uint32_t queue_id, base = BNXT_MISC_VEC_ID; 687 uint32_t vec = BNXT_MISC_VEC_ID; 688 unsigned int i, j; 689 int rc; 690 691 if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) { 692 bp->eth_dev->data->dev_conf.rxmode.offloads |= 693 DEV_RX_OFFLOAD_JUMBO_FRAME; 694 bp->flags |= BNXT_FLAG_JUMBO; 695 } else { 696 bp->eth_dev->data->dev_conf.rxmode.offloads &= 697 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 698 bp->flags &= ~BNXT_FLAG_JUMBO; 699 } 700 701 /* THOR does not support ring groups. 702 * But we will use the array to save RSS context IDs. 703 */ 704 if (BNXT_CHIP_THOR(bp)) 705 bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR; 706 707 rc = bnxt_alloc_all_hwrm_stat_ctxs(bp); 708 if (rc) { 709 PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc); 710 goto err_out; 711 } 712 713 rc = bnxt_alloc_hwrm_rings(bp); 714 if (rc) { 715 PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc); 716 goto err_out; 717 } 718 719 rc = bnxt_alloc_all_hwrm_ring_grps(bp); 720 if (rc) { 721 PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc); 722 goto err_out; 723 } 724 725 if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)) 726 goto skip_cosq_cfg; 727 728 for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) { 729 if (bp->rx_cos_queue[i].id != 0xff) { 730 struct bnxt_vnic_info *vnic = &bp->vnic_info[j++]; 731 732 if (!vnic) { 733 PMD_DRV_LOG(ERR, 734 "Num pools more than FW profile\n"); 735 rc = -EINVAL; 736 goto err_out; 737 } 738 vnic->cos_queue_id = bp->rx_cos_queue[i].id; 739 bp->rx_cosq_cnt++; 740 } 741 } 742 743 skip_cosq_cfg: 744 rc = bnxt_mq_rx_configure(bp); 745 if (rc) { 746 PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc); 747 goto err_out; 748 } 749 750 /* VNIC configuration */ 751 for (i = 0; i < bp->nr_vnics; i++) { 752 rc = bnxt_setup_one_vnic(bp, i); 753 if (rc) 754 goto err_out; 755 } 756 757 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); 758 if (rc) { 759 PMD_DRV_LOG(ERR, 760 "HWRM cfa l2 rx mask failure rc: %x\n", rc); 761 goto err_out; 762 } 763 764 /* check and configure queue intr-vector mapping */ 765 if ((rte_intr_cap_multiple(intr_handle) || 766 !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && 767 bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { 768 intr_vector = bp->eth_dev->data->nb_rx_queues; 769 PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector); 770 if (intr_vector > bp->rx_cp_nr_rings) { 771 PMD_DRV_LOG(ERR, "At most %d intr queues supported", 772 bp->rx_cp_nr_rings); 773 return -ENOTSUP; 774 } 775 rc = rte_intr_efd_enable(intr_handle, intr_vector); 776 if (rc) 777 return rc; 778 } 779 780 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 781 intr_handle->intr_vec = 782 rte_zmalloc("intr_vec", 783 bp->eth_dev->data->nb_rx_queues * 784 sizeof(int), 0); 785 if (intr_handle->intr_vec == NULL) { 786 PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" 787 " intr_vec", bp->eth_dev->data->nb_rx_queues); 788 rc = -ENOMEM; 789 goto err_disable; 790 } 791 PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p " 792 "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n", 793 intr_handle->intr_vec, 
intr_handle->nb_efd, 794 intr_handle->max_intr); 795 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; 796 queue_id++) { 797 intr_handle->intr_vec[queue_id] = 798 vec + BNXT_RX_VEC_START; 799 if (vec < base + intr_handle->nb_efd - 1) 800 vec++; 801 } 802 } 803 804 /* enable uio/vfio intr/eventfd mapping */ 805 rc = rte_intr_enable(intr_handle); 806 #ifndef RTE_EXEC_ENV_FREEBSD 807 /* In FreeBSD OS, nic_uio driver does not support interrupts */ 808 if (rc) 809 goto err_free; 810 #endif 811 812 rc = bnxt_update_phy_setting(bp); 813 if (rc) 814 goto err_free; 815 816 bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0); 817 if (!bp->mark_table) 818 PMD_DRV_LOG(ERR, "Allocation of mark table failed\n"); 819 820 return 0; 821 822 err_free: 823 rte_free(intr_handle->intr_vec); 824 err_disable: 825 rte_intr_efd_disable(intr_handle); 826 err_out: 827 /* Some of the error status returned by FW may not be from errno.h */ 828 if (rc > 0) 829 rc = -EIO; 830 831 return rc; 832 } 833 834 static int bnxt_shutdown_nic(struct bnxt *bp) 835 { 836 bnxt_free_all_hwrm_resources(bp); 837 bnxt_free_all_filters(bp); 838 bnxt_free_all_vnics(bp); 839 return 0; 840 } 841 842 /* 843 * Device configuration and status function 844 */ 845 846 uint32_t bnxt_get_speed_capabilities(struct bnxt *bp) 847 { 848 uint32_t link_speed = bp->link_info->support_speeds; 849 uint32_t speed_capa = 0; 850 851 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB) 852 speed_capa |= ETH_LINK_SPEED_100M; 853 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD) 854 speed_capa |= ETH_LINK_SPEED_100M_HD; 855 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) 856 speed_capa |= ETH_LINK_SPEED_1G; 857 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB) 858 speed_capa |= ETH_LINK_SPEED_2_5G; 859 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB) 860 speed_capa |= ETH_LINK_SPEED_10G; 861 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB) 862 speed_capa |= ETH_LINK_SPEED_20G; 863 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB) 864 speed_capa |= ETH_LINK_SPEED_25G; 865 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB) 866 speed_capa |= ETH_LINK_SPEED_40G; 867 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) 868 speed_capa |= ETH_LINK_SPEED_50G; 869 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) 870 speed_capa |= ETH_LINK_SPEED_100G; 871 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) 872 speed_capa |= ETH_LINK_SPEED_50G; 873 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) 874 speed_capa |= ETH_LINK_SPEED_100G; 875 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G) 876 speed_capa |= ETH_LINK_SPEED_200G; 877 878 if (bp->link_info->auto_mode == 879 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE) 880 speed_capa |= ETH_LINK_SPEED_FIXED; 881 else 882 speed_capa |= ETH_LINK_SPEED_AUTONEG; 883 884 return speed_capa; 885 } 886 887 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, 888 struct rte_eth_dev_info *dev_info) 889 { 890 struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device); 891 struct bnxt *bp = eth_dev->data->dev_private; 892 uint16_t max_vnics, i, j, vpool, vrxq; 893 unsigned int max_rx_rings; 894 int rc; 895 896 rc = is_bnxt_in_error(bp); 897 if (rc) 898 return rc; 899 900 /* MAC Specifics */ 901 dev_info->max_mac_addrs = bp->max_l2_ctx; 902 dev_info->max_hash_mac_addrs = 0; 903 904 /* PF/VF specifics */ 905 if 
(BNXT_PF(bp)) 906 dev_info->max_vfs = pdev->max_vfs; 907 908 max_rx_rings = BNXT_MAX_RINGS(bp); 909 /* For the sake of symmetry, max_rx_queues = max_tx_queues */ 910 dev_info->max_rx_queues = max_rx_rings; 911 dev_info->max_tx_queues = max_rx_rings; 912 dev_info->reta_size = bnxt_rss_hash_tbl_size(bp); 913 dev_info->hash_key_size = 40; 914 max_vnics = bp->max_vnics; 915 916 /* MTU specifics */ 917 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 918 dev_info->max_mtu = BNXT_MAX_MTU; 919 920 /* Fast path specifics */ 921 dev_info->min_rx_bufsize = 1; 922 dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN; 923 924 dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT; 925 if (bp->flags & BNXT_FLAG_PTP_SUPPORTED) 926 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; 927 dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; 928 dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT | 929 dev_info->tx_queue_offload_capa; 930 dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT; 931 932 dev_info->speed_capa = bnxt_get_speed_capabilities(bp); 933 934 /* *INDENT-OFF* */ 935 dev_info->default_rxconf = (struct rte_eth_rxconf) { 936 .rx_thresh = { 937 .pthresh = 8, 938 .hthresh = 8, 939 .wthresh = 0, 940 }, 941 .rx_free_thresh = 32, 942 .rx_drop_en = BNXT_DEFAULT_RX_DROP_EN, 943 }; 944 945 dev_info->default_txconf = (struct rte_eth_txconf) { 946 .tx_thresh = { 947 .pthresh = 32, 948 .hthresh = 0, 949 .wthresh = 0, 950 }, 951 .tx_free_thresh = 32, 952 .tx_rs_thresh = 32, 953 }; 954 eth_dev->data->dev_conf.intr_conf.lsc = 1; 955 956 eth_dev->data->dev_conf.intr_conf.rxq = 1; 957 dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 958 dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; 959 dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 960 dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC; 961 962 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 963 dev_info->switch_info.name = eth_dev->device->name; 964 dev_info->switch_info.domain_id = bp->switch_domain_id; 965 dev_info->switch_info.port_id = 966 BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF : 967 BNXT_SWITCH_PORT_ID_TRUSTED_VF; 968 } 969 970 /* *INDENT-ON* */ 971 972 /* 973 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim 974 * need further investigation. 
975 */ 976 977 /* VMDq resources */ 978 vpool = 64; /* ETH_64_POOLS */ 979 vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */ 980 for (i = 0; i < 4; vpool >>= 1, i++) { 981 if (max_vnics > vpool) { 982 for (j = 0; j < 5; vrxq >>= 1, j++) { 983 if (dev_info->max_rx_queues > vrxq) { 984 if (vpool > vrxq) 985 vpool = vrxq; 986 goto found; 987 } 988 } 989 /* Not enough resources to support VMDq */ 990 break; 991 } 992 } 993 /* Not enough resources to support VMDq */ 994 vpool = 0; 995 vrxq = 0; 996 found: 997 dev_info->max_vmdq_pools = vpool; 998 dev_info->vmdq_queue_num = vrxq; 999 1000 dev_info->vmdq_pool_base = 0; 1001 dev_info->vmdq_queue_base = 0; 1002 1003 return 0; 1004 } 1005 1006 /* Configure the device based on the configuration provided */ 1007 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) 1008 { 1009 struct bnxt *bp = eth_dev->data->dev_private; 1010 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1011 int rc; 1012 1013 bp->rx_queues = (void *)eth_dev->data->rx_queues; 1014 bp->tx_queues = (void *)eth_dev->data->tx_queues; 1015 bp->tx_nr_rings = eth_dev->data->nb_tx_queues; 1016 bp->rx_nr_rings = eth_dev->data->nb_rx_queues; 1017 1018 rc = is_bnxt_in_error(bp); 1019 if (rc) 1020 return rc; 1021 1022 if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) { 1023 rc = bnxt_hwrm_check_vf_rings(bp); 1024 if (rc) { 1025 PMD_DRV_LOG(ERR, "HWRM insufficient resources\n"); 1026 return -ENOSPC; 1027 } 1028 1029 /* If a resource has already been allocated - in this case 1030 * it is the async completion ring, free it. Reallocate it after 1031 * resource reservation. This will ensure the resource counts 1032 * are calculated correctly. 1033 */ 1034 1035 pthread_mutex_lock(&bp->def_cp_lock); 1036 1037 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1038 bnxt_disable_int(bp); 1039 bnxt_free_cp_ring(bp, bp->async_cp_ring); 1040 } 1041 1042 rc = bnxt_hwrm_func_reserve_vf_resc(bp, false); 1043 if (rc) { 1044 PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc); 1045 pthread_mutex_unlock(&bp->def_cp_lock); 1046 return -ENOSPC; 1047 } 1048 1049 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1050 rc = bnxt_alloc_async_cp_ring(bp); 1051 if (rc) { 1052 pthread_mutex_unlock(&bp->def_cp_lock); 1053 return rc; 1054 } 1055 bnxt_enable_int(bp); 1056 } 1057 1058 pthread_mutex_unlock(&bp->def_cp_lock); 1059 } else { 1060 /* legacy driver needs to get updated values */ 1061 rc = bnxt_hwrm_func_qcaps(bp); 1062 if (rc) { 1063 PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc); 1064 return rc; 1065 } 1066 } 1067 1068 /* Inherit new configurations */ 1069 if (eth_dev->data->nb_rx_queues > bp->max_rx_rings || 1070 eth_dev->data->nb_tx_queues > bp->max_tx_rings || 1071 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues 1072 + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings || 1073 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues > 1074 bp->max_stat_ctx) 1075 goto resource_error; 1076 1077 if (BNXT_HAS_RING_GRPS(bp) && 1078 (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps) 1079 goto resource_error; 1080 1081 if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) && 1082 bp->max_vnics < eth_dev->data->nb_rx_queues) 1083 goto resource_error; 1084 1085 bp->rx_cp_nr_rings = bp->rx_nr_rings; 1086 bp->tx_cp_nr_rings = bp->tx_nr_rings; 1087 1088 if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 1089 rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH; 1090 eth_dev->data->dev_conf.rxmode.offloads = rx_offloads; 1091 1092 if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 1093 
eth_dev->data->mtu = 1094 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - 1095 RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE * 1096 BNXT_NUM_VLANS; 1097 bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu); 1098 } 1099 return 0; 1100 1101 resource_error: 1102 PMD_DRV_LOG(ERR, 1103 "Insufficient resources to support requested config\n"); 1104 PMD_DRV_LOG(ERR, 1105 "Num Queues Requested: Tx %d, Rx %d\n", 1106 eth_dev->data->nb_tx_queues, 1107 eth_dev->data->nb_rx_queues); 1108 PMD_DRV_LOG(ERR, 1109 "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n", 1110 bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings, 1111 bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics); 1112 return -ENOSPC; 1113 } 1114 1115 void bnxt_print_link_info(struct rte_eth_dev *eth_dev) 1116 { 1117 struct rte_eth_link *link = ð_dev->data->dev_link; 1118 1119 if (link->link_status) 1120 PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n", 1121 eth_dev->data->port_id, 1122 (uint32_t)link->link_speed, 1123 (link->link_duplex == ETH_LINK_FULL_DUPLEX) ? 1124 ("full-duplex") : ("half-duplex\n")); 1125 else 1126 PMD_DRV_LOG(INFO, "Port %d Link Down\n", 1127 eth_dev->data->port_id); 1128 } 1129 1130 /* 1131 * Determine whether the current configuration requires support for scattered 1132 * receive; return 1 if scattered receive is required and 0 if not. 1133 */ 1134 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev) 1135 { 1136 uint16_t buf_size; 1137 int i; 1138 1139 if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) 1140 return 1; 1141 1142 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 1143 struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i]; 1144 1145 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - 1146 RTE_PKTMBUF_HEADROOM); 1147 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) 1148 return 1; 1149 } 1150 return 0; 1151 } 1152 1153 static eth_rx_burst_t 1154 bnxt_receive_function(struct rte_eth_dev *eth_dev) 1155 { 1156 struct bnxt *bp = eth_dev->data->dev_private; 1157 1158 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 1159 #ifndef RTE_LIBRTE_IEEE1588 1160 /* 1161 * Vector mode receive can be enabled only if scatter rx is not 1162 * in use and rx offloads are limited to VLAN stripping and 1163 * CRC stripping. 
1164 */ 1165 if (!eth_dev->data->scattered_rx && 1166 !(eth_dev->data->dev_conf.rxmode.offloads & 1167 ~(DEV_RX_OFFLOAD_VLAN_STRIP | 1168 DEV_RX_OFFLOAD_KEEP_CRC | 1169 DEV_RX_OFFLOAD_JUMBO_FRAME | 1170 DEV_RX_OFFLOAD_IPV4_CKSUM | 1171 DEV_RX_OFFLOAD_UDP_CKSUM | 1172 DEV_RX_OFFLOAD_TCP_CKSUM | 1173 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1174 DEV_RX_OFFLOAD_RSS_HASH | 1175 DEV_RX_OFFLOAD_VLAN_FILTER)) && 1176 !BNXT_TRUFLOW_EN(bp) && BNXT_NUM_ASYNC_CPR(bp) && 1177 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1178 PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n", 1179 eth_dev->data->port_id); 1180 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1181 return bnxt_recv_pkts_vec; 1182 } 1183 PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n", 1184 eth_dev->data->port_id); 1185 PMD_DRV_LOG(INFO, 1186 "Port %d scatter: %d rx offload: %" PRIX64 "\n", 1187 eth_dev->data->port_id, 1188 eth_dev->data->scattered_rx, 1189 eth_dev->data->dev_conf.rxmode.offloads); 1190 #endif 1191 #endif 1192 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1193 return bnxt_recv_pkts; 1194 } 1195 1196 static eth_tx_burst_t 1197 bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev) 1198 { 1199 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 1200 #ifndef RTE_LIBRTE_IEEE1588 1201 uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads; 1202 struct bnxt *bp = eth_dev->data->dev_private; 1203 1204 /* 1205 * Vector mode transmit can be enabled only if not using scatter rx 1206 * or tx offloads. 1207 */ 1208 if (!eth_dev->data->scattered_rx && 1209 !(offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) && 1210 !BNXT_TRUFLOW_EN(bp) && 1211 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1212 PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n", 1213 eth_dev->data->port_id); 1214 return bnxt_xmit_pkts_vec; 1215 } 1216 PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n", 1217 eth_dev->data->port_id); 1218 PMD_DRV_LOG(INFO, 1219 "Port %d scatter: %d tx offload: %" PRIX64 "\n", 1220 eth_dev->data->port_id, 1221 eth_dev->data->scattered_rx, 1222 offloads); 1223 #endif 1224 #endif 1225 return bnxt_xmit_pkts; 1226 } 1227 1228 static int bnxt_handle_if_change_status(struct bnxt *bp) 1229 { 1230 int rc; 1231 1232 /* Since fw has undergone a reset and lost all contexts, 1233 * set fatal flag to not issue hwrm during cleanup 1234 */ 1235 bp->flags |= BNXT_FLAG_FATAL_ERROR; 1236 bnxt_uninit_resources(bp, true); 1237 1238 /* clear fatal flag so that re-init happens */ 1239 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 1240 rc = bnxt_init_resources(bp, true); 1241 1242 bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE; 1243 1244 return rc; 1245 } 1246 1247 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) 1248 { 1249 struct bnxt *bp = eth_dev->data->dev_private; 1250 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1251 int vlan_mask = 0; 1252 int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT; 1253 1254 if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) { 1255 PMD_DRV_LOG(ERR, "Queues are not configured yet!\n"); 1256 return -EINVAL; 1257 } 1258 1259 if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) { 1260 PMD_DRV_LOG(ERR, 1261 "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", 1262 bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); 1263 } 1264 1265 do { 1266 rc = bnxt_hwrm_if_change(bp, true); 1267 if (rc == 0 || rc != -EAGAIN) 1268 break; 1269 1270 rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL); 1271 } while (retry_cnt--); 1272 1273 if (rc) 1274 return rc; 
1275 1276 if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) { 1277 rc = bnxt_handle_if_change_status(bp); 1278 if (rc) 1279 return rc; 1280 } 1281 1282 bnxt_enable_int(bp); 1283 1284 rc = bnxt_init_chip(bp); 1285 if (rc) 1286 goto error; 1287 1288 eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev); 1289 eth_dev->data->dev_started = 1; 1290 1291 bnxt_link_update_op(eth_dev, 1); 1292 1293 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 1294 vlan_mask |= ETH_VLAN_FILTER_MASK; 1295 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 1296 vlan_mask |= ETH_VLAN_STRIP_MASK; 1297 rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); 1298 if (rc) 1299 goto error; 1300 1301 /* Initialize bnxt ULP port details */ 1302 rc = bnxt_ulp_port_init(bp); 1303 if (rc) 1304 goto error; 1305 1306 eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev); 1307 eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev); 1308 1309 bnxt_schedule_fw_health_check(bp); 1310 1311 return 0; 1312 1313 error: 1314 bnxt_shutdown_nic(bp); 1315 bnxt_free_tx_mbufs(bp); 1316 bnxt_free_rx_mbufs(bp); 1317 bnxt_hwrm_if_change(bp, false); 1318 eth_dev->data->dev_started = 0; 1319 return rc; 1320 } 1321 1322 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) 1323 { 1324 struct bnxt *bp = eth_dev->data->dev_private; 1325 int rc = 0; 1326 1327 if (!bp->link_info->link_up) 1328 rc = bnxt_set_hwrm_link_config(bp, true); 1329 if (!rc) 1330 eth_dev->data->dev_link.link_status = 1; 1331 1332 bnxt_print_link_info(eth_dev); 1333 return rc; 1334 } 1335 1336 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) 1337 { 1338 struct bnxt *bp = eth_dev->data->dev_private; 1339 1340 eth_dev->data->dev_link.link_status = 0; 1341 bnxt_set_hwrm_link_config(bp, false); 1342 bp->link_info->link_up = 0; 1343 1344 return 0; 1345 } 1346 1347 static void bnxt_free_switch_domain(struct bnxt *bp) 1348 { 1349 if (bp->switch_domain_id) 1350 rte_eth_switch_domain_free(bp->switch_domain_id); 1351 } 1352 1353 /* Unload the driver, release resources */ 1354 static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) 1355 { 1356 struct bnxt *bp = eth_dev->data->dev_private; 1357 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1358 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1359 struct rte_eth_link link; 1360 int ret; 1361 1362 eth_dev->data->dev_started = 0; 1363 eth_dev->data->scattered_rx = 0; 1364 1365 /* Prevent crashes when queues are still in use */ 1366 eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts; 1367 eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts; 1368 1369 bnxt_disable_int(bp); 1370 1371 /* disable uio/vfio intr/eventfd mapping */ 1372 rte_intr_disable(intr_handle); 1373 1374 /* Stop the child representors for this device */ 1375 ret = bnxt_rep_stop_all(bp); 1376 if (ret != 0) 1377 return ret; 1378 1379 /* delete the bnxt ULP port details */ 1380 bnxt_ulp_port_deinit(bp); 1381 1382 bnxt_cancel_fw_health_check(bp); 1383 1384 /* Do not bring link down during reset recovery */ 1385 if (!is_bnxt_in_error(bp)) { 1386 bnxt_dev_set_link_down_op(eth_dev); 1387 /* Wait for link to be reset */ 1388 if (BNXT_SINGLE_PF(bp)) 1389 rte_delay_ms(500); 1390 /* clear the recorded link status */ 1391 memset(&link, 0, sizeof(link)); 1392 rte_eth_linkstatus_set(eth_dev, &link); 1393 } 1394 1395 /* Clean queue intr-vector mapping */ 1396 rte_intr_efd_disable(intr_handle); 1397 if (intr_handle->intr_vec != NULL) { 1398 rte_free(intr_handle->intr_vec); 1399 intr_handle->intr_vec = NULL; 1400 } 1401 1402 
bnxt_hwrm_port_clr_stats(bp); 1403 bnxt_free_tx_mbufs(bp); 1404 bnxt_free_rx_mbufs(bp); 1405 /* Process any remaining notifications in default completion queue */ 1406 bnxt_int_handler(eth_dev); 1407 bnxt_shutdown_nic(bp); 1408 bnxt_hwrm_if_change(bp, false); 1409 1410 rte_free(bp->mark_table); 1411 bp->mark_table = NULL; 1412 1413 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1414 bp->rx_cosq_cnt = 0; 1415 /* All filters are deleted on a port stop. */ 1416 if (BNXT_FLOW_XSTATS_EN(bp)) 1417 bp->flow_stat->flow_count = 0; 1418 1419 return 0; 1420 } 1421 1422 static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) 1423 { 1424 struct bnxt *bp = eth_dev->data->dev_private; 1425 int ret = 0; 1426 1427 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1428 return 0; 1429 1430 /* cancel the recovery handler before remove dev */ 1431 rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); 1432 rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); 1433 bnxt_cancel_fc_thread(bp); 1434 1435 if (eth_dev->data->dev_started) 1436 ret = bnxt_dev_stop_op(eth_dev); 1437 1438 bnxt_free_switch_domain(bp); 1439 1440 bnxt_uninit_resources(bp, false); 1441 1442 bnxt_free_leds_info(bp); 1443 bnxt_free_cos_queues(bp); 1444 bnxt_free_link_info(bp); 1445 bnxt_free_pf_info(bp); 1446 bnxt_free_parent_info(bp); 1447 1448 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); 1449 bp->tx_mem_zone = NULL; 1450 rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); 1451 bp->rx_mem_zone = NULL; 1452 1453 bnxt_hwrm_free_vf_info(bp); 1454 1455 rte_free(bp->grp_info); 1456 bp->grp_info = NULL; 1457 1458 return ret; 1459 } 1460 1461 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, 1462 uint32_t index) 1463 { 1464 struct bnxt *bp = eth_dev->data->dev_private; 1465 uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; 1466 struct bnxt_vnic_info *vnic; 1467 struct bnxt_filter_info *filter, *temp_filter; 1468 uint32_t i; 1469 1470 if (is_bnxt_in_error(bp)) 1471 return; 1472 1473 /* 1474 * Loop through all VNICs from the specified filter flow pools to 1475 * remove the corresponding MAC addr filter 1476 */ 1477 for (i = 0; i < bp->nr_vnics; i++) { 1478 if (!(pool_mask & (1ULL << i))) 1479 continue; 1480 1481 vnic = &bp->vnic_info[i]; 1482 filter = STAILQ_FIRST(&vnic->filter); 1483 while (filter) { 1484 temp_filter = STAILQ_NEXT(filter, next); 1485 if (filter->mac_index == index) { 1486 STAILQ_REMOVE(&vnic->filter, filter, 1487 bnxt_filter_info, next); 1488 bnxt_hwrm_clear_l2_filter(bp, filter); 1489 bnxt_free_filter(bp, filter); 1490 } 1491 filter = temp_filter; 1492 } 1493 } 1494 } 1495 1496 static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic, 1497 struct rte_ether_addr *mac_addr, uint32_t index, 1498 uint32_t pool) 1499 { 1500 struct bnxt_filter_info *filter; 1501 int rc = 0; 1502 1503 /* Attach requested MAC address to the new l2_filter */ 1504 STAILQ_FOREACH(filter, &vnic->filter, next) { 1505 if (filter->mac_index == index) { 1506 PMD_DRV_LOG(DEBUG, 1507 "MAC addr already existed for pool %d\n", 1508 pool); 1509 return 0; 1510 } 1511 } 1512 1513 filter = bnxt_alloc_filter(bp); 1514 if (!filter) { 1515 PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); 1516 return -ENODEV; 1517 } 1518 1519 /* bnxt_alloc_filter copies default MAC to filter->l2_addr. 
So, 1520 * if the MAC that's been programmed now is a different one, then, 1521 * copy that addr to filter->l2_addr 1522 */ 1523 if (mac_addr) 1524 memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN); 1525 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 1526 1527 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 1528 if (!rc) { 1529 filter->mac_index = index; 1530 if (filter->mac_index == 0) 1531 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 1532 else 1533 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 1534 } else { 1535 bnxt_free_filter(bp, filter); 1536 } 1537 1538 return rc; 1539 } 1540 1541 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, 1542 struct rte_ether_addr *mac_addr, 1543 uint32_t index, uint32_t pool) 1544 { 1545 struct bnxt *bp = eth_dev->data->dev_private; 1546 struct bnxt_vnic_info *vnic = &bp->vnic_info[pool]; 1547 int rc = 0; 1548 1549 rc = is_bnxt_in_error(bp); 1550 if (rc) 1551 return rc; 1552 1553 if (BNXT_VF(bp) & !BNXT_VF_IS_TRUSTED(bp)) { 1554 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n"); 1555 return -ENOTSUP; 1556 } 1557 1558 if (!vnic) { 1559 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool); 1560 return -EINVAL; 1561 } 1562 1563 /* Filter settings will get applied when port is started */ 1564 if (!eth_dev->data->dev_started) 1565 return 0; 1566 1567 rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool); 1568 1569 return rc; 1570 } 1571 1572 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) 1573 { 1574 int rc = 0; 1575 struct bnxt *bp = eth_dev->data->dev_private; 1576 struct rte_eth_link new; 1577 int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT : 1578 BNXT_MIN_LINK_WAIT_CNT; 1579 1580 rc = is_bnxt_in_error(bp); 1581 if (rc) 1582 return rc; 1583 1584 memset(&new, 0, sizeof(new)); 1585 do { 1586 /* Retrieve link info from hardware */ 1587 rc = bnxt_get_hwrm_link_config(bp, &new); 1588 if (rc) { 1589 new.link_speed = ETH_LINK_SPEED_100M; 1590 new.link_duplex = ETH_LINK_FULL_DUPLEX; 1591 PMD_DRV_LOG(ERR, 1592 "Failed to retrieve link rc = 0x%x!\n", rc); 1593 goto out; 1594 } 1595 1596 if (!wait_to_complete || new.link_status) 1597 break; 1598 1599 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); 1600 } while (cnt--); 1601 1602 /* Only single function PF can bring phy down. 1603 * When port is stopped, report link down for VF/MH/NPAR functions. 
1604 */ 1605 if (!BNXT_SINGLE_PF(bp) && !eth_dev->data->dev_started) 1606 memset(&new, 0, sizeof(new)); 1607 1608 out: 1609 /* Timed out or success */ 1610 if (new.link_status != eth_dev->data->dev_link.link_status || 1611 new.link_speed != eth_dev->data->dev_link.link_speed) { 1612 rte_eth_linkstatus_set(eth_dev, &new); 1613 1614 rte_eth_dev_callback_process(eth_dev, 1615 RTE_ETH_EVENT_INTR_LSC, 1616 NULL); 1617 1618 bnxt_print_link_info(eth_dev); 1619 } 1620 1621 return rc; 1622 } 1623 1624 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) 1625 { 1626 struct bnxt *bp = eth_dev->data->dev_private; 1627 struct bnxt_vnic_info *vnic; 1628 uint32_t old_flags; 1629 int rc; 1630 1631 rc = is_bnxt_in_error(bp); 1632 if (rc) 1633 return rc; 1634 1635 /* Filter settings will get applied when port is started */ 1636 if (!eth_dev->data->dev_started) 1637 return 0; 1638 1639 if (bp->vnic_info == NULL) 1640 return 0; 1641 1642 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1643 1644 old_flags = vnic->flags; 1645 vnic->flags |= BNXT_VNIC_INFO_PROMISC; 1646 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1647 if (rc != 0) 1648 vnic->flags = old_flags; 1649 1650 return rc; 1651 } 1652 1653 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) 1654 { 1655 struct bnxt *bp = eth_dev->data->dev_private; 1656 struct bnxt_vnic_info *vnic; 1657 uint32_t old_flags; 1658 int rc; 1659 1660 rc = is_bnxt_in_error(bp); 1661 if (rc) 1662 return rc; 1663 1664 /* Filter settings will get applied when port is started */ 1665 if (!eth_dev->data->dev_started) 1666 return 0; 1667 1668 if (bp->vnic_info == NULL) 1669 return 0; 1670 1671 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1672 1673 old_flags = vnic->flags; 1674 vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; 1675 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1676 if (rc != 0) 1677 vnic->flags = old_flags; 1678 1679 return rc; 1680 } 1681 1682 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) 1683 { 1684 struct bnxt *bp = eth_dev->data->dev_private; 1685 struct bnxt_vnic_info *vnic; 1686 uint32_t old_flags; 1687 int rc; 1688 1689 rc = is_bnxt_in_error(bp); 1690 if (rc) 1691 return rc; 1692 1693 /* Filter settings will get applied when port is started */ 1694 if (!eth_dev->data->dev_started) 1695 return 0; 1696 1697 if (bp->vnic_info == NULL) 1698 return 0; 1699 1700 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1701 1702 old_flags = vnic->flags; 1703 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 1704 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1705 if (rc != 0) 1706 vnic->flags = old_flags; 1707 1708 return rc; 1709 } 1710 1711 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) 1712 { 1713 struct bnxt *bp = eth_dev->data->dev_private; 1714 struct bnxt_vnic_info *vnic; 1715 uint32_t old_flags; 1716 int rc; 1717 1718 rc = is_bnxt_in_error(bp); 1719 if (rc) 1720 return rc; 1721 1722 /* Filter settings will get applied when port is started */ 1723 if (!eth_dev->data->dev_started) 1724 return 0; 1725 1726 if (bp->vnic_info == NULL) 1727 return 0; 1728 1729 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1730 1731 old_flags = vnic->flags; 1732 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 1733 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1734 if (rc != 0) 1735 vnic->flags = old_flags; 1736 1737 return rc; 1738 } 1739 1740 /* Return bnxt_rx_queue pointer corresponding to a given rxq. 
*/ 1741 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) 1742 { 1743 if (qid >= bp->rx_nr_rings) 1744 return NULL; 1745 1746 return bp->eth_dev->data->rx_queues[qid]; 1747 } 1748 1749 /* Return rxq corresponding to a given rss table ring/group ID. */ 1750 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) 1751 { 1752 struct bnxt_rx_queue *rxq; 1753 unsigned int i; 1754 1755 if (!BNXT_HAS_RING_GRPS(bp)) { 1756 for (i = 0; i < bp->rx_nr_rings; i++) { 1757 rxq = bp->eth_dev->data->rx_queues[i]; 1758 if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr) 1759 return rxq->index; 1760 } 1761 } else { 1762 for (i = 0; i < bp->rx_nr_rings; i++) { 1763 if (bp->grp_info[i].fw_grp_id == fwr) 1764 return i; 1765 } 1766 } 1767 1768 return INVALID_HW_RING_ID; 1769 } 1770 1771 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, 1772 struct rte_eth_rss_reta_entry64 *reta_conf, 1773 uint16_t reta_size) 1774 { 1775 struct bnxt *bp = eth_dev->data->dev_private; 1776 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 1777 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 1778 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 1779 uint16_t idx, sft; 1780 int i, rc; 1781 1782 rc = is_bnxt_in_error(bp); 1783 if (rc) 1784 return rc; 1785 1786 if (!vnic->rss_table) 1787 return -EINVAL; 1788 1789 if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) 1790 return -EINVAL; 1791 1792 if (reta_size != tbl_size) { 1793 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 1794 "(%d) must equal the size supported by the hardware " 1795 "(%d)\n", reta_size, tbl_size); 1796 return -EINVAL; 1797 } 1798 1799 for (i = 0; i < reta_size; i++) { 1800 struct bnxt_rx_queue *rxq; 1801 1802 idx = i / RTE_RETA_GROUP_SIZE; 1803 sft = i % RTE_RETA_GROUP_SIZE; 1804 1805 if (!(reta_conf[idx].mask & (1ULL << sft))) 1806 continue; 1807 1808 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); 1809 if (!rxq) { 1810 PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n"); 1811 return -EINVAL; 1812 } 1813 1814 if (BNXT_CHIP_THOR(bp)) { 1815 vnic->rss_table[i * 2] = 1816 rxq->rx_ring->rx_ring_struct->fw_ring_id; 1817 vnic->rss_table[i * 2 + 1] = 1818 rxq->cp_ring->cp_ring_struct->fw_ring_id; 1819 } else { 1820 vnic->rss_table[i] = 1821 vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; 1822 } 1823 } 1824 1825 bnxt_hwrm_vnic_rss_cfg(bp, vnic); 1826 return 0; 1827 } 1828 1829 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, 1830 struct rte_eth_rss_reta_entry64 *reta_conf, 1831 uint16_t reta_size) 1832 { 1833 struct bnxt *bp = eth_dev->data->dev_private; 1834 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 1835 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 1836 uint16_t idx, sft, i; 1837 int rc; 1838 1839 rc = is_bnxt_in_error(bp); 1840 if (rc) 1841 return rc; 1842 1843 /* Retrieve from the default VNIC */ 1844 if (!vnic) 1845 return -EINVAL; 1846 if (!vnic->rss_table) 1847 return -EINVAL; 1848 1849 if (reta_size != tbl_size) { 1850 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 1851 "(%d) must equal the size supported by the hardware " 1852 "(%d)\n", reta_size, tbl_size); 1853 return -EINVAL; 1854 } 1855 1856 for (idx = 0, i = 0; i < reta_size; i++) { 1857 idx = i / RTE_RETA_GROUP_SIZE; 1858 sft = i % RTE_RETA_GROUP_SIZE; 1859 1860 if (reta_conf[idx].mask & (1ULL << sft)) { 1861 uint16_t qid; 1862 1863 if (BNXT_CHIP_THOR(bp)) 1864 qid = bnxt_rss_to_qid(bp, 1865 vnic->rss_table[i * 2]); 1866 else 1867 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); 1868 1869 if (qid 
== INVALID_HW_RING_ID) { 1870 PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n"); 1871 return -EINVAL; 1872 } 1873 reta_conf[idx].reta[sft] = qid; 1874 } 1875 } 1876 1877 return 0; 1878 } 1879 1880 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, 1881 struct rte_eth_rss_conf *rss_conf) 1882 { 1883 struct bnxt *bp = eth_dev->data->dev_private; 1884 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 1885 struct bnxt_vnic_info *vnic; 1886 int rc; 1887 1888 rc = is_bnxt_in_error(bp); 1889 if (rc) 1890 return rc; 1891 1892 /* 1893 * If RSS enablement were different than dev_configure, 1894 * then return -EINVAL 1895 */ 1896 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { 1897 if (!rss_conf->rss_hf) 1898 PMD_DRV_LOG(ERR, "Hash type NONE\n"); 1899 } else { 1900 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) 1901 return -EINVAL; 1902 } 1903 1904 bp->flags |= BNXT_FLAG_UPDATE_HASH; 1905 memcpy(ð_dev->data->dev_conf.rx_adv_conf.rss_conf, 1906 rss_conf, 1907 sizeof(*rss_conf)); 1908 1909 /* Update the default RSS VNIC(s) */ 1910 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1911 vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf); 1912 vnic->hash_mode = 1913 bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf, 1914 ETH_RSS_LEVEL(rss_conf->rss_hf)); 1915 1916 /* 1917 * If hashkey is not specified, use the previously configured 1918 * hashkey 1919 */ 1920 if (!rss_conf->rss_key) 1921 goto rss_config; 1922 1923 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) { 1924 PMD_DRV_LOG(ERR, 1925 "Invalid hashkey length, should be 16 bytes\n"); 1926 return -EINVAL; 1927 } 1928 memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); 1929 1930 rss_config: 1931 bnxt_hwrm_vnic_rss_cfg(bp, vnic); 1932 return 0; 1933 } 1934 1935 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, 1936 struct rte_eth_rss_conf *rss_conf) 1937 { 1938 struct bnxt *bp = eth_dev->data->dev_private; 1939 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 1940 int len, rc; 1941 uint32_t hash_types; 1942 1943 rc = is_bnxt_in_error(bp); 1944 if (rc) 1945 return rc; 1946 1947 /* RSS configuration is the same for all VNICs */ 1948 if (vnic && vnic->rss_hash_key) { 1949 if (rss_conf->rss_key) { 1950 len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ? 
1951 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 1952 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 1953 } 1954 1955 hash_types = vnic->hash_type; 1956 rss_conf->rss_hf = 0; 1957 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { 1958 rss_conf->rss_hf |= ETH_RSS_IPV4; 1959 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 1960 } 1961 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { 1962 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; 1963 hash_types &= 1964 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 1965 } 1966 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { 1967 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; 1968 hash_types &= 1969 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 1970 } 1971 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { 1972 rss_conf->rss_hf |= ETH_RSS_IPV6; 1973 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 1974 } 1975 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { 1976 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; 1977 hash_types &= 1978 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 1979 } 1980 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { 1981 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; 1982 hash_types &= 1983 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 1984 } 1985 1986 rss_conf->rss_hf |= 1987 bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode); 1988 1989 if (hash_types) { 1990 PMD_DRV_LOG(ERR, 1991 "Unknown RSS config from firmware (%08x), RSS disabled", 1992 vnic->hash_type); 1993 return -ENOTSUP; 1994 } 1995 } else { 1996 rss_conf->rss_hf = 0; 1997 } 1998 return 0; 1999 } 2000 2001 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 2002 struct rte_eth_fc_conf *fc_conf) 2003 { 2004 struct bnxt *bp = dev->data->dev_private; 2005 struct rte_eth_link link_info; 2006 int rc; 2007 2008 rc = is_bnxt_in_error(bp); 2009 if (rc) 2010 return rc; 2011 2012 rc = bnxt_get_hwrm_link_config(bp, &link_info); 2013 if (rc) 2014 return rc; 2015 2016 memset(fc_conf, 0, sizeof(*fc_conf)); 2017 if (bp->link_info->auto_pause) 2018 fc_conf->autoneg = 1; 2019 switch (bp->link_info->pause) { 2020 case 0: 2021 fc_conf->mode = RTE_FC_NONE; 2022 break; 2023 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 2024 fc_conf->mode = RTE_FC_TX_PAUSE; 2025 break; 2026 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 2027 fc_conf->mode = RTE_FC_RX_PAUSE; 2028 break; 2029 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 2030 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 2031 fc_conf->mode = RTE_FC_FULL; 2032 break; 2033 } 2034 return 0; 2035 } 2036 2037 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 2038 struct rte_eth_fc_conf *fc_conf) 2039 { 2040 struct bnxt *bp = dev->data->dev_private; 2041 int rc; 2042 2043 rc = is_bnxt_in_error(bp); 2044 if (rc) 2045 return rc; 2046 2047 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { 2048 PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n"); 2049 return -ENOTSUP; 2050 } 2051 2052 switch (fc_conf->mode) { 2053 case RTE_FC_NONE: 2054 bp->link_info->auto_pause = 0; 2055 bp->link_info->force_pause = 0; 2056 break; 2057 case RTE_FC_RX_PAUSE: 2058 if (fc_conf->autoneg) { 2059 bp->link_info->auto_pause = 2060 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2061 bp->link_info->force_pause = 0; 2062 } else { 2063 bp->link_info->auto_pause = 0; 2064 bp->link_info->force_pause = 2065 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2066 } 2067 break; 2068 case RTE_FC_TX_PAUSE: 2069 if (fc_conf->autoneg) { 2070 bp->link_info->auto_pause = 2071 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 2072 
bp->link_info->force_pause = 0; 2073 } else { 2074 bp->link_info->auto_pause = 0; 2075 bp->link_info->force_pause = 2076 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 2077 } 2078 break; 2079 case RTE_FC_FULL: 2080 if (fc_conf->autoneg) { 2081 bp->link_info->auto_pause = 2082 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 2083 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2084 bp->link_info->force_pause = 0; 2085 } else { 2086 bp->link_info->auto_pause = 0; 2087 bp->link_info->force_pause = 2088 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 2089 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2090 } 2091 break; 2092 } 2093 return bnxt_set_hwrm_link_config(bp, true); 2094 } 2095 2096 /* Add UDP tunneling port */ 2097 static int 2098 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 2099 struct rte_eth_udp_tunnel *udp_tunnel) 2100 { 2101 struct bnxt *bp = eth_dev->data->dev_private; 2102 uint16_t tunnel_type = 0; 2103 int rc = 0; 2104 2105 rc = is_bnxt_in_error(bp); 2106 if (rc) 2107 return rc; 2108 2109 switch (udp_tunnel->prot_type) { 2110 case RTE_TUNNEL_TYPE_VXLAN: 2111 if (bp->vxlan_port_cnt) { 2112 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2113 udp_tunnel->udp_port); 2114 if (bp->vxlan_port != udp_tunnel->udp_port) { 2115 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2116 return -ENOSPC; 2117 } 2118 bp->vxlan_port_cnt++; 2119 return 0; 2120 } 2121 tunnel_type = 2122 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 2123 bp->vxlan_port_cnt++; 2124 break; 2125 case RTE_TUNNEL_TYPE_GENEVE: 2126 if (bp->geneve_port_cnt) { 2127 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2128 udp_tunnel->udp_port); 2129 if (bp->geneve_port != udp_tunnel->udp_port) { 2130 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2131 return -ENOSPC; 2132 } 2133 bp->geneve_port_cnt++; 2134 return 0; 2135 } 2136 tunnel_type = 2137 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 2138 bp->geneve_port_cnt++; 2139 break; 2140 default: 2141 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2142 return -ENOTSUP; 2143 } 2144 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 2145 tunnel_type); 2146 return rc; 2147 } 2148 2149 static int 2150 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 2151 struct rte_eth_udp_tunnel *udp_tunnel) 2152 { 2153 struct bnxt *bp = eth_dev->data->dev_private; 2154 uint16_t tunnel_type = 0; 2155 uint16_t port = 0; 2156 int rc = 0; 2157 2158 rc = is_bnxt_in_error(bp); 2159 if (rc) 2160 return rc; 2161 2162 switch (udp_tunnel->prot_type) { 2163 case RTE_TUNNEL_TYPE_VXLAN: 2164 if (!bp->vxlan_port_cnt) { 2165 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2166 return -EINVAL; 2167 } 2168 if (bp->vxlan_port != udp_tunnel->udp_port) { 2169 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2170 udp_tunnel->udp_port, bp->vxlan_port); 2171 return -EINVAL; 2172 } 2173 if (--bp->vxlan_port_cnt) 2174 return 0; 2175 2176 tunnel_type = 2177 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 2178 port = bp->vxlan_fw_dst_port_id; 2179 break; 2180 case RTE_TUNNEL_TYPE_GENEVE: 2181 if (!bp->geneve_port_cnt) { 2182 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2183 return -EINVAL; 2184 } 2185 if (bp->geneve_port != udp_tunnel->udp_port) { 2186 PMD_DRV_LOG(ERR, "Req Port: %d. 
Configured port: %d\n", 2187 udp_tunnel->udp_port, bp->geneve_port); 2188 return -EINVAL; 2189 } 2190 if (--bp->geneve_port_cnt) 2191 return 0; 2192 2193 tunnel_type = 2194 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 2195 port = bp->geneve_fw_dst_port_id; 2196 break; 2197 default: 2198 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2199 return -ENOTSUP; 2200 } 2201 2202 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 2203 return rc; 2204 } 2205 2206 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2207 { 2208 struct bnxt_filter_info *filter; 2209 struct bnxt_vnic_info *vnic; 2210 int rc = 0; 2211 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2212 2213 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2214 filter = STAILQ_FIRST(&vnic->filter); 2215 while (filter) { 2216 /* Search for this matching MAC+VLAN filter */ 2217 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) { 2218 /* Delete the filter */ 2219 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2220 if (rc) 2221 return rc; 2222 STAILQ_REMOVE(&vnic->filter, filter, 2223 bnxt_filter_info, next); 2224 bnxt_free_filter(bp, filter); 2225 PMD_DRV_LOG(INFO, 2226 "Deleted vlan filter for %d\n", 2227 vlan_id); 2228 return 0; 2229 } 2230 filter = STAILQ_NEXT(filter, next); 2231 } 2232 return -ENOENT; 2233 } 2234 2235 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2236 { 2237 struct bnxt_filter_info *filter; 2238 struct bnxt_vnic_info *vnic; 2239 int rc = 0; 2240 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 2241 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 2242 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2243 2244 /* Implementation notes on the use of VNIC in this command: 2245 * 2246 * By default, these filters belong to default vnic for the function. 2247 * Once these filters are set up, only destination VNIC can be modified. 2248 * If the destination VNIC is not specified in this command, 2249 * then the HWRM shall only create an l2 context id. 2250 */ 2251 2252 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2253 filter = STAILQ_FIRST(&vnic->filter); 2254 /* Check if the VLAN has already been added */ 2255 while (filter) { 2256 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) 2257 return -EEXIST; 2258 2259 filter = STAILQ_NEXT(filter, next); 2260 } 2261 2262 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 2263 * command to create MAC+VLAN filter with the right flags, enables set. 2264 */ 2265 filter = bnxt_alloc_filter(bp); 2266 if (!filter) { 2267 PMD_DRV_LOG(ERR, 2268 "MAC/VLAN filter alloc failed\n"); 2269 return -ENOMEM; 2270 } 2271 /* MAC + VLAN ID filter */ 2272 /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only 2273 * untagged packets are received 2274 * 2275 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged 2276 * packets and only the programmed vlan's packets are received 2277 */ 2278 filter->l2_ivlan = vlan_id; 2279 filter->l2_ivlan_mask = 0x0FFF; 2280 filter->enables |= en; 2281 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 2282 2283 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 2284 if (rc) { 2285 /* Free the newly allocated filter as we were 2286 * not able to create the filter in hardware. 
2287 */ 2288 bnxt_free_filter(bp, filter); 2289 return rc; 2290 } 2291 2292 filter->mac_index = 0; 2293 /* Add this new filter to the list */ 2294 if (vlan_id == 0) 2295 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 2296 else 2297 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2298 2299 PMD_DRV_LOG(INFO, 2300 "Added Vlan filter for %d\n", vlan_id); 2301 return rc; 2302 } 2303 2304 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 2305 uint16_t vlan_id, int on) 2306 { 2307 struct bnxt *bp = eth_dev->data->dev_private; 2308 int rc; 2309 2310 rc = is_bnxt_in_error(bp); 2311 if (rc) 2312 return rc; 2313 2314 if (!eth_dev->data->dev_started) { 2315 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); 2316 return -EINVAL; 2317 } 2318 2319 /* These operations apply to ALL existing MAC/VLAN filters */ 2320 if (on) 2321 return bnxt_add_vlan_filter(bp, vlan_id); 2322 else 2323 return bnxt_del_vlan_filter(bp, vlan_id); 2324 } 2325 2326 static int bnxt_del_dflt_mac_filter(struct bnxt *bp, 2327 struct bnxt_vnic_info *vnic) 2328 { 2329 struct bnxt_filter_info *filter; 2330 int rc; 2331 2332 filter = STAILQ_FIRST(&vnic->filter); 2333 while (filter) { 2334 if (filter->mac_index == 0 && 2335 !memcmp(filter->l2_addr, bp->mac_addr, 2336 RTE_ETHER_ADDR_LEN)) { 2337 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2338 if (!rc) { 2339 STAILQ_REMOVE(&vnic->filter, filter, 2340 bnxt_filter_info, next); 2341 bnxt_free_filter(bp, filter); 2342 } 2343 return rc; 2344 } 2345 filter = STAILQ_NEXT(filter, next); 2346 } 2347 return 0; 2348 } 2349 2350 static int 2351 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) 2352 { 2353 struct bnxt_vnic_info *vnic; 2354 unsigned int i; 2355 int rc; 2356 2357 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2358 if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { 2359 /* Remove any VLAN filters programmed */ 2360 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2361 bnxt_del_vlan_filter(bp, i); 2362 2363 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2364 if (rc) 2365 return rc; 2366 } else { 2367 /* Default filter will allow packets that match the 2368 * dest mac. 
So, it has to be deleted, otherwise, we 2369 * will endup receiving vlan packets for which the 2370 * filter is not programmed, when hw-vlan-filter 2371 * configuration is ON 2372 */ 2373 bnxt_del_dflt_mac_filter(bp, vnic); 2374 /* This filter will allow only untagged packets */ 2375 bnxt_add_vlan_filter(bp, 0); 2376 } 2377 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", 2378 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)); 2379 2380 return 0; 2381 } 2382 2383 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) 2384 { 2385 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 2386 unsigned int i; 2387 int rc; 2388 2389 /* Destroy vnic filters and vnic */ 2390 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2391 DEV_RX_OFFLOAD_VLAN_FILTER) { 2392 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2393 bnxt_del_vlan_filter(bp, i); 2394 } 2395 bnxt_del_dflt_mac_filter(bp, vnic); 2396 2397 rc = bnxt_hwrm_vnic_free(bp, vnic); 2398 if (rc) 2399 return rc; 2400 2401 rte_free(vnic->fw_grp_ids); 2402 vnic->fw_grp_ids = NULL; 2403 2404 vnic->rx_queue_cnt = 0; 2405 2406 return 0; 2407 } 2408 2409 static int 2410 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads) 2411 { 2412 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2413 int rc; 2414 2415 /* Destroy, recreate and reconfigure the default vnic */ 2416 rc = bnxt_free_one_vnic(bp, 0); 2417 if (rc) 2418 return rc; 2419 2420 /* default vnic 0 */ 2421 rc = bnxt_setup_one_vnic(bp, 0); 2422 if (rc) 2423 return rc; 2424 2425 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2426 DEV_RX_OFFLOAD_VLAN_FILTER) { 2427 rc = bnxt_add_vlan_filter(bp, 0); 2428 if (rc) 2429 return rc; 2430 rc = bnxt_restore_vlan_filters(bp); 2431 if (rc) 2432 return rc; 2433 } else { 2434 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2435 if (rc) 2436 return rc; 2437 } 2438 2439 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2440 if (rc) 2441 return rc; 2442 2443 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", 2444 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)); 2445 2446 return rc; 2447 } 2448 2449 static int 2450 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 2451 { 2452 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 2453 struct bnxt *bp = dev->data->dev_private; 2454 int rc; 2455 2456 rc = is_bnxt_in_error(bp); 2457 if (rc) 2458 return rc; 2459 2460 /* Filter settings will get applied when port is started */ 2461 if (!dev->data->dev_started) 2462 return 0; 2463 2464 if (mask & ETH_VLAN_FILTER_MASK) { 2465 /* Enable or disable VLAN filtering */ 2466 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads); 2467 if (rc) 2468 return rc; 2469 } 2470 2471 if (mask & ETH_VLAN_STRIP_MASK) { 2472 /* Enable or disable VLAN stripping */ 2473 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads); 2474 if (rc) 2475 return rc; 2476 } 2477 2478 if (mask & ETH_VLAN_EXTEND_MASK) { 2479 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2480 PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); 2481 else 2482 PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); 2483 } 2484 2485 return 0; 2486 } 2487 2488 static int 2489 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, 2490 uint16_t tpid) 2491 { 2492 struct bnxt *bp = dev->data->dev_private; 2493 int qinq = dev->data->dev_conf.rxmode.offloads & 2494 DEV_RX_OFFLOAD_VLAN_EXTEND; 2495 2496 if (vlan_type != ETH_VLAN_TYPE_INNER && 2497 vlan_type != ETH_VLAN_TYPE_OUTER) { 2498 PMD_DRV_LOG(ERR, 2499 "Unsupported vlan type."); 2500 return -EINVAL; 2501 } 2502 if (!qinq) { 2503 PMD_DRV_LOG(ERR, 
2504 "QinQ not enabled. Needs to be ON as we can " 2505 "accelerate only outer vlan\n"); 2506 return -EINVAL; 2507 } 2508 2509 if (vlan_type == ETH_VLAN_TYPE_OUTER) { 2510 switch (tpid) { 2511 case RTE_ETHER_TYPE_QINQ: 2512 bp->outer_tpid_bd = 2513 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; 2514 break; 2515 case RTE_ETHER_TYPE_VLAN: 2516 bp->outer_tpid_bd = 2517 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; 2518 break; 2519 case RTE_ETHER_TYPE_QINQ1: 2520 bp->outer_tpid_bd = 2521 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; 2522 break; 2523 case RTE_ETHER_TYPE_QINQ2: 2524 bp->outer_tpid_bd = 2525 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; 2526 break; 2527 case RTE_ETHER_TYPE_QINQ3: 2528 bp->outer_tpid_bd = 2529 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; 2530 break; 2531 default: 2532 PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); 2533 return -EINVAL; 2534 } 2535 bp->outer_tpid_bd |= tpid; 2536 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); 2537 } else if (vlan_type == ETH_VLAN_TYPE_INNER) { 2538 PMD_DRV_LOG(ERR, 2539 "Can accelerate only outer vlan in QinQ\n"); 2540 return -EINVAL; 2541 } 2542 2543 return 0; 2544 } 2545 2546 static int 2547 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 2548 struct rte_ether_addr *addr) 2549 { 2550 struct bnxt *bp = dev->data->dev_private; 2551 /* Default Filter is tied to VNIC 0 */ 2552 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2553 int rc; 2554 2555 rc = is_bnxt_in_error(bp); 2556 if (rc) 2557 return rc; 2558 2559 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 2560 return -EPERM; 2561 2562 if (rte_is_zero_ether_addr(addr)) 2563 return -EINVAL; 2564 2565 /* Filter settings will get applied when port is started */ 2566 if (!dev->data->dev_started) 2567 return 0; 2568 2569 /* Check if the requested MAC is already added */ 2570 if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0) 2571 return 0; 2572 2573 /* Destroy filter and re-create it */ 2574 bnxt_del_dflt_mac_filter(bp, vnic); 2575 2576 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 2577 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 2578 /* This filter will allow only untagged packets */ 2579 rc = bnxt_add_vlan_filter(bp, 0); 2580 } else { 2581 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0); 2582 } 2583 2584 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 2585 return rc; 2586 } 2587 2588 static int 2589 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 2590 struct rte_ether_addr *mc_addr_set, 2591 uint32_t nb_mc_addr) 2592 { 2593 struct bnxt *bp = eth_dev->data->dev_private; 2594 char *mc_addr_list = (char *)mc_addr_set; 2595 struct bnxt_vnic_info *vnic; 2596 uint32_t off = 0, i = 0; 2597 int rc; 2598 2599 rc = is_bnxt_in_error(bp); 2600 if (rc) 2601 return rc; 2602 2603 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2604 2605 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 2606 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 2607 goto allmulti; 2608 } 2609 2610 /* TODO Check for Duplicate mcast addresses */ 2611 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 2612 for (i = 0; i < nb_mc_addr; i++) { 2613 memcpy(vnic->mc_list + off, &mc_addr_list[i], 2614 RTE_ETHER_ADDR_LEN); 2615 off += RTE_ETHER_ADDR_LEN; 2616 } 2617 2618 vnic->mc_addr_cnt = i; 2619 if (vnic->mc_addr_cnt) 2620 vnic->flags |= BNXT_VNIC_INFO_MCAST; 2621 else 2622 vnic->flags &= ~BNXT_VNIC_INFO_MCAST; 2623 2624 allmulti: 2625 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2626 } 2627 2628 static int 2629 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 2630 { 2631 struct bnxt *bp = 
dev->data->dev_private; 2632 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 2633 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 2634 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 2635 uint8_t fw_rsvd = bp->fw_ver & 0xff; 2636 int ret; 2637 2638 ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", 2639 fw_major, fw_minor, fw_updt, fw_rsvd); 2640 2641 ret += 1; /* add the size of '\0' */ 2642 if (fw_size < (uint32_t)ret) 2643 return ret; 2644 else 2645 return 0; 2646 } 2647 2648 static void 2649 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2650 struct rte_eth_rxq_info *qinfo) 2651 { 2652 struct bnxt *bp = dev->data->dev_private; 2653 struct bnxt_rx_queue *rxq; 2654 2655 if (is_bnxt_in_error(bp)) 2656 return; 2657 2658 rxq = dev->data->rx_queues[queue_id]; 2659 2660 qinfo->mp = rxq->mb_pool; 2661 qinfo->scattered_rx = dev->data->scattered_rx; 2662 qinfo->nb_desc = rxq->nb_rx_desc; 2663 2664 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 2665 qinfo->conf.rx_drop_en = rxq->drop_en; 2666 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 2667 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 2668 } 2669 2670 static void 2671 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2672 struct rte_eth_txq_info *qinfo) 2673 { 2674 struct bnxt *bp = dev->data->dev_private; 2675 struct bnxt_tx_queue *txq; 2676 2677 if (is_bnxt_in_error(bp)) 2678 return; 2679 2680 txq = dev->data->tx_queues[queue_id]; 2681 2682 qinfo->nb_desc = txq->nb_tx_desc; 2683 2684 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 2685 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 2686 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 2687 2688 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 2689 qinfo->conf.tx_rs_thresh = 0; 2690 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 2691 qinfo->conf.offloads = txq->offloads; 2692 } 2693 2694 static const struct { 2695 eth_rx_burst_t pkt_burst; 2696 const char *info; 2697 } bnxt_rx_burst_info[] = { 2698 {bnxt_recv_pkts, "Scalar"}, 2699 #if defined(RTE_ARCH_X86) 2700 {bnxt_recv_pkts_vec, "Vector SSE"}, 2701 #elif defined(RTE_ARCH_ARM64) 2702 {bnxt_recv_pkts_vec, "Vector Neon"}, 2703 #endif 2704 }; 2705 2706 static int 2707 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2708 struct rte_eth_burst_mode *mode) 2709 { 2710 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 2711 size_t i; 2712 2713 for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) { 2714 if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) { 2715 snprintf(mode->info, sizeof(mode->info), "%s", 2716 bnxt_rx_burst_info[i].info); 2717 return 0; 2718 } 2719 } 2720 2721 return -EINVAL; 2722 } 2723 2724 static const struct { 2725 eth_tx_burst_t pkt_burst; 2726 const char *info; 2727 } bnxt_tx_burst_info[] = { 2728 {bnxt_xmit_pkts, "Scalar"}, 2729 #if defined(RTE_ARCH_X86) 2730 {bnxt_xmit_pkts_vec, "Vector SSE"}, 2731 #elif defined(RTE_ARCH_ARM64) 2732 {bnxt_xmit_pkts_vec, "Vector Neon"}, 2733 #endif 2734 }; 2735 2736 static int 2737 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2738 struct rte_eth_burst_mode *mode) 2739 { 2740 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 2741 size_t i; 2742 2743 for (i = 0; i < RTE_DIM(bnxt_tx_burst_info); i++) { 2744 if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) { 2745 snprintf(mode->info, sizeof(mode->info), "%s", 2746 bnxt_tx_burst_info[i].info); 2747 return 0; 2748 } 2749 } 2750 2751 return -EINVAL; 2752 } 2753 2754 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 2755 { 
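	/*
	 * Validate the new MTU and apply it: bail out if the device is in
	 * an error state or no Rx queues are configured yet, refuse a
	 * change that would need scattered Rx while the port is running
	 * without it, update the jumbo-frame flag/offload, reprogram each
	 * VNIC's MRU (and Rx placement when the mbuf data room is smaller
	 * than the new MTU), then record the new max_rx_pkt_len.
	 */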
2756 struct bnxt *bp = eth_dev->data->dev_private; 2757 uint32_t new_pkt_size; 2758 uint32_t rc = 0; 2759 uint32_t i; 2760 2761 rc = is_bnxt_in_error(bp); 2762 if (rc) 2763 return rc; 2764 2765 /* Exit if receive queues are not configured yet */ 2766 if (!eth_dev->data->nb_rx_queues) 2767 return rc; 2768 2769 new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 2770 VLAN_TAG_SIZE * BNXT_NUM_VLANS; 2771 2772 /* 2773 * Disallow any MTU change that would require scattered receive support 2774 * if it is not already enabled. 2775 */ 2776 if (eth_dev->data->dev_started && 2777 !eth_dev->data->scattered_rx && 2778 (new_pkt_size > 2779 eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 2780 PMD_DRV_LOG(ERR, 2781 "MTU change would require scattered rx support. "); 2782 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n"); 2783 return -EINVAL; 2784 } 2785 2786 if (new_mtu > RTE_ETHER_MTU) { 2787 bp->flags |= BNXT_FLAG_JUMBO; 2788 bp->eth_dev->data->dev_conf.rxmode.offloads |= 2789 DEV_RX_OFFLOAD_JUMBO_FRAME; 2790 } else { 2791 bp->eth_dev->data->dev_conf.rxmode.offloads &= 2792 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 2793 bp->flags &= ~BNXT_FLAG_JUMBO; 2794 } 2795 2796 /* Is there a change in mtu setting? */ 2797 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size) 2798 return rc; 2799 2800 for (i = 0; i < bp->nr_vnics; i++) { 2801 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2802 uint16_t size = 0; 2803 2804 vnic->mru = BNXT_VNIC_MRU(new_mtu); 2805 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 2806 if (rc) 2807 break; 2808 2809 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); 2810 size -= RTE_PKTMBUF_HEADROOM; 2811 2812 if (size < new_mtu) { 2813 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 2814 if (rc) 2815 return rc; 2816 } 2817 } 2818 2819 if (!rc) 2820 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size; 2821 2822 PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); 2823 2824 return rc; 2825 } 2826 2827 static int 2828 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 2829 { 2830 struct bnxt *bp = dev->data->dev_private; 2831 uint16_t vlan = bp->vlan; 2832 int rc; 2833 2834 rc = is_bnxt_in_error(bp); 2835 if (rc) 2836 return rc; 2837 2838 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { 2839 PMD_DRV_LOG(ERR, 2840 "PVID cannot be modified for this function\n"); 2841 return -ENOTSUP; 2842 } 2843 bp->vlan = on ? 
pvid : 0; 2844 2845 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 2846 if (rc) 2847 bp->vlan = vlan; 2848 return rc; 2849 } 2850 2851 static int 2852 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 2853 { 2854 struct bnxt *bp = dev->data->dev_private; 2855 int rc; 2856 2857 rc = is_bnxt_in_error(bp); 2858 if (rc) 2859 return rc; 2860 2861 return bnxt_hwrm_port_led_cfg(bp, true); 2862 } 2863 2864 static int 2865 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 2866 { 2867 struct bnxt *bp = dev->data->dev_private; 2868 int rc; 2869 2870 rc = is_bnxt_in_error(bp); 2871 if (rc) 2872 return rc; 2873 2874 return bnxt_hwrm_port_led_cfg(bp, false); 2875 } 2876 2877 static uint32_t 2878 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id) 2879 { 2880 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2881 uint32_t desc = 0, raw_cons = 0, cons; 2882 struct bnxt_cp_ring_info *cpr; 2883 struct bnxt_rx_queue *rxq; 2884 struct rx_pkt_cmpl *rxcmp; 2885 int rc; 2886 2887 rc = is_bnxt_in_error(bp); 2888 if (rc) 2889 return rc; 2890 2891 rxq = dev->data->rx_queues[rx_queue_id]; 2892 cpr = rxq->cp_ring; 2893 raw_cons = cpr->cp_raw_cons; 2894 2895 while (1) { 2896 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 2897 rte_prefetch0(&cpr->cp_desc_ring[cons]); 2898 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 2899 2900 if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) { 2901 break; 2902 } else { 2903 raw_cons++; 2904 desc++; 2905 } 2906 } 2907 2908 return desc; 2909 } 2910 2911 static int 2912 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 2913 { 2914 struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue; 2915 struct bnxt_rx_ring_info *rxr; 2916 struct bnxt_cp_ring_info *cpr; 2917 struct rte_mbuf *rx_buf; 2918 struct rx_pkt_cmpl *rxcmp; 2919 uint32_t cons, cp_cons; 2920 int rc; 2921 2922 if (!rxq) 2923 return -EINVAL; 2924 2925 rc = is_bnxt_in_error(rxq->bp); 2926 if (rc) 2927 return rc; 2928 2929 cpr = rxq->cp_ring; 2930 rxr = rxq->rx_ring; 2931 2932 if (offset >= rxq->nb_rx_desc) 2933 return -EINVAL; 2934 2935 cons = RING_CMP(cpr->cp_ring_struct, offset); 2936 cp_cons = cpr->cp_raw_cons; 2937 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 2938 2939 if (cons > cp_cons) { 2940 if (CMPL_VALID(rxcmp, cpr->valid)) 2941 return RTE_ETH_RX_DESC_DONE; 2942 } else { 2943 if (CMPL_VALID(rxcmp, !cpr->valid)) 2944 return RTE_ETH_RX_DESC_DONE; 2945 } 2946 rx_buf = rxr->rx_buf_ring[cons]; 2947 if (rx_buf == NULL || rx_buf == &rxq->fake_mbuf) 2948 return RTE_ETH_RX_DESC_UNAVAIL; 2949 2950 2951 return RTE_ETH_RX_DESC_AVAIL; 2952 } 2953 2954 static int 2955 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 2956 { 2957 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 2958 struct bnxt_tx_ring_info *txr; 2959 struct bnxt_cp_ring_info *cpr; 2960 struct bnxt_sw_tx_bd *tx_buf; 2961 struct tx_pkt_cmpl *txcmp; 2962 uint32_t cons, cp_cons; 2963 int rc; 2964 2965 if (!txq) 2966 return -EINVAL; 2967 2968 rc = is_bnxt_in_error(txq->bp); 2969 if (rc) 2970 return rc; 2971 2972 cpr = txq->cp_ring; 2973 txr = txq->tx_ring; 2974 2975 if (offset >= txq->nb_tx_desc) 2976 return -EINVAL; 2977 2978 cons = RING_CMP(cpr->cp_ring_struct, offset); 2979 txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 2980 cp_cons = cpr->cp_raw_cons; 2981 2982 if (cons > cp_cons) { 2983 if (CMPL_VALID(txcmp, cpr->valid)) 2984 return RTE_ETH_TX_DESC_UNAVAIL; 2985 } else { 2986 if (CMPL_VALID(txcmp, !cpr->valid)) 2987 return RTE_ETH_TX_DESC_UNAVAIL; 2988 } 2989 tx_buf = 
&txr->tx_buf_ring[cons]; 2990 if (tx_buf->mbuf == NULL) 2991 return RTE_ETH_TX_DESC_DONE; 2992 2993 return RTE_ETH_TX_DESC_FULL; 2994 } 2995 2996 static struct bnxt_filter_info * 2997 bnxt_match_and_validate_ether_filter(struct bnxt *bp, 2998 struct rte_eth_ethertype_filter *efilter, 2999 struct bnxt_vnic_info *vnic0, 3000 struct bnxt_vnic_info *vnic, 3001 int *ret) 3002 { 3003 struct bnxt_filter_info *mfilter = NULL; 3004 int match = 0; 3005 *ret = 0; 3006 3007 if (efilter->ether_type == RTE_ETHER_TYPE_IPV4 || 3008 efilter->ether_type == RTE_ETHER_TYPE_IPV6) { 3009 PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in" 3010 " ethertype filter.", efilter->ether_type); 3011 *ret = -EINVAL; 3012 goto exit; 3013 } 3014 if (efilter->queue >= bp->rx_nr_rings) { 3015 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue); 3016 *ret = -EINVAL; 3017 goto exit; 3018 } 3019 3020 vnic0 = BNXT_GET_DEFAULT_VNIC(bp); 3021 vnic = &bp->vnic_info[efilter->queue]; 3022 if (vnic == NULL) { 3023 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue); 3024 *ret = -EINVAL; 3025 goto exit; 3026 } 3027 3028 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { 3029 STAILQ_FOREACH(mfilter, &vnic0->filter, next) { 3030 if ((!memcmp(efilter->mac_addr.addr_bytes, 3031 mfilter->l2_addr, RTE_ETHER_ADDR_LEN) && 3032 mfilter->flags == 3033 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP && 3034 mfilter->ethertype == efilter->ether_type)) { 3035 match = 1; 3036 break; 3037 } 3038 } 3039 } else { 3040 STAILQ_FOREACH(mfilter, &vnic->filter, next) 3041 if ((!memcmp(efilter->mac_addr.addr_bytes, 3042 mfilter->l2_addr, RTE_ETHER_ADDR_LEN) && 3043 mfilter->ethertype == efilter->ether_type && 3044 mfilter->flags == 3045 HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) { 3046 match = 1; 3047 break; 3048 } 3049 } 3050 3051 if (match) 3052 *ret = -EEXIST; 3053 3054 exit: 3055 return mfilter; 3056 } 3057 3058 static int 3059 bnxt_ethertype_filter(struct rte_eth_dev *dev, 3060 enum rte_filter_op filter_op, 3061 void *arg) 3062 { 3063 struct bnxt *bp = dev->data->dev_private; 3064 struct rte_eth_ethertype_filter *efilter = 3065 (struct rte_eth_ethertype_filter *)arg; 3066 struct bnxt_filter_info *bfilter, *filter1; 3067 struct bnxt_vnic_info *vnic, *vnic0; 3068 int ret; 3069 3070 if (filter_op == RTE_ETH_FILTER_NOP) 3071 return 0; 3072 3073 if (arg == NULL) { 3074 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 3075 filter_op); 3076 return -EINVAL; 3077 } 3078 3079 vnic0 = BNXT_GET_DEFAULT_VNIC(bp); 3080 vnic = &bp->vnic_info[efilter->queue]; 3081 3082 switch (filter_op) { 3083 case RTE_ETH_FILTER_ADD: 3084 bnxt_match_and_validate_ether_filter(bp, efilter, 3085 vnic0, vnic, &ret); 3086 if (ret < 0) 3087 return ret; 3088 3089 bfilter = bnxt_get_unused_filter(bp); 3090 if (bfilter == NULL) { 3091 PMD_DRV_LOG(ERR, 3092 "Not enough resources for a new filter.\n"); 3093 return -ENOMEM; 3094 } 3095 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; 3096 memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes, 3097 RTE_ETHER_ADDR_LEN); 3098 memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes, 3099 RTE_ETHER_ADDR_LEN); 3100 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; 3101 bfilter->ethertype = efilter->ether_type; 3102 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3103 3104 filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0); 3105 if (filter1 == NULL) { 3106 ret = -EINVAL; 3107 goto cleanup; 3108 } 3109 bfilter->enables |= 3110 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 3111 bfilter->fw_l2_filter_id = 
filter1->fw_l2_filter_id; 3112 3113 bfilter->dst_id = vnic->fw_vnic_id; 3114 3115 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { 3116 bfilter->flags = 3117 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 3118 } 3119 3120 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); 3121 if (ret) 3122 goto cleanup; 3123 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); 3124 break; 3125 case RTE_ETH_FILTER_DELETE: 3126 filter1 = bnxt_match_and_validate_ether_filter(bp, efilter, 3127 vnic0, vnic, &ret); 3128 if (ret == -EEXIST) { 3129 ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1); 3130 3131 STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info, 3132 next); 3133 bnxt_free_filter(bp, filter1); 3134 } else if (ret == 0) { 3135 PMD_DRV_LOG(ERR, "No matching filter found\n"); 3136 } 3137 break; 3138 default: 3139 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 3140 ret = -EINVAL; 3141 goto error; 3142 } 3143 return ret; 3144 cleanup: 3145 bnxt_free_filter(bp, bfilter); 3146 error: 3147 return ret; 3148 } 3149 3150 static inline int 3151 parse_ntuple_filter(struct bnxt *bp, 3152 struct rte_eth_ntuple_filter *nfilter, 3153 struct bnxt_filter_info *bfilter) 3154 { 3155 uint32_t en = 0; 3156 3157 if (nfilter->queue >= bp->rx_nr_rings) { 3158 PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue); 3159 return -EINVAL; 3160 } 3161 3162 switch (nfilter->dst_port_mask) { 3163 case UINT16_MAX: 3164 bfilter->dst_port_mask = -1; 3165 bfilter->dst_port = nfilter->dst_port; 3166 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT | 3167 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 3168 break; 3169 default: 3170 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 3171 return -EINVAL; 3172 } 3173 3174 bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 3175 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3176 3177 switch (nfilter->proto_mask) { 3178 case UINT8_MAX: 3179 if (nfilter->proto == 17) /* IPPROTO_UDP */ 3180 bfilter->ip_protocol = 17; 3181 else if (nfilter->proto == 6) /* IPPROTO_TCP */ 3182 bfilter->ip_protocol = 6; 3183 else 3184 return -EINVAL; 3185 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3186 break; 3187 default: 3188 PMD_DRV_LOG(ERR, "invalid protocol mask."); 3189 return -EINVAL; 3190 } 3191 3192 switch (nfilter->dst_ip_mask) { 3193 case UINT32_MAX: 3194 bfilter->dst_ipaddr_mask[0] = -1; 3195 bfilter->dst_ipaddr[0] = nfilter->dst_ip; 3196 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR | 3197 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3198 break; 3199 default: 3200 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 3201 return -EINVAL; 3202 } 3203 3204 switch (nfilter->src_ip_mask) { 3205 case UINT32_MAX: 3206 bfilter->src_ipaddr_mask[0] = -1; 3207 bfilter->src_ipaddr[0] = nfilter->src_ip; 3208 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | 3209 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3210 break; 3211 default: 3212 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 3213 return -EINVAL; 3214 } 3215 3216 switch (nfilter->src_port_mask) { 3217 case UINT16_MAX: 3218 bfilter->src_port_mask = -1; 3219 bfilter->src_port = nfilter->src_port; 3220 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | 3221 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 3222 break; 3223 default: 3224 PMD_DRV_LOG(ERR, "invalid src_port mask."); 3225 return -EINVAL; 3226 } 3227 3228 bfilter->enables = en; 3229 return 0; 3230 } 3231 3232 static struct bnxt_filter_info* 3233 bnxt_match_ntuple_filter(struct bnxt *bp, 3234 struct bnxt_filter_info *bfilter, 3235 struct bnxt_vnic_info **mvnic) 3236 { 3237 struct bnxt_filter_info *mfilter = NULL; 3238 int i; 3239 
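	/*
	 * Walk the per-VNIC filter lists from the last VNIC to the first
	 * and return the first ntuple filter whose addresses, ports,
	 * masks, flags and enables all match 'bfilter'; the owning VNIC
	 * is reported through 'mvnic' when requested.
	 */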
3240 for (i = bp->nr_vnics - 1; i >= 0; i--) { 3241 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3242 STAILQ_FOREACH(mfilter, &vnic->filter, next) { 3243 if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] && 3244 bfilter->src_ipaddr_mask[0] == 3245 mfilter->src_ipaddr_mask[0] && 3246 bfilter->src_port == mfilter->src_port && 3247 bfilter->src_port_mask == mfilter->src_port_mask && 3248 bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] && 3249 bfilter->dst_ipaddr_mask[0] == 3250 mfilter->dst_ipaddr_mask[0] && 3251 bfilter->dst_port == mfilter->dst_port && 3252 bfilter->dst_port_mask == mfilter->dst_port_mask && 3253 bfilter->flags == mfilter->flags && 3254 bfilter->enables == mfilter->enables) { 3255 if (mvnic) 3256 *mvnic = vnic; 3257 return mfilter; 3258 } 3259 } 3260 } 3261 return NULL; 3262 } 3263 3264 static int 3265 bnxt_cfg_ntuple_filter(struct bnxt *bp, 3266 struct rte_eth_ntuple_filter *nfilter, 3267 enum rte_filter_op filter_op) 3268 { 3269 struct bnxt_filter_info *bfilter, *mfilter, *filter1; 3270 struct bnxt_vnic_info *vnic, *vnic0, *mvnic; 3271 int ret; 3272 3273 if (nfilter->flags != RTE_5TUPLE_FLAGS) { 3274 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 3275 return -EINVAL; 3276 } 3277 3278 if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { 3279 PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n"); 3280 return -EINVAL; 3281 } 3282 3283 bfilter = bnxt_get_unused_filter(bp); 3284 if (bfilter == NULL) { 3285 PMD_DRV_LOG(ERR, 3286 "Not enough resources for a new filter.\n"); 3287 return -ENOMEM; 3288 } 3289 ret = parse_ntuple_filter(bp, nfilter, bfilter); 3290 if (ret < 0) 3291 goto free_filter; 3292 3293 vnic = &bp->vnic_info[nfilter->queue]; 3294 vnic0 = BNXT_GET_DEFAULT_VNIC(bp); 3295 filter1 = STAILQ_FIRST(&vnic0->filter); 3296 if (filter1 == NULL) { 3297 ret = -EINVAL; 3298 goto free_filter; 3299 } 3300 3301 bfilter->dst_id = vnic->fw_vnic_id; 3302 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; 3303 bfilter->enables |= 3304 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 3305 bfilter->ethertype = 0x800; 3306 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3307 3308 mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic); 3309 3310 if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && 3311 bfilter->dst_id == mfilter->dst_id) { 3312 PMD_DRV_LOG(ERR, "filter exists.\n"); 3313 ret = -EEXIST; 3314 goto free_filter; 3315 } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && 3316 bfilter->dst_id != mfilter->dst_id) { 3317 mfilter->dst_id = vnic->fw_vnic_id; 3318 ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter); 3319 STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next); 3320 STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next); 3321 PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n"); 3322 PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n"); 3323 goto free_filter; 3324 } 3325 if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) { 3326 PMD_DRV_LOG(ERR, "filter doesn't exist."); 3327 ret = -ENOENT; 3328 goto free_filter; 3329 } 3330 3331 if (filter_op == RTE_ETH_FILTER_ADD) { 3332 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; 3333 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); 3334 if (ret) 3335 goto free_filter; 3336 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); 3337 } else { 3338 if (mfilter == NULL) { 3339 /* This should not happen. But for Coverity! 
*/ 3340 ret = -ENOENT; 3341 goto free_filter; 3342 } 3343 ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter); 3344 3345 STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next); 3346 bnxt_free_filter(bp, mfilter); 3347 bnxt_free_filter(bp, bfilter); 3348 } 3349 3350 return 0; 3351 free_filter: 3352 bnxt_free_filter(bp, bfilter); 3353 return ret; 3354 } 3355 3356 static int 3357 bnxt_ntuple_filter(struct rte_eth_dev *dev, 3358 enum rte_filter_op filter_op, 3359 void *arg) 3360 { 3361 struct bnxt *bp = dev->data->dev_private; 3362 int ret; 3363 3364 if (filter_op == RTE_ETH_FILTER_NOP) 3365 return 0; 3366 3367 if (arg == NULL) { 3368 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 3369 filter_op); 3370 return -EINVAL; 3371 } 3372 3373 switch (filter_op) { 3374 case RTE_ETH_FILTER_ADD: 3375 ret = bnxt_cfg_ntuple_filter(bp, 3376 (struct rte_eth_ntuple_filter *)arg, 3377 filter_op); 3378 break; 3379 case RTE_ETH_FILTER_DELETE: 3380 ret = bnxt_cfg_ntuple_filter(bp, 3381 (struct rte_eth_ntuple_filter *)arg, 3382 filter_op); 3383 break; 3384 default: 3385 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 3386 ret = -EINVAL; 3387 break; 3388 } 3389 return ret; 3390 } 3391 3392 static int 3393 bnxt_parse_fdir_filter(struct bnxt *bp, 3394 struct rte_eth_fdir_filter *fdir, 3395 struct bnxt_filter_info *filter) 3396 { 3397 enum rte_fdir_mode fdir_mode = 3398 bp->eth_dev->data->dev_conf.fdir_conf.mode; 3399 struct bnxt_vnic_info *vnic0, *vnic; 3400 struct bnxt_filter_info *filter1; 3401 uint32_t en = 0; 3402 int i; 3403 3404 if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3405 return -EINVAL; 3406 3407 filter->l2_ovlan = fdir->input.flow_ext.vlan_tci; 3408 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; 3409 3410 switch (fdir->input.flow_type) { 3411 case RTE_ETH_FLOW_IPV4: 3412 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 3413 /* FALLTHROUGH */ 3414 filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip; 3415 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3416 filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip; 3417 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3418 filter->ip_protocol = fdir->input.flow.ip4_flow.proto; 3419 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3420 filter->ip_addr_type = 3421 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 3422 filter->src_ipaddr_mask[0] = 0xffffffff; 3423 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3424 filter->dst_ipaddr_mask[0] = 0xffffffff; 3425 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3426 filter->ethertype = 0x800; 3427 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3428 break; 3429 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 3430 filter->src_port = fdir->input.flow.tcp4_flow.src_port; 3431 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 3432 filter->dst_port = fdir->input.flow.tcp4_flow.dst_port; 3433 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 3434 filter->dst_port_mask = 0xffff; 3435 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 3436 filter->src_port_mask = 0xffff; 3437 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 3438 filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip; 3439 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3440 filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip; 3441 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3442 filter->ip_protocol = 6; 3443 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3444 filter->ip_addr_type = 3445 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 3446 filter->src_ipaddr_mask[0] = 0xffffffff; 3447 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3448 filter->dst_ipaddr_mask[0] = 
0xffffffff; 3449 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3450 filter->ethertype = 0x800; 3451 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3452 break; 3453 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 3454 filter->src_port = fdir->input.flow.udp4_flow.src_port; 3455 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 3456 filter->dst_port = fdir->input.flow.udp4_flow.dst_port; 3457 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 3458 filter->dst_port_mask = 0xffff; 3459 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 3460 filter->src_port_mask = 0xffff; 3461 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 3462 filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip; 3463 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3464 filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip; 3465 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3466 filter->ip_protocol = 17; 3467 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3468 filter->ip_addr_type = 3469 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 3470 filter->src_ipaddr_mask[0] = 0xffffffff; 3471 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3472 filter->dst_ipaddr_mask[0] = 0xffffffff; 3473 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3474 filter->ethertype = 0x800; 3475 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3476 break; 3477 case RTE_ETH_FLOW_IPV6: 3478 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 3479 /* FALLTHROUGH */ 3480 filter->ip_addr_type = 3481 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 3482 filter->ip_protocol = fdir->input.flow.ipv6_flow.proto; 3483 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3484 rte_memcpy(filter->src_ipaddr, 3485 fdir->input.flow.ipv6_flow.src_ip, 16); 3486 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3487 rte_memcpy(filter->dst_ipaddr, 3488 fdir->input.flow.ipv6_flow.dst_ip, 16); 3489 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3490 memset(filter->dst_ipaddr_mask, 0xff, 16); 3491 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3492 memset(filter->src_ipaddr_mask, 0xff, 16); 3493 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3494 filter->ethertype = 0x86dd; 3495 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3496 break; 3497 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 3498 filter->src_port = fdir->input.flow.tcp6_flow.src_port; 3499 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 3500 filter->dst_port = fdir->input.flow.tcp6_flow.dst_port; 3501 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 3502 filter->dst_port_mask = 0xffff; 3503 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 3504 filter->src_port_mask = 0xffff; 3505 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 3506 filter->ip_addr_type = 3507 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 3508 filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto; 3509 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3510 rte_memcpy(filter->src_ipaddr, 3511 fdir->input.flow.tcp6_flow.ip.src_ip, 16); 3512 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3513 rte_memcpy(filter->dst_ipaddr, 3514 fdir->input.flow.tcp6_flow.ip.dst_ip, 16); 3515 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3516 memset(filter->dst_ipaddr_mask, 0xff, 16); 3517 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3518 memset(filter->src_ipaddr_mask, 0xff, 16); 3519 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3520 filter->ethertype = 0x86dd; 3521 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3522 break; 3523 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 3524 filter->src_port = fdir->input.flow.udp6_flow.src_port; 3525 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 3526 filter->dst_port = 
fdir->input.flow.udp6_flow.dst_port; 3527 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 3528 filter->dst_port_mask = 0xffff; 3529 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 3530 filter->src_port_mask = 0xffff; 3531 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 3532 filter->ip_addr_type = 3533 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 3534 filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto; 3535 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3536 rte_memcpy(filter->src_ipaddr, 3537 fdir->input.flow.udp6_flow.ip.src_ip, 16); 3538 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3539 rte_memcpy(filter->dst_ipaddr, 3540 fdir->input.flow.udp6_flow.ip.dst_ip, 16); 3541 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3542 memset(filter->dst_ipaddr_mask, 0xff, 16); 3543 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3544 memset(filter->src_ipaddr_mask, 0xff, 16); 3545 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3546 filter->ethertype = 0x86dd; 3547 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3548 break; 3549 case RTE_ETH_FLOW_L2_PAYLOAD: 3550 filter->ethertype = fdir->input.flow.l2_flow.ether_type; 3551 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3552 break; 3553 case RTE_ETH_FLOW_VXLAN: 3554 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 3555 return -EINVAL; 3556 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 3557 filter->tunnel_type = 3558 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 3559 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 3560 break; 3561 case RTE_ETH_FLOW_NVGRE: 3562 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 3563 return -EINVAL; 3564 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 3565 filter->tunnel_type = 3566 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 3567 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 3568 break; 3569 case RTE_ETH_FLOW_UNKNOWN: 3570 case RTE_ETH_FLOW_RAW: 3571 case RTE_ETH_FLOW_FRAG_IPV4: 3572 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: 3573 case RTE_ETH_FLOW_FRAG_IPV6: 3574 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: 3575 case RTE_ETH_FLOW_IPV6_EX: 3576 case RTE_ETH_FLOW_IPV6_TCP_EX: 3577 case RTE_ETH_FLOW_IPV6_UDP_EX: 3578 case RTE_ETH_FLOW_GENEVE: 3579 /* FALLTHROUGH */ 3580 default: 3581 return -EINVAL; 3582 } 3583 3584 vnic0 = BNXT_GET_DEFAULT_VNIC(bp); 3585 vnic = &bp->vnic_info[fdir->action.rx_queue]; 3586 if (vnic == NULL) { 3587 PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue); 3588 return -EINVAL; 3589 } 3590 3591 if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3592 rte_memcpy(filter->dst_macaddr, 3593 fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6); 3594 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; 3595 } 3596 3597 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) { 3598 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 3599 filter1 = STAILQ_FIRST(&vnic0->filter); 3600 //filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 3601 } else { 3602 filter->dst_id = vnic->fw_vnic_id; 3603 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 3604 if (filter->dst_macaddr[i] == 0x00) 3605 filter1 = STAILQ_FIRST(&vnic0->filter); 3606 else 3607 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 3608 } 3609 3610 if (filter1 == NULL) 3611 return -EINVAL; 3612 3613 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 3614 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 3615 3616 filter->enables = en; 3617 3618 return 0; 3619 } 3620 3621 static struct bnxt_filter_info * 3622 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf, 3623 struct bnxt_vnic_info **mvnic) 
3624 { 3625 struct bnxt_filter_info *mf = NULL; 3626 int i; 3627 3628 for (i = bp->nr_vnics - 1; i >= 0; i--) { 3629 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3630 3631 STAILQ_FOREACH(mf, &vnic->filter, next) { 3632 if (mf->filter_type == nf->filter_type && 3633 mf->flags == nf->flags && 3634 mf->src_port == nf->src_port && 3635 mf->src_port_mask == nf->src_port_mask && 3636 mf->dst_port == nf->dst_port && 3637 mf->dst_port_mask == nf->dst_port_mask && 3638 mf->ip_protocol == nf->ip_protocol && 3639 mf->ip_addr_type == nf->ip_addr_type && 3640 mf->ethertype == nf->ethertype && 3641 mf->vni == nf->vni && 3642 mf->tunnel_type == nf->tunnel_type && 3643 mf->l2_ovlan == nf->l2_ovlan && 3644 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 3645 mf->l2_ivlan == nf->l2_ivlan && 3646 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 3647 !memcmp(mf->l2_addr, nf->l2_addr, 3648 RTE_ETHER_ADDR_LEN) && 3649 !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, 3650 RTE_ETHER_ADDR_LEN) && 3651 !memcmp(mf->src_macaddr, nf->src_macaddr, 3652 RTE_ETHER_ADDR_LEN) && 3653 !memcmp(mf->dst_macaddr, nf->dst_macaddr, 3654 RTE_ETHER_ADDR_LEN) && 3655 !memcmp(mf->src_ipaddr, nf->src_ipaddr, 3656 sizeof(nf->src_ipaddr)) && 3657 !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, 3658 sizeof(nf->src_ipaddr_mask)) && 3659 !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, 3660 sizeof(nf->dst_ipaddr)) && 3661 !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, 3662 sizeof(nf->dst_ipaddr_mask))) { 3663 if (mvnic) 3664 *mvnic = vnic; 3665 return mf; 3666 } 3667 } 3668 } 3669 return NULL; 3670 } 3671 3672 static int 3673 bnxt_fdir_filter(struct rte_eth_dev *dev, 3674 enum rte_filter_op filter_op, 3675 void *arg) 3676 { 3677 struct bnxt *bp = dev->data->dev_private; 3678 struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg; 3679 struct bnxt_filter_info *filter, *match; 3680 struct bnxt_vnic_info *vnic, *mvnic; 3681 int ret = 0, i; 3682 3683 if (filter_op == RTE_ETH_FILTER_NOP) 3684 return 0; 3685 3686 if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) 3687 return -EINVAL; 3688 3689 switch (filter_op) { 3690 case RTE_ETH_FILTER_ADD: 3691 case RTE_ETH_FILTER_DELETE: 3692 /* FALLTHROUGH */ 3693 filter = bnxt_get_unused_filter(bp); 3694 if (filter == NULL) { 3695 PMD_DRV_LOG(ERR, 3696 "Not enough resources for a new flow.\n"); 3697 return -ENOMEM; 3698 } 3699 3700 ret = bnxt_parse_fdir_filter(bp, fdir, filter); 3701 if (ret != 0) 3702 goto free_filter; 3703 filter->filter_type = HWRM_CFA_NTUPLE_FILTER; 3704 3705 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 3706 vnic = &bp->vnic_info[0]; 3707 else 3708 vnic = &bp->vnic_info[fdir->action.rx_queue]; 3709 3710 match = bnxt_match_fdir(bp, filter, &mvnic); 3711 if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) { 3712 if (match->dst_id == vnic->fw_vnic_id) { 3713 PMD_DRV_LOG(ERR, "Flow already exists.\n"); 3714 ret = -EEXIST; 3715 goto free_filter; 3716 } else { 3717 match->dst_id = vnic->fw_vnic_id; 3718 ret = bnxt_hwrm_set_ntuple_filter(bp, 3719 match->dst_id, 3720 match); 3721 STAILQ_REMOVE(&mvnic->filter, match, 3722 bnxt_filter_info, next); 3723 STAILQ_INSERT_TAIL(&vnic->filter, match, next); 3724 PMD_DRV_LOG(ERR, 3725 "Filter with matching pattern exist\n"); 3726 PMD_DRV_LOG(ERR, 3727 "Updated it to new destination q\n"); 3728 goto free_filter; 3729 } 3730 } 3731 if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) { 3732 PMD_DRV_LOG(ERR, "Flow does not exist.\n"); 3733 ret = -ENOENT; 3734 goto free_filter; 3735 } 3736 3737 if (filter_op == RTE_ETH_FILTER_ADD) { 3738 ret = 
bnxt_hwrm_set_ntuple_filter(bp, 3739 filter->dst_id, 3740 filter); 3741 if (ret) 3742 goto free_filter; 3743 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 3744 } else { 3745 ret = bnxt_hwrm_clear_ntuple_filter(bp, match); 3746 STAILQ_REMOVE(&vnic->filter, match, 3747 bnxt_filter_info, next); 3748 bnxt_free_filter(bp, match); 3749 bnxt_free_filter(bp, filter); 3750 } 3751 break; 3752 case RTE_ETH_FILTER_FLUSH: 3753 for (i = bp->nr_vnics - 1; i >= 0; i--) { 3754 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3755 3756 STAILQ_FOREACH(filter, &vnic->filter, next) { 3757 if (filter->filter_type == 3758 HWRM_CFA_NTUPLE_FILTER) { 3759 ret = 3760 bnxt_hwrm_clear_ntuple_filter(bp, 3761 filter); 3762 STAILQ_REMOVE(&vnic->filter, filter, 3763 bnxt_filter_info, next); 3764 } 3765 } 3766 } 3767 return ret; 3768 case RTE_ETH_FILTER_UPDATE: 3769 case RTE_ETH_FILTER_STATS: 3770 case RTE_ETH_FILTER_INFO: 3771 PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op); 3772 break; 3773 default: 3774 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); 3775 ret = -EINVAL; 3776 break; 3777 } 3778 return ret; 3779 3780 free_filter: 3781 bnxt_free_filter(bp, filter); 3782 return ret; 3783 } 3784 3785 int 3786 bnxt_filter_ctrl_op(struct rte_eth_dev *dev, 3787 enum rte_filter_type filter_type, 3788 enum rte_filter_op filter_op, void *arg) 3789 { 3790 struct bnxt *bp = dev->data->dev_private; 3791 int ret = 0; 3792 3793 if (!bp) 3794 return -EIO; 3795 3796 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) { 3797 struct bnxt_representor *vfr = dev->data->dev_private; 3798 bp = vfr->parent_dev->data->dev_private; 3799 /* parent is deleted while children are still valid */ 3800 if (!bp) { 3801 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error %d:%d\n", 3802 dev->data->port_id, 3803 filter_type, 3804 filter_op); 3805 return -EIO; 3806 } 3807 } 3808 3809 ret = is_bnxt_in_error(bp); 3810 if (ret) 3811 return ret; 3812 3813 switch (filter_type) { 3814 case RTE_ETH_FILTER_TUNNEL: 3815 PMD_DRV_LOG(ERR, 3816 "filter type: %d: To be implemented\n", filter_type); 3817 break; 3818 case RTE_ETH_FILTER_FDIR: 3819 ret = bnxt_fdir_filter(dev, filter_op, arg); 3820 break; 3821 case RTE_ETH_FILTER_NTUPLE: 3822 ret = bnxt_ntuple_filter(dev, filter_op, arg); 3823 break; 3824 case RTE_ETH_FILTER_ETHERTYPE: 3825 ret = bnxt_ethertype_filter(dev, filter_op, arg); 3826 break; 3827 case RTE_ETH_FILTER_GENERIC: 3828 if (filter_op != RTE_ETH_FILTER_GET) 3829 return -EINVAL; 3830 if (BNXT_TRUFLOW_EN(bp)) 3831 *(const void **)arg = &bnxt_ulp_rte_flow_ops; 3832 else 3833 *(const void **)arg = &bnxt_flow_ops; 3834 break; 3835 default: 3836 PMD_DRV_LOG(ERR, 3837 "Filter type (%d) not supported", filter_type); 3838 ret = -EINVAL; 3839 break; 3840 } 3841 return ret; 3842 } 3843 3844 static const uint32_t * 3845 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 3846 { 3847 static const uint32_t ptypes[] = { 3848 RTE_PTYPE_L2_ETHER_VLAN, 3849 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 3850 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 3851 RTE_PTYPE_L4_ICMP, 3852 RTE_PTYPE_L4_TCP, 3853 RTE_PTYPE_L4_UDP, 3854 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 3855 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 3856 RTE_PTYPE_INNER_L4_ICMP, 3857 RTE_PTYPE_INNER_L4_TCP, 3858 RTE_PTYPE_INNER_L4_UDP, 3859 RTE_PTYPE_UNKNOWN 3860 }; 3861 3862 if (!dev->rx_pkt_burst) 3863 return NULL; 3864 3865 return ptypes; 3866 } 3867 3868 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 3869 int reg_win) 3870 { 3871 uint32_t reg_base = *reg_arr & 0xfffff000; 3872 uint32_t win_off; 3873 int i; 3874 3875 
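	/*
	 * All registers in 'reg_arr' must fall within the same 4KB page;
	 * otherwise fail with -ERANGE. The shared page base is then
	 * written to the selected GRC window so the registers can be
	 * accessed through BAR0.
	 */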
for (i = 0; i < count; i++) { 3876 if ((reg_arr[i] & 0xfffff000) != reg_base) 3877 return -ERANGE; 3878 } 3879 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 3880 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 3881 return 0; 3882 } 3883 3884 static int bnxt_map_ptp_regs(struct bnxt *bp) 3885 { 3886 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3887 uint32_t *reg_arr; 3888 int rc, i; 3889 3890 reg_arr = ptp->rx_regs; 3891 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 3892 if (rc) 3893 return rc; 3894 3895 reg_arr = ptp->tx_regs; 3896 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 3897 if (rc) 3898 return rc; 3899 3900 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 3901 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 3902 3903 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 3904 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 3905 3906 return 0; 3907 } 3908 3909 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 3910 { 3911 rte_write32(0, (uint8_t *)bp->bar0 + 3912 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 3913 rte_write32(0, (uint8_t *)bp->bar0 + 3914 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 3915 } 3916 3917 static uint64_t bnxt_cc_read(struct bnxt *bp) 3918 { 3919 uint64_t ns; 3920 3921 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3922 BNXT_GRCPF_REG_SYNC_TIME)); 3923 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3924 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 3925 return ns; 3926 } 3927 3928 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 3929 { 3930 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3931 uint32_t fifo; 3932 3933 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3934 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3935 if (fifo & BNXT_PTP_TX_FIFO_EMPTY) 3936 return -EAGAIN; 3937 3938 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3939 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3940 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3941 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 3942 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3943 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 3944 3945 return 0; 3946 } 3947 3948 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 3949 { 3950 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3951 struct bnxt_pf_info *pf = bp->pf; 3952 uint16_t port_id; 3953 uint32_t fifo; 3954 3955 if (!ptp) 3956 return -ENODEV; 3957 3958 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3959 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3960 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 3961 return -EAGAIN; 3962 3963 port_id = pf->port_id; 3964 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3965 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3966 3967 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3968 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3969 if (fifo & BNXT_PTP_RX_FIFO_PENDING) { 3970 /* bnxt_clr_rx_ts(bp); TBD */ 3971 return -EBUSY; 3972 } 3973 3974 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3975 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3976 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3977 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3978 3979 return 0; 3980 } 3981 3982 static int 3983 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 3984 { 3985 uint64_t ns; 3986 struct bnxt *bp = dev->data->dev_private; 3987 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3988 3989 if (!ptp) 3990 return 0; 3991 3992 ns = rte_timespec_to_ns(ts); 3993 /* Set the timecounters to a new value. 
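 * Only the driver's software timecounter base is updated here; the
 * free-running hardware counter itself is left untouched.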
*/ 3994 ptp->tc.nsec = ns; 3995 3996 return 0; 3997 } 3998 3999 static int 4000 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 4001 { 4002 struct bnxt *bp = dev->data->dev_private; 4003 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 4004 uint64_t ns, systime_cycles = 0; 4005 int rc = 0; 4006 4007 if (!ptp) 4008 return 0; 4009 4010 if (BNXT_CHIP_THOR(bp)) 4011 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 4012 &systime_cycles); 4013 else 4014 systime_cycles = bnxt_cc_read(bp); 4015 4016 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 4017 *ts = rte_ns_to_timespec(ns); 4018 4019 return rc; 4020 } 4021 static int 4022 bnxt_timesync_enable(struct rte_eth_dev *dev) 4023 { 4024 struct bnxt *bp = dev->data->dev_private; 4025 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 4026 uint32_t shift = 0; 4027 int rc; 4028 4029 if (!ptp) 4030 return 0; 4031 4032 ptp->rx_filter = 1; 4033 ptp->tx_tstamp_en = 1; 4034 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 4035 4036 rc = bnxt_hwrm_ptp_cfg(bp); 4037 if (rc) 4038 return rc; 4039 4040 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 4041 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 4042 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 4043 4044 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 4045 ptp->tc.cc_shift = shift; 4046 ptp->tc.nsec_mask = (1ULL << shift) - 1; 4047 4048 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 4049 ptp->rx_tstamp_tc.cc_shift = shift; 4050 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 4051 4052 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 4053 ptp->tx_tstamp_tc.cc_shift = shift; 4054 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 4055 4056 if (!BNXT_CHIP_THOR(bp)) 4057 bnxt_map_ptp_regs(bp); 4058 4059 return 0; 4060 } 4061 4062 static int 4063 bnxt_timesync_disable(struct rte_eth_dev *dev) 4064 { 4065 struct bnxt *bp = dev->data->dev_private; 4066 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 4067 4068 if (!ptp) 4069 return 0; 4070 4071 ptp->rx_filter = 0; 4072 ptp->tx_tstamp_en = 0; 4073 ptp->rxctl = 0; 4074 4075 bnxt_hwrm_ptp_cfg(bp); 4076 4077 if (!BNXT_CHIP_THOR(bp)) 4078 bnxt_unmap_ptp_regs(bp); 4079 4080 return 0; 4081 } 4082 4083 static int 4084 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 4085 struct timespec *timestamp, 4086 uint32_t flags __rte_unused) 4087 { 4088 struct bnxt *bp = dev->data->dev_private; 4089 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 4090 uint64_t rx_tstamp_cycles = 0; 4091 uint64_t ns; 4092 4093 if (!ptp) 4094 return 0; 4095 4096 if (BNXT_CHIP_THOR(bp)) 4097 rx_tstamp_cycles = ptp->rx_timestamp; 4098 else 4099 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 4100 4101 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 4102 *timestamp = rte_ns_to_timespec(ns); 4103 return 0; 4104 } 4105 4106 static int 4107 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 4108 struct timespec *timestamp) 4109 { 4110 struct bnxt *bp = dev->data->dev_private; 4111 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 4112 uint64_t tx_tstamp_cycles = 0; 4113 uint64_t ns; 4114 int rc = 0; 4115 4116 if (!ptp) 4117 return 0; 4118 4119 if (BNXT_CHIP_THOR(bp)) 4120 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, 4121 &tx_tstamp_cycles); 4122 else 4123 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 4124 4125 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 4126 *timestamp = rte_ns_to_timespec(ns); 4127 4128 return rc; 4129 } 4130 4131 static int 4132 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 4133 { 4134 
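	/* Shift only the software timecounter; the hardware clock is not
	 * reprogrammed here.
	 */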
struct bnxt *bp = dev->data->dev_private; 4135 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 4136 4137 if (!ptp) 4138 return 0; 4139 4140 ptp->tc.nsec += delta; 4141 4142 return 0; 4143 } 4144 4145 static int 4146 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 4147 { 4148 struct bnxt *bp = dev->data->dev_private; 4149 int rc; 4150 uint32_t dir_entries; 4151 uint32_t entry_length; 4152 4153 rc = is_bnxt_in_error(bp); 4154 if (rc) 4155 return rc; 4156 4157 PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n", 4158 bp->pdev->addr.domain, bp->pdev->addr.bus, 4159 bp->pdev->addr.devid, bp->pdev->addr.function); 4160 4161 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 4162 if (rc != 0) 4163 return rc; 4164 4165 return dir_entries * entry_length; 4166 } 4167 4168 static int 4169 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 4170 struct rte_dev_eeprom_info *in_eeprom) 4171 { 4172 struct bnxt *bp = dev->data->dev_private; 4173 uint32_t index; 4174 uint32_t offset; 4175 int rc; 4176 4177 rc = is_bnxt_in_error(bp); 4178 if (rc) 4179 return rc; 4180 4181 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 4182 bp->pdev->addr.domain, bp->pdev->addr.bus, 4183 bp->pdev->addr.devid, bp->pdev->addr.function, 4184 in_eeprom->offset, in_eeprom->length); 4185 4186 if (in_eeprom->offset == 0) /* special offset value to get directory */ 4187 return bnxt_get_nvram_directory(bp, in_eeprom->length, 4188 in_eeprom->data); 4189 4190 index = in_eeprom->offset >> 24; 4191 offset = in_eeprom->offset & 0xffffff; 4192 4193 if (index != 0) 4194 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 4195 in_eeprom->length, in_eeprom->data); 4196 4197 return 0; 4198 } 4199 4200 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 4201 { 4202 switch (dir_type) { 4203 case BNX_DIR_TYPE_CHIMP_PATCH: 4204 case BNX_DIR_TYPE_BOOTCODE: 4205 case BNX_DIR_TYPE_BOOTCODE_2: 4206 case BNX_DIR_TYPE_APE_FW: 4207 case BNX_DIR_TYPE_APE_PATCH: 4208 case BNX_DIR_TYPE_KONG_FW: 4209 case BNX_DIR_TYPE_KONG_PATCH: 4210 case BNX_DIR_TYPE_BONO_FW: 4211 case BNX_DIR_TYPE_BONO_PATCH: 4212 /* FALLTHROUGH */ 4213 return true; 4214 } 4215 4216 return false; 4217 } 4218 4219 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 4220 { 4221 switch (dir_type) { 4222 case BNX_DIR_TYPE_AVS: 4223 case BNX_DIR_TYPE_EXP_ROM_MBA: 4224 case BNX_DIR_TYPE_PCIE: 4225 case BNX_DIR_TYPE_TSCF_UCODE: 4226 case BNX_DIR_TYPE_EXT_PHY: 4227 case BNX_DIR_TYPE_CCM: 4228 case BNX_DIR_TYPE_ISCSI_BOOT: 4229 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 4230 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 4231 /* FALLTHROUGH */ 4232 return true; 4233 } 4234 4235 return false; 4236 } 4237 4238 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 4239 { 4240 return bnxt_dir_type_is_ape_bin_format(dir_type) || 4241 bnxt_dir_type_is_other_exec_format(dir_type); 4242 } 4243 4244 static int 4245 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 4246 struct rte_dev_eeprom_info *in_eeprom) 4247 { 4248 struct bnxt *bp = dev->data->dev_private; 4249 uint8_t index, dir_op; 4250 uint16_t type, ext, ordinal, attr; 4251 int rc; 4252 4253 rc = is_bnxt_in_error(bp); 4254 if (rc) 4255 return rc; 4256 4257 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 4258 bp->pdev->addr.domain, bp->pdev->addr.bus, 4259 bp->pdev->addr.devid, bp->pdev->addr.function, 4260 in_eeprom->offset, in_eeprom->length); 4261 4262 if (!BNXT_PF(bp)) { 4263 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 4264 return -EINVAL; 4265 } 4266 4267 type = in_eeprom->magic >> 16; 4268 4269 
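	/* The upper 16 bits of in_eeprom->magic select the NVM item type;
	 * the special value 0xffff requests a directory-level operation,
	 * handled below.
	 */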
if (type == 0xffff) { /* special value for directory operations */ 4270 index = in_eeprom->magic & 0xff; 4271 dir_op = in_eeprom->magic >> 8; 4272 if (index == 0) 4273 return -EINVAL; 4274 switch (dir_op) { 4275 case 0x0e: /* erase */ 4276 if (in_eeprom->offset != ~in_eeprom->magic) 4277 return -EINVAL; 4278 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 4279 default: 4280 return -EINVAL; 4281 } 4282 } 4283 4284 /* Create or re-write an NVM item: */ 4285 if (bnxt_dir_type_is_executable(type) == true) 4286 return -EOPNOTSUPP; 4287 ext = in_eeprom->magic & 0xffff; 4288 ordinal = in_eeprom->offset >> 16; 4289 attr = in_eeprom->offset & 0xffff; 4290 4291 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 4292 in_eeprom->data, in_eeprom->length); 4293 } 4294 4295 /* 4296 * Initialization 4297 */ 4298 4299 static const struct eth_dev_ops bnxt_dev_ops = { 4300 .dev_infos_get = bnxt_dev_info_get_op, 4301 .dev_close = bnxt_dev_close_op, 4302 .dev_configure = bnxt_dev_configure_op, 4303 .dev_start = bnxt_dev_start_op, 4304 .dev_stop = bnxt_dev_stop_op, 4305 .dev_set_link_up = bnxt_dev_set_link_up_op, 4306 .dev_set_link_down = bnxt_dev_set_link_down_op, 4307 .stats_get = bnxt_stats_get_op, 4308 .stats_reset = bnxt_stats_reset_op, 4309 .rx_queue_setup = bnxt_rx_queue_setup_op, 4310 .rx_queue_release = bnxt_rx_queue_release_op, 4311 .tx_queue_setup = bnxt_tx_queue_setup_op, 4312 .tx_queue_release = bnxt_tx_queue_release_op, 4313 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 4314 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 4315 .reta_update = bnxt_reta_update_op, 4316 .reta_query = bnxt_reta_query_op, 4317 .rss_hash_update = bnxt_rss_hash_update_op, 4318 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 4319 .link_update = bnxt_link_update_op, 4320 .promiscuous_enable = bnxt_promiscuous_enable_op, 4321 .promiscuous_disable = bnxt_promiscuous_disable_op, 4322 .allmulticast_enable = bnxt_allmulticast_enable_op, 4323 .allmulticast_disable = bnxt_allmulticast_disable_op, 4324 .mac_addr_add = bnxt_mac_addr_add_op, 4325 .mac_addr_remove = bnxt_mac_addr_remove_op, 4326 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 4327 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 4328 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 4329 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 4330 .vlan_filter_set = bnxt_vlan_filter_set_op, 4331 .vlan_offload_set = bnxt_vlan_offload_set_op, 4332 .vlan_tpid_set = bnxt_vlan_tpid_set_op, 4333 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 4334 .mtu_set = bnxt_mtu_set_op, 4335 .mac_addr_set = bnxt_set_default_mac_addr_op, 4336 .xstats_get = bnxt_dev_xstats_get_op, 4337 .xstats_get_names = bnxt_dev_xstats_get_names_op, 4338 .xstats_reset = bnxt_dev_xstats_reset_op, 4339 .fw_version_get = bnxt_fw_version_get, 4340 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 4341 .rxq_info_get = bnxt_rxq_info_get_op, 4342 .txq_info_get = bnxt_txq_info_get_op, 4343 .rx_burst_mode_get = bnxt_rx_burst_mode_get, 4344 .tx_burst_mode_get = bnxt_tx_burst_mode_get, 4345 .dev_led_on = bnxt_dev_led_on_op, 4346 .dev_led_off = bnxt_dev_led_off_op, 4347 .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op, 4348 .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op, 4349 .rx_queue_start = bnxt_rx_queue_start, 4350 .rx_queue_stop = bnxt_rx_queue_stop, 4351 .tx_queue_start = bnxt_tx_queue_start, 4352 .tx_queue_stop = bnxt_tx_queue_stop, 4353 .filter_ctrl = bnxt_filter_ctrl_op, 4354 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 4355 .get_eeprom_length = 
bnxt_get_eeprom_length_op, 4356 .get_eeprom = bnxt_get_eeprom_op, 4357 .set_eeprom = bnxt_set_eeprom_op, 4358 .timesync_enable = bnxt_timesync_enable, 4359 .timesync_disable = bnxt_timesync_disable, 4360 .timesync_read_time = bnxt_timesync_read_time, 4361 .timesync_write_time = bnxt_timesync_write_time, 4362 .timesync_adjust_time = bnxt_timesync_adjust_time, 4363 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 4364 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 4365 }; 4366 4367 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) 4368 { 4369 uint32_t offset; 4370 4371 /* Only pre-map the reset GRC registers using window 3 */ 4372 rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + 4373 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); 4374 4375 offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); 4376 4377 return offset; 4378 } 4379 4380 int bnxt_map_fw_health_status_regs(struct bnxt *bp) 4381 { 4382 struct bnxt_error_recovery_info *info = bp->recovery_info; 4383 uint32_t reg_base = 0xffffffff; 4384 int i; 4385 4386 /* Only pre-map the monitoring GRC registers using window 2 */ 4387 for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { 4388 uint32_t reg = info->status_regs[i]; 4389 4390 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) 4391 continue; 4392 4393 if (reg_base == 0xffffffff) 4394 reg_base = reg & 0xfffff000; 4395 if ((reg & 0xfffff000) != reg_base) 4396 return -ERANGE; 4397 4398 /* Use mask 0xffc as the Lower 2 bits indicates 4399 * address space location 4400 */ 4401 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + 4402 (reg & 0xffc); 4403 } 4404 4405 if (reg_base == 0xffffffff) 4406 return 0; 4407 4408 rte_write32(reg_base, (uint8_t *)bp->bar0 + 4409 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 4410 4411 return 0; 4412 } 4413 4414 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) 4415 { 4416 struct bnxt_error_recovery_info *info = bp->recovery_info; 4417 uint32_t delay = info->delay_after_reset[index]; 4418 uint32_t val = info->reset_reg_val[index]; 4419 uint32_t reg = info->reset_reg[index]; 4420 uint32_t type, offset; 4421 4422 type = BNXT_FW_STATUS_REG_TYPE(reg); 4423 offset = BNXT_FW_STATUS_REG_OFF(reg); 4424 4425 switch (type) { 4426 case BNXT_FW_STATUS_REG_TYPE_CFG: 4427 rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); 4428 break; 4429 case BNXT_FW_STATUS_REG_TYPE_GRC: 4430 offset = bnxt_map_reset_regs(bp, offset); 4431 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4432 break; 4433 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4434 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4435 break; 4436 } 4437 /* wait on a specific interval of time until core reset is complete */ 4438 if (delay) 4439 rte_delay_ms(delay); 4440 } 4441 4442 static void bnxt_dev_cleanup(struct bnxt *bp) 4443 { 4444 bp->eth_dev->data->dev_link.link_status = 0; 4445 bp->link_info->link_up = 0; 4446 if (bp->eth_dev->data->dev_started) 4447 bnxt_dev_stop_op(bp->eth_dev); 4448 4449 bnxt_uninit_resources(bp, true); 4450 } 4451 4452 static int bnxt_restore_vlan_filters(struct bnxt *bp) 4453 { 4454 struct rte_eth_dev *dev = bp->eth_dev; 4455 struct rte_vlan_filter_conf *vfc; 4456 int vidx, vbit, rc; 4457 uint16_t vlan_id; 4458 4459 for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) { 4460 vfc = &dev->data->vlan_filter_conf; 4461 vidx = vlan_id / 64; 4462 vbit = vlan_id % 64; 4463 4464 /* Each bit corresponds to a VLAN id */ 4465 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) { 4466 rc = bnxt_add_vlan_filter(bp, vlan_id); 4467 if (rc) 4468 
return rc; 4469 } 4470 } 4471 4472 return 0; 4473 } 4474 4475 static int bnxt_restore_mac_filters(struct bnxt *bp) 4476 { 4477 struct rte_eth_dev *dev = bp->eth_dev; 4478 struct rte_eth_dev_info dev_info; 4479 struct rte_ether_addr *addr; 4480 uint64_t pool_mask; 4481 uint32_t pool = 0; 4482 uint16_t i; 4483 int rc; 4484 4485 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4486 return 0; 4487 4488 rc = bnxt_dev_info_get_op(dev, &dev_info); 4489 if (rc) 4490 return rc; 4491 4492 /* replay MAC address configuration */ 4493 for (i = 1; i < dev_info.max_mac_addrs; i++) { 4494 addr = &dev->data->mac_addrs[i]; 4495 4496 /* skip zero address */ 4497 if (rte_is_zero_ether_addr(addr)) 4498 continue; 4499 4500 pool = 0; 4501 pool_mask = dev->data->mac_pool_sel[i]; 4502 4503 do { 4504 if (pool_mask & 1ULL) { 4505 rc = bnxt_mac_addr_add_op(dev, addr, i, pool); 4506 if (rc) 4507 return rc; 4508 } 4509 pool_mask >>= 1; 4510 pool++; 4511 } while (pool_mask); 4512 } 4513 4514 return 0; 4515 } 4516 4517 static int bnxt_restore_filters(struct bnxt *bp) 4518 { 4519 struct rte_eth_dev *dev = bp->eth_dev; 4520 int ret = 0; 4521 4522 if (dev->data->all_multicast) { 4523 ret = bnxt_allmulticast_enable_op(dev); 4524 if (ret) 4525 return ret; 4526 } 4527 if (dev->data->promiscuous) { 4528 ret = bnxt_promiscuous_enable_op(dev); 4529 if (ret) 4530 return ret; 4531 } 4532 4533 ret = bnxt_restore_mac_filters(bp); 4534 if (ret) 4535 return ret; 4536 4537 ret = bnxt_restore_vlan_filters(bp); 4538 /* TODO restore other filters as well */ 4539 return ret; 4540 } 4541 4542 static void bnxt_dev_recover(void *arg) 4543 { 4544 struct bnxt *bp = arg; 4545 int timeout = bp->fw_reset_max_msecs; 4546 int rc = 0; 4547 4548 /* Clear Error flag so that device re-init should happen */ 4549 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 4550 4551 do { 4552 rc = bnxt_hwrm_ver_get(bp, SHORT_HWRM_CMD_TIMEOUT); 4553 if (rc == 0) 4554 break; 4555 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); 4556 timeout -= BNXT_FW_READY_WAIT_INTERVAL; 4557 } while (rc && timeout); 4558 4559 if (rc) { 4560 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); 4561 goto err; 4562 } 4563 4564 rc = bnxt_init_resources(bp, true); 4565 if (rc) { 4566 PMD_DRV_LOG(ERR, 4567 "Failed to initialize resources after reset\n"); 4568 goto err; 4569 } 4570 /* clear reset flag as the device is initialized now */ 4571 bp->flags &= ~BNXT_FLAG_FW_RESET; 4572 4573 rc = bnxt_dev_start_op(bp->eth_dev); 4574 if (rc) { 4575 PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); 4576 goto err_start; 4577 } 4578 4579 rc = bnxt_restore_filters(bp); 4580 if (rc) 4581 goto err_start; 4582 4583 PMD_DRV_LOG(INFO, "Recovered from FW reset\n"); 4584 return; 4585 err_start: 4586 bnxt_dev_stop_op(bp->eth_dev); 4587 err: 4588 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4589 bnxt_uninit_resources(bp, false); 4590 PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n"); 4591 } 4592 4593 void bnxt_dev_reset_and_resume(void *arg) 4594 { 4595 struct bnxt *bp = arg; 4596 int rc; 4597 4598 bnxt_dev_cleanup(bp); 4599 4600 bnxt_wait_for_device_shutdown(bp); 4601 4602 rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs, 4603 bnxt_dev_recover, (void *)bp); 4604 if (rc) 4605 PMD_DRV_LOG(ERR, "Error setting recovery alarm"); 4606 } 4607 4608 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) 4609 { 4610 struct bnxt_error_recovery_info *info = bp->recovery_info; 4611 uint32_t reg = info->status_regs[index]; 4612 uint32_t type, offset, val = 0; 4613 4614 type = BNXT_FW_STATUS_REG_TYPE(reg); 4615 offset = 
BNXT_FW_STATUS_REG_OFF(reg); 4616 4617 switch (type) { 4618 case BNXT_FW_STATUS_REG_TYPE_CFG: 4619 rte_pci_read_config(bp->pdev, &val, sizeof(val), offset); 4620 break; 4621 case BNXT_FW_STATUS_REG_TYPE_GRC: 4622 offset = info->mapped_status_regs[index]; 4623 /* FALLTHROUGH */ 4624 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4625 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 4626 offset)); 4627 break; 4628 } 4629 4630 return val; 4631 } 4632 4633 static int bnxt_fw_reset_all(struct bnxt *bp) 4634 { 4635 struct bnxt_error_recovery_info *info = bp->recovery_info; 4636 uint32_t i; 4637 int rc = 0; 4638 4639 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4640 /* Reset through master function driver */ 4641 for (i = 0; i < info->reg_array_cnt; i++) 4642 bnxt_write_fw_reset_reg(bp, i); 4643 /* Wait for time specified by FW after triggering reset */ 4644 rte_delay_ms(info->master_func_wait_period_after_reset); 4645 } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) { 4646 /* Reset with the help of Kong processor */ 4647 rc = bnxt_hwrm_fw_reset(bp); 4648 if (rc) 4649 PMD_DRV_LOG(ERR, "Failed to reset FW\n"); 4650 } 4651 4652 return rc; 4653 } 4654 4655 static void bnxt_fw_reset_cb(void *arg) 4656 { 4657 struct bnxt *bp = arg; 4658 struct bnxt_error_recovery_info *info = bp->recovery_info; 4659 int rc = 0; 4660 4661 /* Only the master function can do FW reset */ 4662 if (bnxt_is_master_func(bp) && 4663 bnxt_is_recovery_enabled(bp)) { 4664 rc = bnxt_fw_reset_all(bp); 4665 if (rc) { 4666 PMD_DRV_LOG(ERR, "Adapter recovery failed\n"); 4667 return; 4668 } 4669 } 4670 4671 /* If the recovery method is ERROR_RECOVERY_CO_CPU, KONG will send an 4672 * EXCEPTION_FATAL_ASYNC event to all the functions 4673 * (including the MASTER FUNC). After receiving this async event, all the active 4674 * drivers should treat this case as FW-initiated recovery 4675 */ 4676 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4677 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT; 4678 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT; 4679 4680 /* To recover from error */ 4681 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, 4682 (void *)bp); 4683 } 4684 } 4685 4686 /* The driver should poll the FW heartbeat and reset_counter with the frequency 4687 * advertised by FW in HWRM_ERROR_RECOVERY_QCFG. 4688 * When the driver detects a heartbeat stop or a change in reset_counter, 4689 * it has to trigger a reset to recover from the error condition. 4690 * A "master PF" is the function that will have the privilege to 4691 * initiate the chimp reset. The master PF will be elected by the 4692 * firmware and will be notified through an async message.
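 * bnxt_check_fw_health() below implements this polling: it re-arms itself via
 * rte_eal_alarm_set() and, when the heartbeat stalls or the reset counter
 * changes, schedules bnxt_fw_reset_cb() to perform the recovery.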
4693 */ 4694 static void bnxt_check_fw_health(void *arg) 4695 { 4696 struct bnxt *bp = arg; 4697 struct bnxt_error_recovery_info *info = bp->recovery_info; 4698 uint32_t val = 0, wait_msec; 4699 4700 if (!info || !bnxt_is_recovery_enabled(bp) || 4701 is_bnxt_in_error(bp)) 4702 return; 4703 4704 val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); 4705 if (val == info->last_heart_beat) 4706 goto reset; 4707 4708 info->last_heart_beat = val; 4709 4710 val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); 4711 if (val != info->last_reset_counter) 4712 goto reset; 4713 4714 info->last_reset_counter = val; 4715 4716 rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq, 4717 bnxt_check_fw_health, (void *)bp); 4718 4719 return; 4720 reset: 4721 /* Stop DMA to/from device */ 4722 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4723 bp->flags |= BNXT_FLAG_FW_RESET; 4724 4725 PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); 4726 4727 if (bnxt_is_master_func(bp)) 4728 wait_msec = info->master_func_wait_period; 4729 else 4730 wait_msec = info->normal_func_wait_period; 4731 4732 rte_eal_alarm_set(US_PER_MS * wait_msec, 4733 bnxt_fw_reset_cb, (void *)bp); 4734 } 4735 4736 void bnxt_schedule_fw_health_check(struct bnxt *bp) 4737 { 4738 uint32_t polling_freq; 4739 4740 pthread_mutex_lock(&bp->health_check_lock); 4741 4742 if (!bnxt_is_recovery_enabled(bp)) 4743 goto done; 4744 4745 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) 4746 goto done; 4747 4748 polling_freq = bp->recovery_info->driver_polling_freq; 4749 4750 rte_eal_alarm_set(US_PER_MS * polling_freq, 4751 bnxt_check_fw_health, (void *)bp); 4752 bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4753 4754 done: 4755 pthread_mutex_unlock(&bp->health_check_lock); 4756 } 4757 4758 static void bnxt_cancel_fw_health_check(struct bnxt *bp) 4759 { 4760 if (!bnxt_is_recovery_enabled(bp)) 4761 return; 4762 4763 rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp); 4764 bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4765 } 4766 4767 static bool bnxt_vf_pciid(uint16_t device_id) 4768 { 4769 switch (device_id) { 4770 case BROADCOM_DEV_ID_57304_VF: 4771 case BROADCOM_DEV_ID_57406_VF: 4772 case BROADCOM_DEV_ID_5731X_VF: 4773 case BROADCOM_DEV_ID_5741X_VF: 4774 case BROADCOM_DEV_ID_57414_VF: 4775 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4776 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4777 case BROADCOM_DEV_ID_58802_VF: 4778 case BROADCOM_DEV_ID_57500_VF1: 4779 case BROADCOM_DEV_ID_57500_VF2: 4780 /* FALLTHROUGH */ 4781 return true; 4782 default: 4783 return false; 4784 } 4785 } 4786 4787 static bool bnxt_thor_device(uint16_t device_id) 4788 { 4789 switch (device_id) { 4790 case BROADCOM_DEV_ID_57508: 4791 case BROADCOM_DEV_ID_57504: 4792 case BROADCOM_DEV_ID_57502: 4793 case BROADCOM_DEV_ID_57508_MF1: 4794 case BROADCOM_DEV_ID_57504_MF1: 4795 case BROADCOM_DEV_ID_57502_MF1: 4796 case BROADCOM_DEV_ID_57508_MF2: 4797 case BROADCOM_DEV_ID_57504_MF2: 4798 case BROADCOM_DEV_ID_57502_MF2: 4799 case BROADCOM_DEV_ID_57500_VF1: 4800 case BROADCOM_DEV_ID_57500_VF2: 4801 /* FALLTHROUGH */ 4802 return true; 4803 default: 4804 return false; 4805 } 4806 } 4807 4808 bool bnxt_stratus_device(struct bnxt *bp) 4809 { 4810 uint16_t device_id = bp->pdev->id.device_id; 4811 4812 switch (device_id) { 4813 case BROADCOM_DEV_ID_STRATUS_NIC: 4814 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4815 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4816 /* FALLTHROUGH */ 4817 return true; 4818 default: 4819 return false; 4820 } 4821 } 4822 4823 static int bnxt_init_board(struct rte_eth_dev 
*eth_dev) 4824 { 4825 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 4826 struct bnxt *bp = eth_dev->data->dev_private; 4827 4828 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 4829 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 4830 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 4831 if (!bp->bar0 || !bp->doorbell_base) { 4832 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 4833 return -ENODEV; 4834 } 4835 4836 bp->eth_dev = eth_dev; 4837 bp->pdev = pci_dev; 4838 4839 return 0; 4840 } 4841 4842 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 4843 struct bnxt_ctx_pg_info *ctx_pg, 4844 uint32_t mem_size, 4845 const char *suffix, 4846 uint16_t idx) 4847 { 4848 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 4849 const struct rte_memzone *mz = NULL; 4850 char mz_name[RTE_MEMZONE_NAMESIZE]; 4851 rte_iova_t mz_phys_addr; 4852 uint64_t valid_bits = 0; 4853 uint32_t sz; 4854 int i; 4855 4856 if (!mem_size) 4857 return 0; 4858 4859 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 4860 BNXT_PAGE_SIZE; 4861 rmem->page_size = BNXT_PAGE_SIZE; 4862 rmem->pg_arr = ctx_pg->ctx_pg_arr; 4863 rmem->dma_arr = ctx_pg->ctx_dma_arr; 4864 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 4865 4866 valid_bits = PTU_PTE_VALID; 4867 4868 if (rmem->nr_pages > 1) { 4869 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4870 "bnxt_ctx_pg_tbl%s_%x_%d", 4871 suffix, idx, bp->eth_dev->data->port_id); 4872 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4873 mz = rte_memzone_lookup(mz_name); 4874 if (!mz) { 4875 mz = rte_memzone_reserve_aligned(mz_name, 4876 rmem->nr_pages * 8, 4877 SOCKET_ID_ANY, 4878 RTE_MEMZONE_2MB | 4879 RTE_MEMZONE_SIZE_HINT_ONLY | 4880 RTE_MEMZONE_IOVA_CONTIG, 4881 BNXT_PAGE_SIZE); 4882 if (mz == NULL) 4883 return -ENOMEM; 4884 } 4885 4886 memset(mz->addr, 0, mz->len); 4887 mz_phys_addr = mz->iova; 4888 4889 rmem->pg_tbl = mz->addr; 4890 rmem->pg_tbl_map = mz_phys_addr; 4891 rmem->pg_tbl_mz = mz; 4892 } 4893 4894 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", 4895 suffix, idx, bp->eth_dev->data->port_id); 4896 mz = rte_memzone_lookup(mz_name); 4897 if (!mz) { 4898 mz = rte_memzone_reserve_aligned(mz_name, 4899 mem_size, 4900 SOCKET_ID_ANY, 4901 RTE_MEMZONE_1GB | 4902 RTE_MEMZONE_SIZE_HINT_ONLY | 4903 RTE_MEMZONE_IOVA_CONTIG, 4904 BNXT_PAGE_SIZE); 4905 if (mz == NULL) 4906 return -ENOMEM; 4907 } 4908 4909 memset(mz->addr, 0, mz->len); 4910 mz_phys_addr = mz->iova; 4911 4912 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 4913 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 4914 rmem->dma_arr[i] = mz_phys_addr + sz; 4915 4916 if (rmem->nr_pages > 1) { 4917 if (i == rmem->nr_pages - 2 && 4918 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4919 valid_bits |= PTU_PTE_NEXT_TO_LAST; 4920 else if (i == rmem->nr_pages - 1 && 4921 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4922 valid_bits |= PTU_PTE_LAST; 4923 4924 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 4925 valid_bits); 4926 } 4927 } 4928 4929 rmem->mz = mz; 4930 if (rmem->vmem_size) 4931 rmem->vmem = (void **)mz->addr; 4932 rmem->dma_arr[0] = mz_phys_addr; 4933 return 0; 4934 } 4935 4936 static void bnxt_free_ctx_mem(struct bnxt *bp) 4937 { 4938 int i; 4939 4940 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 4941 return; 4942 4943 bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; 4944 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 4945 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 4946 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 4947 
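	/* The pg_tbl_mz page-table memzones freed below are only allocated
	 * for context blocks that span more than one page.
	 */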
rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 4948 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 4949 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 4950 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 4951 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 4952 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 4953 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 4954 4955 for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { 4956 if (bp->ctx->tqm_mem[i]) 4957 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 4958 } 4959 4960 rte_free(bp->ctx); 4961 bp->ctx = NULL; 4962 } 4963 4964 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 4965 4966 #define min_t(type, x, y) ({ \ 4967 type __min1 = (x); \ 4968 type __min2 = (y); \ 4969 __min1 < __min2 ? __min1 : __min2; }) 4970 4971 #define max_t(type, x, y) ({ \ 4972 type __max1 = (x); \ 4973 type __max2 = (y); \ 4974 __max1 > __max2 ? __max1 : __max2; }) 4975 4976 #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) 4977 4978 int bnxt_alloc_ctx_mem(struct bnxt *bp) 4979 { 4980 struct bnxt_ctx_pg_info *ctx_pg; 4981 struct bnxt_ctx_mem_info *ctx; 4982 uint32_t mem_size, ena, entries; 4983 uint32_t entries_sp, min; 4984 int i, rc; 4985 4986 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 4987 if (rc) { 4988 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 4989 return rc; 4990 } 4991 ctx = bp->ctx; 4992 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 4993 return 0; 4994 4995 ctx_pg = &ctx->qp_mem; 4996 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 4997 mem_size = ctx->qp_entry_size * ctx_pg->entries; 4998 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 4999 if (rc) 5000 return rc; 5001 5002 ctx_pg = &ctx->srq_mem; 5003 ctx_pg->entries = ctx->srq_max_l2_entries; 5004 mem_size = ctx->srq_entry_size * ctx_pg->entries; 5005 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 5006 if (rc) 5007 return rc; 5008 5009 ctx_pg = &ctx->cq_mem; 5010 ctx_pg->entries = ctx->cq_max_l2_entries; 5011 mem_size = ctx->cq_entry_size * ctx_pg->entries; 5012 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 5013 if (rc) 5014 return rc; 5015 5016 ctx_pg = &ctx->vnic_mem; 5017 ctx_pg->entries = ctx->vnic_max_vnic_entries + 5018 ctx->vnic_max_ring_table_entries; 5019 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 5020 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 5021 if (rc) 5022 return rc; 5023 5024 ctx_pg = &ctx->stat_mem; 5025 ctx_pg->entries = ctx->stat_max_entries; 5026 mem_size = ctx->stat_entry_size * ctx_pg->entries; 5027 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 5028 if (rc) 5029 return rc; 5030 5031 min = ctx->tqm_min_entries_per_ring; 5032 5033 entries_sp = ctx->qp_max_l2_entries + 5034 ctx->vnic_max_vnic_entries + 5035 2 * ctx->qp_min_qp1_entries + min; 5036 entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple); 5037 5038 entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries; 5039 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 5040 entries = clamp_t(uint32_t, entries, min, 5041 ctx->tqm_max_entries_per_ring); 5042 for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 5043 ctx_pg = ctx->tqm_mem[i]; 5044 ctx_pg->entries = i ? 
entries : entries_sp; 5045 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 5046 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i); 5047 if (rc) 5048 return rc; 5049 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 5050 } 5051 5052 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 5053 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 5054 if (rc) 5055 PMD_DRV_LOG(ERR, 5056 "Failed to configure context mem: rc = %d\n", rc); 5057 else 5058 ctx->flags |= BNXT_CTX_FLAG_INITED; 5059 5060 return rc; 5061 } 5062 5063 static int bnxt_alloc_stats_mem(struct bnxt *bp) 5064 { 5065 struct rte_pci_device *pci_dev = bp->pdev; 5066 char mz_name[RTE_MEMZONE_NAMESIZE]; 5067 const struct rte_memzone *mz = NULL; 5068 uint32_t total_alloc_len; 5069 rte_iova_t mz_phys_addr; 5070 5071 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 5072 return 0; 5073 5074 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 5075 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 5076 pci_dev->addr.bus, pci_dev->addr.devid, 5077 pci_dev->addr.function, "rx_port_stats"); 5078 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 5079 mz = rte_memzone_lookup(mz_name); 5080 total_alloc_len = 5081 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 5082 sizeof(struct rx_port_stats_ext) + 512); 5083 if (!mz) { 5084 mz = rte_memzone_reserve(mz_name, total_alloc_len, 5085 SOCKET_ID_ANY, 5086 RTE_MEMZONE_2MB | 5087 RTE_MEMZONE_SIZE_HINT_ONLY | 5088 RTE_MEMZONE_IOVA_CONTIG); 5089 if (mz == NULL) 5090 return -ENOMEM; 5091 } 5092 memset(mz->addr, 0, mz->len); 5093 mz_phys_addr = mz->iova; 5094 5095 bp->rx_mem_zone = (const void *)mz; 5096 bp->hw_rx_port_stats = mz->addr; 5097 bp->hw_rx_port_stats_map = mz_phys_addr; 5098 5099 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 5100 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 5101 pci_dev->addr.bus, pci_dev->addr.devid, 5102 pci_dev->addr.function, "tx_port_stats"); 5103 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 5104 mz = rte_memzone_lookup(mz_name); 5105 total_alloc_len = 5106 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 5107 sizeof(struct tx_port_stats_ext) + 512); 5108 if (!mz) { 5109 mz = rte_memzone_reserve(mz_name, 5110 total_alloc_len, 5111 SOCKET_ID_ANY, 5112 RTE_MEMZONE_2MB | 5113 RTE_MEMZONE_SIZE_HINT_ONLY | 5114 RTE_MEMZONE_IOVA_CONTIG); 5115 if (mz == NULL) 5116 return -ENOMEM; 5117 } 5118 memset(mz->addr, 0, mz->len); 5119 mz_phys_addr = mz->iova; 5120 5121 bp->tx_mem_zone = (const void *)mz; 5122 bp->hw_tx_port_stats = mz->addr; 5123 bp->hw_tx_port_stats_map = mz_phys_addr; 5124 bp->flags |= BNXT_FLAG_PORT_STATS; 5125 5126 /* Display extended statistics if FW supports it */ 5127 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 5128 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 5129 !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED)) 5130 return 0; 5131 5132 bp->hw_rx_port_stats_ext = (void *) 5133 ((uint8_t *)bp->hw_rx_port_stats + 5134 sizeof(struct rx_port_stats)); 5135 bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + 5136 sizeof(struct rx_port_stats); 5137 bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; 5138 5139 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 || 5140 bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) { 5141 bp->hw_tx_port_stats_ext = (void *) 5142 ((uint8_t *)bp->hw_tx_port_stats + 5143 sizeof(struct tx_port_stats)); 5144 bp->hw_tx_port_stats_ext_map = 5145 bp->hw_tx_port_stats_map + 5146 sizeof(struct tx_port_stats); 5147 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS; 5148 } 5149 5150 return 0; 5151 } 5152 5153 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) 
5154 { 5155 struct bnxt *bp = eth_dev->data->dev_private; 5156 int rc = 0; 5157 5158 eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl", 5159 RTE_ETHER_ADDR_LEN * 5160 bp->max_l2_ctx, 5161 0); 5162 if (eth_dev->data->mac_addrs == NULL) { 5163 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n"); 5164 return -ENOMEM; 5165 } 5166 5167 if (!BNXT_HAS_DFLT_MAC_SET(bp)) { 5168 if (BNXT_PF(bp)) 5169 return -EINVAL; 5170 5171 /* Generate a random MAC address, if none was assigned by PF */ 5172 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n"); 5173 bnxt_eth_hw_addr_random(bp->mac_addr); 5174 PMD_DRV_LOG(INFO, 5175 "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n", 5176 bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2], 5177 bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]); 5178 5179 rc = bnxt_hwrm_set_mac(bp); 5180 if (rc) 5181 return rc; 5182 } 5183 5184 /* Copy the permanent MAC from the FUNC_QCAPS response */ 5185 memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN); 5186 5187 return rc; 5188 } 5189 5190 static int bnxt_restore_dflt_mac(struct bnxt *bp) 5191 { 5192 int rc = 0; 5193 5194 /* MAC is already configured in FW */ 5195 if (BNXT_HAS_DFLT_MAC_SET(bp)) 5196 return 0; 5197 5198 /* Restore the old MAC configured */ 5199 rc = bnxt_hwrm_set_mac(bp); 5200 if (rc) 5201 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n"); 5202 5203 return rc; 5204 } 5205 5206 static void bnxt_config_vf_req_fwd(struct bnxt *bp) 5207 { 5208 if (!BNXT_PF(bp)) 5209 return; 5210 5211 memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd)); 5212 5213 if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)) 5214 BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG); 5215 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG); 5216 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG); 5217 BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC); 5218 BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD); 5219 } 5220 5221 uint16_t 5222 bnxt_get_svif(uint16_t port_id, bool func_svif, 5223 enum bnxt_ulp_intf_type type) 5224 { 5225 struct rte_eth_dev *eth_dev; 5226 struct bnxt *bp; 5227 5228 eth_dev = &rte_eth_devices[port_id]; 5229 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5230 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5231 if (!vfr) 5232 return 0; 5233 5234 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5235 return vfr->svif; 5236 5237 eth_dev = vfr->parent_dev; 5238 } 5239 5240 bp = eth_dev->data->dev_private; 5241 5242 return func_svif ?
bp->func_svif : bp->port_svif; 5243 } 5244 5245 uint16_t 5246 bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type) 5247 { 5248 struct rte_eth_dev *eth_dev; 5249 struct bnxt_vnic_info *vnic; 5250 struct bnxt *bp; 5251 5252 eth_dev = &rte_eth_devices[port]; 5253 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5254 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5255 if (!vfr) 5256 return 0; 5257 5258 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5259 return vfr->dflt_vnic_id; 5260 5261 eth_dev = vfr->parent_dev; 5262 } 5263 5264 bp = eth_dev->data->dev_private; 5265 5266 vnic = BNXT_GET_DEFAULT_VNIC(bp); 5267 5268 return vnic->fw_vnic_id; 5269 } 5270 5271 uint16_t 5272 bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type) 5273 { 5274 struct rte_eth_dev *eth_dev; 5275 struct bnxt *bp; 5276 5277 eth_dev = &rte_eth_devices[port]; 5278 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5279 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5280 if (!vfr) 5281 return 0; 5282 5283 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5284 return vfr->fw_fid; 5285 5286 eth_dev = vfr->parent_dev; 5287 } 5288 5289 bp = eth_dev->data->dev_private; 5290 5291 return bp->fw_fid; 5292 } 5293 5294 enum bnxt_ulp_intf_type 5295 bnxt_get_interface_type(uint16_t port) 5296 { 5297 struct rte_eth_dev *eth_dev; 5298 struct bnxt *bp; 5299 5300 eth_dev = &rte_eth_devices[port]; 5301 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) 5302 return BNXT_ULP_INTF_TYPE_VF_REP; 5303 5304 bp = eth_dev->data->dev_private; 5305 if (BNXT_PF(bp)) 5306 return BNXT_ULP_INTF_TYPE_PF; 5307 else if (BNXT_VF_IS_TRUSTED(bp)) 5308 return BNXT_ULP_INTF_TYPE_TRUSTED_VF; 5309 else if (BNXT_VF(bp)) 5310 return BNXT_ULP_INTF_TYPE_VF; 5311 5312 return BNXT_ULP_INTF_TYPE_INVALID; 5313 } 5314 5315 uint16_t 5316 bnxt_get_phy_port_id(uint16_t port_id) 5317 { 5318 struct bnxt_representor *vfr; 5319 struct rte_eth_dev *eth_dev; 5320 struct bnxt *bp; 5321 5322 eth_dev = &rte_eth_devices[port_id]; 5323 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5324 vfr = eth_dev->data->dev_private; 5325 if (!vfr) 5326 return 0; 5327 5328 eth_dev = vfr->parent_dev; 5329 } 5330 5331 bp = eth_dev->data->dev_private; 5332 5333 return BNXT_PF(bp) ? bp->pf->port_id : bp->parent->port_id; 5334 } 5335 5336 uint16_t 5337 bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type) 5338 { 5339 struct rte_eth_dev *eth_dev; 5340 struct bnxt *bp; 5341 5342 eth_dev = &rte_eth_devices[port_id]; 5343 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5344 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5345 if (!vfr) 5346 return 0; 5347 5348 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5349 return vfr->fw_fid - 1; 5350 5351 eth_dev = vfr->parent_dev; 5352 } 5353 5354 bp = eth_dev->data->dev_private; 5355 5356 return BNXT_PF(bp) ? 
bp->fw_fid - 1 : bp->parent->fid - 1; 5357 } 5358 5359 uint16_t 5360 bnxt_get_vport(uint16_t port_id) 5361 { 5362 return (1 << bnxt_get_phy_port_id(port_id)); 5363 } 5364 5365 static void bnxt_alloc_error_recovery_info(struct bnxt *bp) 5366 { 5367 struct bnxt_error_recovery_info *info = bp->recovery_info; 5368 5369 if (info) { 5370 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)) 5371 memset(info, 0, sizeof(*info)); 5372 return; 5373 } 5374 5375 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5376 return; 5377 5378 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5379 sizeof(*info), 0); 5380 if (!info) 5381 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5382 5383 bp->recovery_info = info; 5384 } 5385 5386 static void bnxt_check_fw_status(struct bnxt *bp) 5387 { 5388 uint32_t fw_status; 5389 5390 if (!(bp->recovery_info && 5391 (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) 5392 return; 5393 5394 fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); 5395 if (fw_status != BNXT_FW_STATUS_HEALTHY) 5396 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", 5397 fw_status); 5398 } 5399 5400 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) 5401 { 5402 struct bnxt_error_recovery_info *info = bp->recovery_info; 5403 uint32_t status_loc; 5404 uint32_t sig_ver; 5405 5406 rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + 5407 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5408 sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5409 BNXT_GRCP_WINDOW_2_BASE + 5410 offsetof(struct hcomm_status, 5411 sig_ver))); 5412 /* If the signature is absent, then FW does not support this feature */ 5413 if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != 5414 HCOMM_STATUS_SIGNATURE_VAL) 5415 return 0; 5416 5417 if (!info) { 5418 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5419 sizeof(*info), 0); 5420 if (!info) 5421 return -ENOMEM; 5422 bp->recovery_info = info; 5423 } else { 5424 memset(info, 0, sizeof(*info)); 5425 } 5426 5427 status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5428 BNXT_GRCP_WINDOW_2_BASE + 5429 offsetof(struct hcomm_status, 5430 fw_status_loc))); 5431 5432 /* Only pre-map the FW health status GRC register */ 5433 if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) 5434 return 0; 5435 5436 info->status_regs[BNXT_FW_STATUS_REG] = status_loc; 5437 info->mapped_status_regs[BNXT_FW_STATUS_REG] = 5438 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); 5439 5440 rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + 5441 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5442 5443 bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; 5444 5445 return 0; 5446 } 5447 5448 static int bnxt_init_fw(struct bnxt *bp) 5449 { 5450 uint16_t mtu; 5451 int rc = 0; 5452 5453 bp->fw_cap = 0; 5454 5455 rc = bnxt_map_hcomm_fw_status_reg(bp); 5456 if (rc) 5457 return rc; 5458 5459 rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); 5460 if (rc) { 5461 bnxt_check_fw_status(bp); 5462 return rc; 5463 } 5464 5465 rc = bnxt_hwrm_func_reset(bp); 5466 if (rc) 5467 return -EIO; 5468 5469 rc = bnxt_hwrm_vnic_qcaps(bp); 5470 if (rc) 5471 return rc; 5472 5473 rc = bnxt_hwrm_queue_qportcfg(bp); 5474 if (rc) 5475 return rc; 5476 5477 /* Get the MAX capabilities for this function. 5478 * This function also allocates context memory for TQM rings and 5479 * informs the firmware about this allocated backing store memory. 
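 * (The backing store referred to here is the memory built up by
 * bnxt_alloc_ctx_mem() earlier in this file.)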
5480 */ 5481 rc = bnxt_hwrm_func_qcaps(bp); 5482 if (rc) 5483 return rc; 5484 5485 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 5486 if (rc) 5487 return rc; 5488 5489 bnxt_hwrm_port_mac_qcfg(bp); 5490 5491 bnxt_hwrm_parent_pf_qcfg(bp); 5492 5493 bnxt_hwrm_port_phy_qcaps(bp); 5494 5495 bnxt_alloc_error_recovery_info(bp); 5496 /* Get the adapter error recovery support info */ 5497 rc = bnxt_hwrm_error_recovery_qcfg(bp); 5498 if (rc) 5499 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5500 5501 bnxt_hwrm_port_led_qcaps(bp); 5502 5503 return 0; 5504 } 5505 5506 static int 5507 bnxt_init_locks(struct bnxt *bp) 5508 { 5509 int err; 5510 5511 err = pthread_mutex_init(&bp->flow_lock, NULL); 5512 if (err) { 5513 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); 5514 return err; 5515 } 5516 5517 err = pthread_mutex_init(&bp->def_cp_lock, NULL); 5518 if (err) 5519 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); 5520 5521 err = pthread_mutex_init(&bp->health_check_lock, NULL); 5522 if (err) 5523 PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n"); 5524 return err; 5525 } 5526 5527 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) 5528 { 5529 int rc = 0; 5530 5531 rc = bnxt_init_fw(bp); 5532 if (rc) 5533 return rc; 5534 5535 if (!reconfig_dev) { 5536 rc = bnxt_setup_mac_addr(bp->eth_dev); 5537 if (rc) 5538 return rc; 5539 } else { 5540 rc = bnxt_restore_dflt_mac(bp); 5541 if (rc) 5542 return rc; 5543 } 5544 5545 bnxt_config_vf_req_fwd(bp); 5546 5547 rc = bnxt_hwrm_func_driver_register(bp); 5548 if (rc) { 5549 PMD_DRV_LOG(ERR, "Failed to register driver"); 5550 return -EBUSY; 5551 } 5552 5553 if (BNXT_PF(bp)) { 5554 if (bp->pdev->max_vfs) { 5555 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 5556 if (rc) { 5557 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 5558 return rc; 5559 } 5560 } else { 5561 rc = bnxt_hwrm_allocate_pf_only(bp); 5562 if (rc) { 5563 PMD_DRV_LOG(ERR, 5564 "Failed to allocate PF resources"); 5565 return rc; 5566 } 5567 } 5568 } 5569 5570 rc = bnxt_alloc_mem(bp, reconfig_dev); 5571 if (rc) 5572 return rc; 5573 5574 rc = bnxt_setup_int(bp); 5575 if (rc) 5576 return rc; 5577 5578 rc = bnxt_request_int(bp); 5579 if (rc) 5580 return rc; 5581 5582 rc = bnxt_init_ctx_mem(bp); 5583 if (rc) { 5584 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n"); 5585 return rc; 5586 } 5587 5588 rc = bnxt_init_locks(bp); 5589 if (rc) 5590 return rc; 5591 5592 return 0; 5593 } 5594 5595 static int 5596 bnxt_parse_devarg_truflow(__rte_unused const char *key, 5597 const char *value, void *opaque_arg) 5598 { 5599 struct bnxt *bp = opaque_arg; 5600 unsigned long truflow; 5601 char *end = NULL; 5602 5603 if (!value || !opaque_arg) { 5604 PMD_DRV_LOG(ERR, 5605 "Invalid parameter passed to truflow devargs.\n"); 5606 return -EINVAL; 5607 } 5608 5609 truflow = strtoul(value, &end, 10); 5610 if (end == NULL || *end != '\0' || 5611 (truflow == ULONG_MAX && errno == ERANGE)) { 5612 PMD_DRV_LOG(ERR, 5613 "Invalid parameter passed to truflow devargs.\n"); 5614 return -EINVAL; 5615 } 5616 5617 if (BNXT_DEVARG_TRUFLOW_INVALID(truflow)) { 5618 PMD_DRV_LOG(ERR, 5619 "Invalid value passed to truflow devargs.\n"); 5620 return -EINVAL; 5621 } 5622 5623 if (truflow) { 5624 bp->flags |= BNXT_FLAG_TRUFLOW_EN; 5625 PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n"); 5626 } else { 5627 bp->flags &= ~BNXT_FLAG_TRUFLOW_EN; 5628 PMD_DRV_LOG(INFO, "Host-based truflow feature disabled.\n"); 5629 } 5630 5631 return 0; 5632 } 5633 5634 static int 5635 bnxt_parse_devarg_flow_xstat(__rte_unused const 
char *key, 5636 const char *value, void *opaque_arg) 5637 { 5638 struct bnxt *bp = opaque_arg; 5639 unsigned long flow_xstat; 5640 char *end = NULL; 5641 5642 if (!value || !opaque_arg) { 5643 PMD_DRV_LOG(ERR, 5644 "Invalid parameter passed to flow_xstat devarg.\n"); 5645 return -EINVAL; 5646 } 5647 5648 flow_xstat = strtoul(value, &end, 10); 5649 if (end == NULL || *end != '\0' || 5650 (flow_xstat == ULONG_MAX && errno == ERANGE)) { 5651 PMD_DRV_LOG(ERR, 5652 "Invalid parameter passed to flow_xstat devarg.\n"); 5653 return -EINVAL; 5654 } 5655 5656 if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) { 5657 PMD_DRV_LOG(ERR, 5658 "Invalid value passed to flow_xstat devarg.\n"); 5659 return -EINVAL; 5660 } 5661 5662 bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; 5663 if (BNXT_FLOW_XSTATS_EN(bp)) 5664 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n"); 5665 5666 return 0; 5667 } 5668 5669 static int 5670 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key, 5671 const char *value, void *opaque_arg) 5672 { 5673 struct bnxt *bp = opaque_arg; 5674 unsigned long max_num_kflows; 5675 char *end = NULL; 5676 5677 if (!value || !opaque_arg) { 5678 PMD_DRV_LOG(ERR, 5679 "Invalid parameter passed to max_num_kflows devarg.\n"); 5680 return -EINVAL; 5681 } 5682 5683 max_num_kflows = strtoul(value, &end, 10); 5684 if (end == NULL || *end != '\0' || 5685 (max_num_kflows == ULONG_MAX && errno == ERANGE)) { 5686 PMD_DRV_LOG(ERR, 5687 "Invalid parameter passed to max_num_kflows devarg.\n"); 5688 return -EINVAL; 5689 } 5690 5691 if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) { 5692 PMD_DRV_LOG(ERR, 5693 "Invalid value passed to max_num_kflows devarg.\n"); 5694 return -EINVAL; 5695 } 5696 5697 bp->max_num_kflows = max_num_kflows; 5698 if (bp->max_num_kflows) 5699 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n", 5700 max_num_kflows); 5701 5702 return 0; 5703 } 5704 5705 static int 5706 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key, 5707 const char *value, void *opaque_arg) 5708 { 5709 struct bnxt_representor *vfr_bp = opaque_arg; 5710 unsigned long rep_is_pf; 5711 char *end = NULL; 5712 5713 if (!value || !opaque_arg) { 5714 PMD_DRV_LOG(ERR, 5715 "Invalid parameter passed to rep_is_pf devargs.\n"); 5716 return -EINVAL; 5717 } 5718 5719 rep_is_pf = strtoul(value, &end, 10); 5720 if (end == NULL || *end != '\0' || 5721 (rep_is_pf == ULONG_MAX && errno == ERANGE)) { 5722 PMD_DRV_LOG(ERR, 5723 "Invalid parameter passed to rep_is_pf devargs.\n"); 5724 return -EINVAL; 5725 } 5726 5727 if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) { 5728 PMD_DRV_LOG(ERR, 5729 "Invalid value passed to rep_is_pf devargs.\n"); 5730 return -EINVAL; 5731 } 5732 5733 vfr_bp->flags |= rep_is_pf; 5734 if (BNXT_REP_PF(vfr_bp)) 5735 PMD_DRV_LOG(INFO, "PF representor\n"); 5736 else 5737 PMD_DRV_LOG(INFO, "VF representor\n"); 5738 5739 return 0; 5740 } 5741 5742 static int 5743 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key, 5744 const char *value, void *opaque_arg) 5745 { 5746 struct bnxt_representor *vfr_bp = opaque_arg; 5747 unsigned long rep_based_pf; 5748 char *end = NULL; 5749 5750 if (!value || !opaque_arg) { 5751 PMD_DRV_LOG(ERR, 5752 "Invalid parameter passed to rep_based_pf " 5753 "devargs.\n"); 5754 return -EINVAL; 5755 } 5756 5757 rep_based_pf = strtoul(value, &end, 10); 5758 if (end == NULL || *end != '\0' || 5759 (rep_based_pf == ULONG_MAX && errno == ERANGE)) { 5760 PMD_DRV_LOG(ERR, 5761 "Invalid parameter passed to rep_based_pf " 5762 "devargs.\n"); 5763 return -EINVAL; 5764 } 5765 5766 if 
(BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) { 5767 PMD_DRV_LOG(ERR, 5768 "Invalid value passed to rep_based_pf devargs.\n"); 5769 return -EINVAL; 5770 } 5771 5772 vfr_bp->rep_based_pf = rep_based_pf; 5773 PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf); 5774 5775 return 0; 5776 } 5777 5778 static int 5779 bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key, 5780 const char *value, void *opaque_arg) 5781 { 5782 struct bnxt_representor *vfr_bp = opaque_arg; 5783 unsigned long rep_q_r2f; 5784 char *end = NULL; 5785 5786 if (!value || !opaque_arg) { 5787 PMD_DRV_LOG(ERR, 5788 "Invalid parameter passed to rep_q_r2f " 5789 "devargs.\n"); 5790 return -EINVAL; 5791 } 5792 5793 rep_q_r2f = strtoul(value, &end, 10); 5794 if (end == NULL || *end != '\0' || 5795 (rep_q_r2f == ULONG_MAX && errno == ERANGE)) { 5796 PMD_DRV_LOG(ERR, 5797 "Invalid parameter passed to rep_q_r2f " 5798 "devargs.\n"); 5799 return -EINVAL; 5800 } 5801 5802 if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) { 5803 PMD_DRV_LOG(ERR, 5804 "Invalid value passed to rep_q_r2f devargs.\n"); 5805 return -EINVAL; 5806 } 5807 5808 vfr_bp->rep_q_r2f = rep_q_r2f; 5809 vfr_bp->flags |= BNXT_REP_Q_R2F_VALID; 5810 PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f); 5811 5812 return 0; 5813 } 5814 5815 static int 5816 bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key, 5817 const char *value, void *opaque_arg) 5818 { 5819 struct bnxt_representor *vfr_bp = opaque_arg; 5820 unsigned long rep_q_f2r; 5821 char *end = NULL; 5822 5823 if (!value || !opaque_arg) { 5824 PMD_DRV_LOG(ERR, 5825 "Invalid parameter passed to rep_q_f2r " 5826 "devargs.\n"); 5827 return -EINVAL; 5828 } 5829 5830 rep_q_f2r = strtoul(value, &end, 10); 5831 if (end == NULL || *end != '\0' || 5832 (rep_q_f2r == ULONG_MAX && errno == ERANGE)) { 5833 PMD_DRV_LOG(ERR, 5834 "Invalid parameter passed to rep_q_f2r " 5835 "devargs.\n"); 5836 return -EINVAL; 5837 } 5838 5839 if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) { 5840 PMD_DRV_LOG(ERR, 5841 "Invalid value passed to rep_q_f2r devargs.\n"); 5842 return -EINVAL; 5843 } 5844 5845 vfr_bp->rep_q_f2r = rep_q_f2r; 5846 vfr_bp->flags |= BNXT_REP_Q_F2R_VALID; 5847 PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r); 5848 5849 return 0; 5850 } 5851 5852 static int 5853 bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key, 5854 const char *value, void *opaque_arg) 5855 { 5856 struct bnxt_representor *vfr_bp = opaque_arg; 5857 unsigned long rep_fc_r2f; 5858 char *end = NULL; 5859 5860 if (!value || !opaque_arg) { 5861 PMD_DRV_LOG(ERR, 5862 "Invalid parameter passed to rep_fc_r2f " 5863 "devargs.\n"); 5864 return -EINVAL; 5865 } 5866 5867 rep_fc_r2f = strtoul(value, &end, 10); 5868 if (end == NULL || *end != '\0' || 5869 (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) { 5870 PMD_DRV_LOG(ERR, 5871 "Invalid parameter passed to rep_fc_r2f " 5872 "devargs.\n"); 5873 return -EINVAL; 5874 } 5875 5876 if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) { 5877 PMD_DRV_LOG(ERR, 5878 "Invalid value passed to rep_fc_r2f devargs.\n"); 5879 return -EINVAL; 5880 } 5881 5882 vfr_bp->flags |= BNXT_REP_FC_R2F_VALID; 5883 vfr_bp->rep_fc_r2f = rep_fc_r2f; 5884 PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f); 5885 5886 return 0; 5887 } 5888 5889 static int 5890 bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key, 5891 const char *value, void *opaque_arg) 5892 { 5893 struct bnxt_representor *vfr_bp = opaque_arg; 5894 unsigned long rep_fc_f2r; 5895 char *end = NULL; 5896 5897 if (!value || !opaque_arg) { 5898 

static int
bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key,
			     const char *value, void *opaque_arg)
{
	struct bnxt_representor *vfr_bp = opaque_arg;
	unsigned long rep_fc_f2r;
	char *end = NULL;

	if (!value || !opaque_arg) {
		PMD_DRV_LOG(ERR,
			    "Invalid parameter passed to rep_fc_f2r devargs.\n");
		return -EINVAL;
	}

	rep_fc_f2r = strtoul(value, &end, 10);
	if (end == NULL || *end != '\0' ||
	    (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) {
		PMD_DRV_LOG(ERR,
			    "Invalid parameter passed to rep_fc_f2r devargs.\n");
		return -EINVAL;
	}

	if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) {
		PMD_DRV_LOG(ERR,
			    "Invalid value passed to rep_fc_f2r devargs.\n");
		return -EINVAL;
	}

	vfr_bp->flags |= BNXT_REP_FC_F2R_VALID;
	vfr_bp->rep_fc_f2r = rep_fc_f2r;
	PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r);

	return 0;
}

static void
bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args);
	if (kvlist == NULL)
		return;

	/*
	 * Handler for "host-based-truflow" devarg.
	 * Invoked as for ex: "-w 0000:00:0d.0,host-based-truflow=1"
	 */
	rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW,
			   bnxt_parse_devarg_truflow, bp);

	/*
	 * Handler for "flow-xstat" devarg.
	 * Invoked as for ex: "-w 0000:00:0d.0,flow-xstat=1"
	 */
	rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT,
			   bnxt_parse_devarg_flow_xstat, bp);

	/*
	 * Handler for "max-num-kflows" devarg.
	 * Invoked as for ex: "-w 0000:00:0d.0,max-num-kflows=32"
	 */
	rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS,
			   bnxt_parse_devarg_max_num_kflows, bp);

	rte_kvargs_free(kvlist);
}
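
/*
 * Illustrative sketch only (not called by the driver): the scalar devargs
 * handled by bnxt_parse_dev_args() above combine into a single comma
 * separated string on the device whitelist entry, for example
 * "-w 0000:00:0d.0,host-based-truflow=1,flow-xstat=1,max-num-kflows=32".
 * The sample string and the helper name below are hypothetical; the key
 * table (bnxt_dev_args) and the kvargs calls are the ones used above.
 */
static inline int
bnxt_example_parse_combined_devargs(struct bnxt *bp)
{
	static const char sample[] =
		"host-based-truflow=1,flow-xstat=1,max-num-kflows=32";
	struct rte_kvargs *kvlist;

	kvlist = rte_kvargs_parse(sample, bnxt_dev_args);
	if (kvlist == NULL)
		return -EINVAL;

	/* Each key is dispatched to the same handlers used above. */
	rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT,
			   bnxt_parse_devarg_flow_xstat, bp);
	rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS,
			   bnxt_parse_devarg_max_num_kflows, bp);

	rte_kvargs_free(kvlist);
	return 0;
}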

static int bnxt_alloc_switch_domain(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
		rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id);
		if (rc)
			PMD_DRV_LOG(ERR,
				    "Failed to alloc switch domain: %d\n", rc);
		else
			PMD_DRV_LOG(INFO,
				    "Switch domain allocated %d\n",
				    bp->switch_domain_id);
	}

	return rc;
}

static int
bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	static int version_printed;
	struct bnxt *bp;
	int rc;

	if (version_printed++ == 0)
		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);

	eth_dev->dev_ops = &bnxt_dev_ops;
	eth_dev->rx_queue_count = bnxt_rx_queue_count_op;
	eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op;
	eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;

	/*
	 * For secondary processes, we don't initialise any further
	 * as primary has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	bp = eth_dev->data->dev_private;

	/* Parse dev arguments passed on when starting the DPDK application. */
	bnxt_parse_dev_args(bp, pci_dev->device.devargs);

	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;

	if (bnxt_vf_pciid(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_VF;

	if (bnxt_thor_device(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_THOR_CHIP;

	if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
		bp->flags |= BNXT_FLAG_STINGRAY;

	if (BNXT_TRUFLOW_EN(bp)) {
		/* extra mbuf field is required to store CFA code from mark */
		static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = {
			.name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME,
			.size = sizeof(bnxt_cfa_code_dynfield_t),
			.align = __alignof__(bnxt_cfa_code_dynfield_t),
		};
		bnxt_cfa_code_dynfield_offset =
			rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc);
		if (bnxt_cfa_code_dynfield_offset < 0) {
			PMD_DRV_LOG(ERR,
				    "Failed to register mbuf field for TruFlow mark\n");
			return -rte_errno;
		}
	}

	rc = bnxt_init_board(eth_dev);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Failed to initialize board rc: %x\n", rc);
		return rc;
	}

	rc = bnxt_alloc_pf_info(bp);
	if (rc)
		goto error_free;

	rc = bnxt_alloc_link_info(bp);
	if (rc)
		goto error_free;

	rc = bnxt_alloc_parent_info(bp);
	if (rc)
		goto error_free;

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Failed to allocate hwrm resource rc: %x\n", rc);
		goto error_free;
	}
	rc = bnxt_alloc_leds_info(bp);
	if (rc)
		goto error_free;

	rc = bnxt_alloc_cos_queues(bp);
	if (rc)
		goto error_free;

	rc = bnxt_init_resources(bp, false);
	if (rc)
		goto error_free;

	rc = bnxt_alloc_stats_mem(bp);
	if (rc)
		goto error_free;

	bnxt_alloc_switch_domain(bp);

	PMD_DRV_LOG(INFO,
		    DRV_MODULE_NAME " found at mem %" PRIX64 ", node addr %p\n",
		    pci_dev->mem_resource[0].phys_addr,
		    pci_dev->mem_resource[0].addr);

	return 0;

error_free:
	bnxt_dev_uninit(eth_dev);
	return rc;
}
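
/*
 * Illustrative sketch only (not used by the driver): once bnxt_dev_init()
 * has registered the TruFlow CFA code dynamic mbuf field above, the value
 * can be read back through the registered offset. The helper name is
 * hypothetical; the field type and offset are the ones registered in
 * bnxt_dev_init().
 */
static inline bnxt_cfa_code_dynfield_t
bnxt_example_read_cfa_code(const struct rte_mbuf *mbuf)
{
	/* Only meaningful when registration succeeded (offset >= 0). */
	return *RTE_MBUF_DYNFIELD(mbuf, bnxt_cfa_code_dynfield_offset,
				  bnxt_cfa_code_dynfield_t *);
}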

static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx)
{
	if (!ctx)
		return;

	if (ctx->va)
		rte_free(ctx->va);

	ctx->va = NULL;
	ctx->dma = RTE_BAD_IOVA;
	ctx->ctx_id = BNXT_CTX_VAL_INVAL;
}

static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp)
{
	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
				  CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				  bp->flow_stat->rx_fc_out_tbl.ctx_id,
				  bp->flow_stat->max_fc,
				  false);

	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
				  CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				  bp->flow_stat->tx_fc_out_tbl.ctx_id,
				  bp->flow_stat->max_fc,
				  false);

	if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id);
	bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;

	if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id);
	bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;

	if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id);
	bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;

	if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id);
	bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
}

static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp)
{
	bnxt_unregister_fc_ctx_mem(bp);

	bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl);
	bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl);
	bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl);
	bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl);
}

static void bnxt_uninit_ctx_mem(struct bnxt *bp)
{
	if (BNXT_FLOW_XSTATS_EN(bp))
		bnxt_uninit_fc_ctx_mem(bp);
}

static void
bnxt_free_error_recovery_info(struct bnxt *bp)
{
	rte_free(bp->recovery_info);
	bp->recovery_info = NULL;
	bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
}

static void
bnxt_uninit_locks(struct bnxt *bp)
{
	pthread_mutex_destroy(&bp->flow_lock);
	pthread_mutex_destroy(&bp->def_cp_lock);
	pthread_mutex_destroy(&bp->health_check_lock);
	if (bp->rep_info) {
		pthread_mutex_destroy(&bp->rep_info->vfr_lock);
		pthread_mutex_destroy(&bp->rep_info->vfr_start_lock);
	}
}

static int
bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
{
	int rc;

	bnxt_free_int(bp);
	bnxt_free_mem(bp, reconfig_dev);

	bnxt_hwrm_func_buf_unrgtr(bp);
	rte_free(bp->pf->vf_req_buf);

	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
	bp->flags &= ~BNXT_FLAG_REGISTERED;
	bnxt_free_ctx_mem(bp);
	if (!reconfig_dev) {
		bnxt_free_hwrm_resources(bp);
		bnxt_free_error_recovery_info(bp);
	}

	bnxt_uninit_ctx_mem(bp);

	bnxt_uninit_locks(bp);
	bnxt_free_flow_stats_info(bp);
	bnxt_free_rep_info(bp);
	rte_free(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	return rc;
}

static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		bnxt_dev_close_op(eth_dev);

	return 0;
}

static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_dev *vf_rep_eth_dev;
	int ret = 0, i;

	if (!bp)
		return -EINVAL;

	for (i = 0; i < bp->num_reps; i++) {
		vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev;
		if (!vf_rep_eth_dev)
			continue;
		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n",
			    vf_rep_eth_dev->data->port_id);
		rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit);
	}
	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n",
		    eth_dev->data->port_id);
	ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit);

	return ret;
}

static void bnxt_free_rep_info(struct bnxt *bp)
{
	rte_free(bp->rep_info);
	bp->rep_info = NULL;
	rte_free(bp->cfa_code_map);
	bp->cfa_code_map = NULL;
}

static int bnxt_init_rep_info(struct bnxt *bp)
{
	int i = 0, rc;

	if (bp->rep_info)
		return 0;

	bp->rep_info = rte_zmalloc("bnxt_rep_info",
				   sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS,
				   0);
	if (!bp->rep_info) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
		return -ENOMEM;
	}
	bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
				       sizeof(*bp->cfa_code_map) *
				       BNXT_MAX_CFA_CODE, 0);
	if (!bp->cfa_code_map) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
		bnxt_free_rep_info(bp);
		return -ENOMEM;
	}

	for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
		bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;

	rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
		bnxt_free_rep_info(bp);
		return rc;
	}

	rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
		bnxt_free_rep_info(bp);
		return rc;
	}

	return rc;
}
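
/*
 * Illustrative sketch only (not used by the driver): bnxt_rep_port_probe()
 * below receives its representor list from rte_eth_devargs_parse(), which an
 * application triggers with a whitelist entry such as
 * "-w 0000:00:0d.0,representor=[0-3],rep-based-pf=8,rep-is-pf=1".
 * The sample usage and the helper name are hypothetical; the parse call and
 * the nb_representor_ports field are the same ones used in bnxt_pci_probe().
 */
static inline uint16_t
bnxt_example_count_representors(const char *devargs_str)
{
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };

	if (rte_eth_devargs_parse(devargs_str, &eth_da) != 0)
		return 0;

	return eth_da.nb_representor_ports;
}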

static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
			       struct rte_eth_devargs eth_da,
			       struct rte_eth_dev *backing_eth_dev,
			       const char *dev_args)
{
	struct rte_eth_dev *vf_rep_eth_dev;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct bnxt *backing_bp;
	uint16_t num_rep;
	int i, ret = 0;
	struct rte_kvargs *kvlist;

	num_rep = eth_da.nb_representor_ports;
	if (num_rep > BNXT_MAX_VF_REPS) {
		PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
			    num_rep, BNXT_MAX_VF_REPS);
		return -EINVAL;
	}

	if (num_rep >= RTE_MAX_ETHPORTS) {
		PMD_DRV_LOG(ERR,
			    "nb_representor_ports = %d > %d MAX ETHPORTS\n",
			    num_rep, RTE_MAX_ETHPORTS);
		return -EINVAL;
	}

	backing_bp = backing_eth_dev->data->dev_private;

	if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
		PMD_DRV_LOG(ERR,
			    "Not a PF or trusted VF. No Representor support\n");
		/* Returning an error is not an option.
		 * Applications are not handling this correctly
		 */
		return 0;
	}

	if (bnxt_init_rep_info(backing_bp))
		return 0;

	for (i = 0; i < num_rep; i++) {
		struct bnxt_representor representor = {
			.vf_id = eth_da.representor_ports[i],
			.switch_domain_id = backing_bp->switch_domain_id,
			.parent_dev = backing_eth_dev
		};

		if (representor.vf_id >= BNXT_MAX_VF_REPS) {
			PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
				    representor.vf_id, BNXT_MAX_VF_REPS);
			continue;
		}

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			 pci_dev->device.name, eth_da.representor_ports[i]);

		kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args);
		if (kvlist) {
			/*
			 * Handler for "rep-is-pf" devarg.
			 * Invoked as for ex: "-w 0000:00:0d.0,representor=[0],
			 * rep-based-pf=<pf index>,rep-is-pf=<VF=0 or PF=1>"
			 */
			rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF,
					   bnxt_parse_devarg_rep_is_pf,
					   (void *)&representor);
			/*
			 * Handler for "rep-based-pf" devarg.
			 * Invoked as for ex: "-w 0000:00:0d.0,representor=[0],
			 * rep-based-pf=<pf index>,rep-is-pf=<VF=0 or PF=1>"
			 */
			rte_kvargs_process(kvlist, BNXT_DEVARG_REP_BASED_PF,
					   bnxt_parse_devarg_rep_based_pf,
					   (void *)&representor);
			/*
			 * Handler for "rep-q-r2f" devarg.
			 * Invoked as for ex: "-w 0000:00:0d.0,representor=[0],
			 * rep-q-r2f=<COS queue index>"
			 */
			rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F,
					   bnxt_parse_devarg_rep_q_r2f,
					   (void *)&representor);
			/*
			 * Handler for "rep-q-f2r" devarg.
			 * Invoked as for ex: "-w 0000:00:0d.0,representor=[0],
			 * rep-q-f2r=<COS queue index>"
			 */
			rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R,
					   bnxt_parse_devarg_rep_q_f2r,
					   (void *)&representor);
			/*
			 * Handler for "rep-fc-r2f" devarg.
			 * Invoked as for ex: "-w 0000:00:0d.0,representor=[0],
			 * rep-fc-r2f=<0 or 1>"
			 */
			rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F,
					   bnxt_parse_devarg_rep_fc_r2f,
					   (void *)&representor);
			/*
			 * Handler for "rep-fc-f2r" devarg.
			 * Invoked as for ex: "-w 0000:00:0d.0,representor=[0],
			 * rep-fc-f2r=<0 or 1>"
			 */
			rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R,
					   bnxt_parse_devarg_rep_fc_f2r,
					   (void *)&representor);
		}

		ret = rte_eth_dev_create(&pci_dev->device, name,
					 sizeof(struct bnxt_representor),
					 NULL, NULL,
					 bnxt_representor_init,
					 &representor);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to create bnxt vf "
				    "representor %s.", name);
			goto err;
		}

		vf_rep_eth_dev = rte_eth_dev_allocated(name);
		if (!vf_rep_eth_dev) {
			PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
				    " for VF-Rep: %s.", name);
			ret = -ENODEV;
			goto err;
		}

		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n",
			    backing_eth_dev->data->port_id);
		backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
			vf_rep_eth_dev;
		backing_bp->num_reps++;
	}

	return 0;

err:
	/* If num_rep > 1, then rollback already created
	 * ports, since we'll be failing the probe anyway
	 */
	if (num_rep > 1)
		bnxt_pci_remove_dev_with_reps(backing_eth_dev);

	return ret;
}

static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			  struct rte_pci_device *pci_dev)
{
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	struct rte_eth_dev *backing_eth_dev;
	uint16_t num_rep;
	int ret = 0;

	if (pci_dev->device.devargs) {
		ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					    &eth_da);
		if (ret)
			return ret;
	}

	num_rep = eth_da.nb_representor_ports;
	PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
		    num_rep);

	/* We could come here after the first level of probe has already been
	 * invoked as part of an application bringup (OVS-DPDK vswitchd), so
	 * first check for an already allocated eth_dev for the backing device
	 * (PF/Trusted VF).
	 */
	backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (backing_eth_dev == NULL) {
		ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
					 sizeof(struct bnxt),
					 eth_dev_pci_specific_init, pci_dev,
					 bnxt_dev_init, NULL);

		if (ret || !num_rep)
			return ret;

		backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	}
	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n",
		    backing_eth_dev->data->port_id);

	if (!num_rep)
		return ret;

	/* probe representor ports now */
	ret = bnxt_rep_port_probe(pci_dev, eth_da, backing_eth_dev,
				  pci_dev->device.devargs->args);

	return ret;
}

static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return 0; /* Invoked typically only by OVS-DPDK; by the
			   * time it comes here the eth_dev has already been
			   * deleted by rte_eth_dev_close(), so returning
			   * a non-negative value will at least help in
			   * proper cleanup
			   */

	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_representor_uninit);
		else
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_dev_uninit);
	} else {
		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
	}
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
						  * and OVS-DPDK
						  */
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

RTE_LOG_REGISTER(bnxt_logtype_driver, pmd.net.bnxt.driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");