/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_tf_common.h"
#include "ulp_flow_db.h"

#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
	"Broadcom NetXtreme driver " DRV_MODULE_NAME;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_DEVARG_TRUFLOW	"host-based-truflow"
#define BNXT_DEVARG_FLOW_XSTAT	"flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS	"max-num-kflows"
#define BNXT_DEVARG_REPRESENTOR	"representor"

static const char *const bnxt_dev_args[] = {
	BNXT_DEVARG_REPRESENTOR,
	BNXT_DEVARG_TRUFLOW,
	BNXT_DEVARG_FLOW_XSTAT,
	BNXT_DEVARG_MAX_NUM_KFLOWS,
	NULL
};
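
/*
 * Illustrative only: these devargs are appended to the PCI device string as
 * a comma separated list, for example
 *   0000:0d:00.0,host-based-truflow=1,flow-xstat=1,max-num-kflows=64
 * The address and values above are placeholders, not recommendations.
 */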

/*
 * truflow == false to disable the feature
 * truflow == true to enable the feature
 */
#define	BNXT_DEVARG_TRUFLOW_INVALID(truflow)	((truflow) > 1)

/*
 * flow_xstat == false to disable the feature
 * flow_xstat == true to enable the feature
 */
#define	BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)	((flow_xstat) > 1)

/*
 * max_num_kflows must be >= 32
 * and must be a power-of-2 supported value
 * return: 1 -> invalid
 *         0 -> valid
 */
static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows)
{
	if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows))
		return 1;
	return 0;
}

static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
static void bnxt_cancel_fw_health_check(struct bnxt *bp);
static int bnxt_restore_vlan_filters(struct bnxt *bp);
static void bnxt_dev_recover(void *arg);
static void bnxt_free_error_recovery_info(struct bnxt *bp);
static void bnxt_free_rep_info(struct bnxt *bp);

int is_bnxt_in_error(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
		return -EIO;
	if (bp->flags & BNXT_FLAG_FW_RESET)
		return -EBUSY;

	return 0;
}

/***********************/

/*
 * High level utility functions
 */

static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
{
	if (!BNXT_CHIP_THOR(bp))
		return 1;

	return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
				  BNXT_RSS_ENTRIES_PER_CTX_THOR) /
				  BNXT_RSS_ENTRIES_PER_CTX_THOR;
}
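
/*
 * Worked example (assuming BNXT_RSS_ENTRIES_PER_CTX_THOR is 64): a Thor port
 * configured with 80 RX rings needs RTE_ALIGN_MUL_CEIL(80, 64) / 64 = 2 RSS
 * contexts, so bnxt_rss_hash_tbl_size() below reports 2 * 64 = 128
 * redirection table entries. Non-Thor chips always use HW_HASH_INDEX_SIZE.
 */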

uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
{
	if (!BNXT_CHIP_THOR(bp))
		return HW_HASH_INDEX_SIZE;

	return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
}

static void bnxt_free_parent_info(struct bnxt *bp)
{
	rte_free(bp->parent);
}

static void bnxt_free_pf_info(struct bnxt *bp)
{
	rte_free(bp->pf);
}

static void bnxt_free_link_info(struct bnxt *bp)
{
	rte_free(bp->link_info);
}

static void bnxt_free_leds_info(struct bnxt *bp)
{
	if (BNXT_VF(bp))
		return;

	rte_free(bp->leds);
	bp->leds = NULL;
}

static void bnxt_free_flow_stats_info(struct bnxt *bp)
{
	rte_free(bp->flow_stat);
	bp->flow_stat = NULL;
}

static void bnxt_free_cos_queues(struct bnxt *bp)
{
	rte_free(bp->rx_cos_queue);
	rte_free(bp->tx_cos_queue);
}

static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	/* tx/rx rings are configured as part of *_queue_setup callbacks.
	 * If the number of rings change across fw update,
	 * we don't have much choice except to warn the user.
	 */
	if (!reconfig) {
		bnxt_free_stats(bp);
		bnxt_free_tx_rings(bp);
		bnxt_free_rx_rings(bp);
	}
	bnxt_free_async_cp_ring(bp);
	bnxt_free_rxtx_nq_ring(bp);

	rte_free(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_alloc_parent_info(struct bnxt *bp)
{
	bp->parent = rte_zmalloc("bnxt_parent_info",
				 sizeof(struct bnxt_parent_info), 0);
	if (bp->parent == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_pf_info(struct bnxt *bp)
{
	bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0);
	if (bp->pf == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_link_info(struct bnxt *bp)
{
	bp->link_info =
		rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0);
	if (bp->link_info == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_leds_info(struct bnxt *bp)
{
	if (BNXT_VF(bp))
		return 0;

	bp->leds = rte_zmalloc("bnxt_leds",
			       BNXT_MAX_LED * sizeof(struct bnxt_led_info),
			       0);
	if (bp->leds == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_cos_queues(struct bnxt *bp)
{
	bp->rx_cos_queue =
		rte_zmalloc("bnxt_rx_cosq",
			    BNXT_COS_QUEUE_COUNT *
			    sizeof(struct bnxt_cos_queue_info),
			    0);
	if (bp->rx_cos_queue == NULL)
		return -ENOMEM;

	bp->tx_cos_queue =
		rte_zmalloc("bnxt_tx_cosq",
			    BNXT_COS_QUEUE_COUNT *
			    sizeof(struct bnxt_cos_queue_info),
			    0);
	if (bp->tx_cos_queue == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_flow_stats_info(struct bnxt *bp)
{
	bp->flow_stat = rte_zmalloc("bnxt_flow_xstat",
				    sizeof(struct bnxt_flow_stat_info), 0);
	if (bp->flow_stat == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
{
	int rc;

	rc = bnxt_alloc_ring_grps(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_ring_struct(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_cp_ring(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rxtx_nq_ring(bp);
	if (rc)
		goto alloc_mem_err;

	if (BNXT_FLOW_XSTATS_EN(bp)) {
		rc = bnxt_alloc_flow_stats_info(bp);
		if (rc)
			goto alloc_mem_err;
	}

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, reconfig);
	return rc;
}

static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	struct bnxt_rx_queue *rxq;
	unsigned int j;
	int rc;

	rc = bnxt_vnic_grp_alloc(bp, vnic);
	if (rc)
		goto err_out;

	PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
		    vnic_id, vnic, vnic->fw_grp_ids);

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
	if (rc)
		goto err_out;

	/* Alloc RSS context only if RSS mode is enabled */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
		int j, nr_ctxs = bnxt_rss_ctxts(bp);

		rc = 0;
		for (j = 0; j < nr_ctxs; j++) {
			rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
			if (rc)
				break;
		}
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
				    vnic_id, j, rc);
			goto err_out;
		}
		vnic->num_lb_ctxts = nr_ctxs;
	}

	/*
	 * Firmware sets pf pair in default vnic cfg. If the VLAN strip
	 * setting is not available at this time, it will not be
	 * configured correctly in the CFA.
	 */
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
	else
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rc)
		goto err_out;

	rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
	if (rc)
		goto err_out;

	for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
		rxq = bp->eth_dev->data->rx_queues[j];

		PMD_DRV_LOG(DEBUG,
			    "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
			    j, rxq->vnic, rxq->vnic->fw_grp_ids);

		if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
			rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
		else
			vnic->rx_queue_cnt++;
	}

	PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);

	rc = bnxt_vnic_rss_configure(bp, vnic);
	if (rc)
		goto err_out;

	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
	else
		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);

	return 0;
err_out:
	PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
		    vnic_id, rc);
	return rc;
}

static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma,
				&bp->flow_stat->rx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p"
		    " rx_fc_in_tbl.ctx_id = %d\n",
		    bp->flow_stat->rx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma),
		    bp->flow_stat->rx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma,
				&bp->flow_stat->rx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p"
		    " rx_fc_out_tbl.ctx_id = %d\n",
		    bp->flow_stat->rx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma),
		    bp->flow_stat->rx_fc_out_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma,
				&bp->flow_stat->tx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p"
		    " tx_fc_in_tbl.ctx_id = %d\n",
		    bp->flow_stat->tx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma),
		    bp->flow_stat->tx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma,
				&bp->flow_stat->tx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p"
		    " tx_fc_out_tbl.ctx_id = %d\n",
		    bp->flow_stat->tx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma),
		    bp->flow_stat->tx_fc_out_tbl.ctx_id);

	memset(bp->flow_stat->rx_fc_out_tbl.va,
	       0,
	       bp->flow_stat->rx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->flow_stat->rx_fc_out_tbl.ctx_id,
				       bp->flow_stat->max_fc,
				       true);
	if (rc)
		return rc;

	memset(bp->flow_stat->tx_fc_out_tbl.va,
	       0,
	       bp->flow_stat->tx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->flow_stat->tx_fc_out_tbl.ctx_id,
				       bp->flow_stat->max_fc,
				       true);

	return rc;
}

static int bnxt_alloc_ctx_mem_buf(char *type, size_t size,
				  struct bnxt_ctx_mem_buf_info *ctx)
{
	if (!ctx)
		return -EINVAL;

	ctx->va = rte_zmalloc(type, size, 0);
	if (ctx->va == NULL)
		return -ENOMEM;
	rte_mem_lock_page(ctx->va);
	ctx->size = size;
	ctx->dma = rte_mem_virt2iova(ctx->va);
	if (ctx->dma == RTE_BAD_IOVA)
		return -ENOMEM;

	return 0;
}

static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];
	uint16_t max_fc;
	int rc = 0;

	max_fc = bp->flow_stat->max_fc;

	sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(type,
				    max_fc * 4,
				    &bp->flow_stat->rx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(type,
				    max_fc * 16,
				    &bp->flow_stat->rx_fc_out_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(type,
				    max_fc * 4,
				    &bp->flow_stat->tx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(type,
				    max_fc * 16,
				    &bp->flow_stat->tx_fc_out_tbl);
	if (rc)
		return rc;

	rc = bnxt_register_fc_ctx_mem(bp);

	return rc;
}

static int bnxt_init_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) ||
	    !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) ||
	    !BNXT_FLOW_XSTATS_EN(bp))
		return 0;

	rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc);
	if (rc)
		return rc;

	rc = bnxt_init_fc_ctx_mem(bp);

	return rc;
}

static int bnxt_init_chip(struct bnxt *bp)
{
	struct rte_eth_link new;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
	uint32_t vec = BNXT_MISC_VEC_ID;
	unsigned int i, j;
	int rc;

	if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
		bp->eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		bp->flags |= BNXT_FLAG_JUMBO;
	} else {
		bp->eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	/* THOR does not support ring groups.
	 * But we will use the array to save RSS context IDs.
	 */
	if (BNXT_CHIP_THOR(bp))
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
		goto skip_cosq_cfg;

	for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
		if (bp->rx_cos_queue[i].id != 0xff) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];

			if (!vnic) {
				PMD_DRV_LOG(ERR,
					    "Num pools more than FW profile\n");
				rc = -EINVAL;
				goto err_out;
			}
			vnic->cos_queue_id = bp->rx_cos_queue[i].id;
			bp->rx_cosq_cnt++;
		}
	}

skip_cosq_cfg:
	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_setup_one_vnic(bp, i);
		if (rc)
			goto err_out;
	}

	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = bp->eth_dev->data->nb_rx_queues;
		PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
		if (intr_vector > bp->rx_cp_nr_rings) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
					bp->rx_cp_nr_rings);
			return -ENOTSUP;
		}
		rc = rte_intr_efd_enable(intr_handle, intr_vector);
		if (rc)
			return rc;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    bp->eth_dev->data->nb_rx_queues *
				    sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
				" intr_vec", bp->eth_dev->data->nb_rx_queues);
			rc = -ENOMEM;
			goto err_disable;
		}
		PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
			"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
			intr_handle->intr_vec, intr_handle->nb_efd,
			intr_handle->max_intr);
		for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
		     queue_id++) {
			intr_handle->intr_vec[queue_id] =
							 vec + BNXT_RX_VEC_START;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}
	}

	/* enable uio/vfio intr/eventfd mapping */
	rc = rte_intr_enable(intr_handle);
#ifndef RTE_EXEC_ENV_FREEBSD
	/* In FreeBSD OS, nic_uio driver does not support interrupts */
	if (rc)
		goto err_free;
#endif

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
		goto err_free;
	}

	if (!bp->link_info->link_up) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			PMD_DRV_LOG(ERR,
				"HWRM link config failure rc: %x\n", rc);
			goto err_free;
		}
	}
	bnxt_print_link_info(bp->eth_dev);

	bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0);
	if (!bp->mark_table)
		PMD_DRV_LOG(ERR, "Allocation of mark table failed\n");

	return 0;

err_free:
	rte_free(intr_handle->intr_vec);
err_disable:
	rte_intr_efd_disable(intr_handle);
err_out:
	/* Some of the error status returned by FW may not be from errno.h */
	if (rc > 0)
		rc = -EIO;

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}

/*
 * Device configuration and status function
 */

uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
{
	uint32_t link_speed = bp->link_info->support_speeds;
	uint32_t speed_capa = 0;

	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
		speed_capa |= ETH_LINK_SPEED_100M;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
		speed_capa |= ETH_LINK_SPEED_100M_HD;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
		speed_capa |= ETH_LINK_SPEED_1G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
		speed_capa |= ETH_LINK_SPEED_2_5G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
		speed_capa |= ETH_LINK_SPEED_10G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
		speed_capa |= ETH_LINK_SPEED_20G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
		speed_capa |= ETH_LINK_SPEED_25G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
		speed_capa |= ETH_LINK_SPEED_40G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
		speed_capa |= ETH_LINK_SPEED_50G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
		speed_capa |= ETH_LINK_SPEED_100G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_200GB)
		speed_capa |= ETH_LINK_SPEED_200G;

	if (bp->link_info->auto_mode ==
	    HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
		speed_capa |= ETH_LINK_SPEED_FIXED;
	else
		speed_capa |= ETH_LINK_SPEED_AUTONEG;

	return speed_capa;
}

static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
	struct bnxt *bp = eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;
	unsigned int max_rx_rings;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* MAC Specifics */
	dev_info->max_mac_addrs = bp->max_l2_ctx;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp))
		dev_info->max_vfs = pdev->max_vfs;

	max_rx_rings = BNXT_MAX_RINGS(bp);
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
	dev_info->hash_key_size = 40;
	max_vnics = bp->max_vnics;

	/* MTU specifics */
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = BNXT_MAX_MTU;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);

	/* *INDENT-OFF* */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		/* If no descriptors available, pkts are dropped by default */
		.rx_drop_en = 1,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	eth_dev->data->dev_conf.intr_conf.rxq = 1;
	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
	dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;

	/* *INDENT-ON* */

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 *       need further investigation.
	 */

	/* VMDq resources */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;

	return 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int rc;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
		rc = bnxt_hwrm_check_vf_rings(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
			return -ENOSPC;
		}

		/* If a resource has already been allocated - in this case
		 * it is the async completion ring, free it. Reallocate it after
		 * resource reservation. This will ensure the resource counts
		 * are calculated correctly.
		 */

		pthread_mutex_lock(&bp->def_cp_lock);

		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
			bnxt_disable_int(bp);
			bnxt_free_cp_ring(bp, bp->async_cp_ring);
		}

		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
			pthread_mutex_unlock(&bp->def_cp_lock);
			return -ENOSPC;
		}

		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
			rc = bnxt_alloc_async_cp_ring(bp);
			if (rc) {
				pthread_mutex_unlock(&bp->def_cp_lock);
				return rc;
			}
			bnxt_enable_int(bp);
		}

		pthread_mutex_unlock(&bp->def_cp_lock);
	} else {
		/* legacy driver needs to get updated values */
		rc = bnxt_hwrm_func_qcaps(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
			return rc;
		}
	}

	/* Inherit new configurations */
	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
	    + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
	    bp->max_stat_ctx)
		goto resource_error;

	if (BNXT_HAS_RING_GRPS(bp) &&
	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
		goto resource_error;

	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
	    bp->max_vnics < eth_dev->data->nb_rx_queues)
		goto resource_error;

	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
			BNXT_NUM_VLANS;
		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
	}
	return 0;

resource_error:
	PMD_DRV_LOG(ERR,
		    "Insufficient resources to support requested config\n");
	PMD_DRV_LOG(ERR,
		    "Num Queues Requested: Tx %d, Rx %d\n",
		    eth_dev->data->nb_tx_queues,
		    eth_dev->data->nb_rx_queues);
	PMD_DRV_LOG(ERR,
		    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
		    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
		    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
	return -ENOSPC;
}

void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
			eth_dev->data->port_id,
			(uint32_t)link->link_speed,
			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
			("full-duplex") : ("half-duplex"));
	else
		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
			eth_dev->data->port_id);
}
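
/*
 * Illustrative note: with the default RTE_PKTMBUF_HEADROOM of 128 bytes and
 * an mbuf pool created with a 2176-byte data room, a single mbuf holds 2048
 * bytes of frame data, so any max_rx_pkt_len above 2048 makes the check
 * below select scattered receive.
 */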

/*
 * Determine whether the current configuration requires support for scattered
 * receive; return 1 if scattered receive is required and 0 if not.
 */
static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
{
	uint16_t buf_size;
	int i;

	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
		return 1;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];

		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
				      RTE_PKTMBUF_HEADROOM);
		if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
			return 1;
	}
	return 0;
}

static eth_rx_burst_t
bnxt_receive_function(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
#ifndef RTE_LIBRTE_IEEE1588
	/*
	 * Vector mode receive can be enabled only if scatter rx is not
	 * in use and rx offloads are limited to VLAN stripping and
	 * CRC stripping.
	 */
	if (!eth_dev->data->scattered_rx &&
	    !(eth_dev->data->dev_conf.rxmode.offloads &
	      ~(DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_RSS_HASH |
		DEV_RX_OFFLOAD_VLAN_FILTER)) &&
	    !BNXT_TRUFLOW_EN(bp)) {
		PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
			    eth_dev->data->port_id);
		bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
		return bnxt_recv_pkts_vec;
	}
	PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
		    eth_dev->data->port_id);
	PMD_DRV_LOG(INFO,
		    "Port %d scatter: %d rx offload: %" PRIX64 "\n",
		    eth_dev->data->port_id,
		    eth_dev->data->scattered_rx,
		    eth_dev->data->dev_conf.rxmode.offloads);
#endif
#endif
	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
	return bnxt_recv_pkts;
}

static eth_tx_burst_t
bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
{
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
#ifndef RTE_LIBRTE_IEEE1588
	struct bnxt *bp = eth_dev->data->dev_private;

	/*
	 * Vector mode transmit can be enabled only if not using scatter rx
	 * or tx offloads.
	 */
	if (!eth_dev->data->scattered_rx &&
	    !eth_dev->data->dev_conf.txmode.offloads &&
	    !BNXT_TRUFLOW_EN(bp)) {
		PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
			    eth_dev->data->port_id);
		return bnxt_xmit_pkts_vec;
	}
	PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
		    eth_dev->data->port_id);
	PMD_DRV_LOG(INFO,
		    "Port %d scatter: %d tx offload: %" PRIX64 "\n",
		    eth_dev->data->port_id,
		    eth_dev->data->scattered_rx,
		    eth_dev->data->dev_conf.txmode.offloads);
#endif
#endif
	return bnxt_xmit_pkts;
}

static int bnxt_handle_if_change_status(struct bnxt *bp)
{
	int rc;

	/* Since fw has undergone a reset and lost all contexts,
	 * set fatal flag to not issue hwrm during cleanup
	 */
	bp->flags |= BNXT_FLAG_FATAL_ERROR;
	bnxt_uninit_resources(bp, true);

	/* clear fatal flag so that re-init happens */
	bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
	rc = bnxt_init_resources(bp, true);

	bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;

	return rc;
}

static int32_t
bnxt_create_port_app_df_rule(struct bnxt *bp, uint8_t flow_type,
			     uint32_t *flow_id)
{
	uint16_t port_id = bp->eth_dev->data->port_id;
	struct ulp_tlv_param param_list[] = {
		{
			.type = BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID,
			.length = 2,
			.value = {(port_id >> 8) & 0xff, port_id & 0xff}
		},
		{
			.type = BNXT_ULP_DF_PARAM_TYPE_LAST,
			.length = 0,
			.value = {0}
		}
	};

	return ulp_default_flow_create(bp->eth_dev, param_list, flow_type,
				       flow_id);
}

static int32_t
bnxt_create_df_rules(struct bnxt *bp)
{
	struct bnxt_ulp_data *cfg_data;
	int rc;

	cfg_data = bp->ulp_ctx->cfg_data;
	rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_PORT_TO_VS,
					  &cfg_data->port_to_app_flow_id);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Failed to create port to app default rule\n");
		return rc;
	}

	BNXT_TF_DBG(DEBUG, "***** created port to app default rule ******\n");
	rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_VS_TO_PORT,
					  &cfg_data->app_to_port_flow_id);
	if (!rc) {
		rc = ulp_default_flow_db_cfa_action_get(bp->ulp_ctx,
							cfg_data->app_to_port_flow_id,
							&cfg_data->tx_cfa_action);
		if (rc)
			goto err;

		BNXT_TF_DBG(DEBUG,
			    "***** created app to port default rule *****\n");
		return 0;
	}

err:
	BNXT_TF_DBG(DEBUG, "Failed to create app to port default rule\n");
	return rc;
}

static void
bnxt_destroy_df_rules(struct bnxt *bp)
{
	struct bnxt_ulp_data *cfg_data;

	cfg_data = bp->ulp_ctx->cfg_data;
	ulp_default_flow_destroy(bp->eth_dev, cfg_data->port_to_app_flow_id);
	ulp_default_flow_destroy(bp->eth_dev, cfg_data->app_to_port_flow_id);
}

static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int vlan_mask = 0;
	int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT;

	if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
		return -EINVAL;
	}

	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		PMD_DRV_LOG(ERR,
			"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	}

	do {
		rc = bnxt_hwrm_if_change(bp, true);
		if (rc == 0 || rc != -EAGAIN)
			break;

		rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL);
	} while (retry_cnt--);

	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
		rc = bnxt_handle_if_change_status(bp);
		if (rc)
			return rc;
	}

	bnxt_enable_int(bp);

	rc = bnxt_init_chip(bp);
	if (rc)
		goto error;

	eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
	eth_dev->data->dev_started = 1;

	bnxt_link_update(eth_dev, 1, ETH_LINK_UP);

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		vlan_mask |= ETH_VLAN_FILTER_MASK;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vlan_mask |= ETH_VLAN_STRIP_MASK;
	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
	if (rc)
		goto error;

	eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
	eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);

	pthread_mutex_lock(&bp->def_cp_lock);
	bnxt_schedule_fw_health_check(bp);
	pthread_mutex_unlock(&bp->def_cp_lock);

	if (BNXT_TRUFLOW_EN(bp))
		bnxt_ulp_init(bp);

	return 0;

error:
	bnxt_shutdown_nic(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_hwrm_if_change(bp, false);
	eth_dev->data->dev_started = 0;
	return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc = 0;

	if (!bp->link_info->link_up)
		rc = bnxt_set_hwrm_link_config(bp, true);
	if (!rc)
		eth_dev->data->dev_link.link_status = 1;

	bnxt_print_link_info(eth_dev);
	return rc;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	bp->link_info->link_up = 0;

	return 0;
}

static void bnxt_free_switch_domain(struct bnxt *bp)
{
	if (bp->switch_domain_id)
		rte_eth_switch_domain_free(bp->switch_domain_id);
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	eth_dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use */
	eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;

	bnxt_disable_int(bp);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	bnxt_cancel_fw_health_check(bp);

	bnxt_dev_set_link_down_op(eth_dev);

	/* Wait for link to be reset and the async notification to process.
	 * During reset recovery, there is no need to wait and
	 * VF/NPAR functions do not have privilege to change PHY config.
	 */
	if (!is_bnxt_in_error(bp) && BNXT_SINGLE_PF(bp))
		bnxt_link_update(eth_dev, 1, ETH_LINK_DOWN);

	/* Clean queue intr-vector mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	bnxt_hwrm_port_clr_stats(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	/* Process any remaining notifications in default completion queue */
	bnxt_int_handler(eth_dev);
	bnxt_shutdown_nic(bp);
	bnxt_hwrm_if_change(bp, false);

	rte_free(bp->mark_table);
	bp->mark_table = NULL;

	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
	bp->rx_cosq_cnt = 0;
	/* All filters are deleted on a port stop. */
	if (BNXT_FLOW_XSTATS_EN(bp))
		bp->flow_stat->flow_count = 0;
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	/* cancel the recovery handler before remove dev */
	rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
	rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
	bnxt_cancel_fc_thread(bp);

	if (BNXT_TRUFLOW_EN(bp)) {
		if (bp->rep_info != NULL)
			bnxt_destroy_df_rules(bp);
		bnxt_ulp_deinit(bp);
	}

	if (eth_dev->data->dev_started)
		bnxt_dev_stop_op(eth_dev);

	bnxt_free_switch_domain(bp);

	bnxt_uninit_resources(bp, false);

	bnxt_free_leds_info(bp);
	bnxt_free_cos_queues(bp);
	bnxt_free_link_info(bp);
	bnxt_free_pf_info(bp);
	bnxt_free_parent_info(bp);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
	bp->tx_mem_zone = NULL;
	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
	bp->rx_mem_zone = NULL;

	rte_free(bp->pf->vf_info);
	bp->pf->vf_info = NULL;

	rte_free(bp->grp_info);
	bp->grp_info = NULL;
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	uint32_t i;

	if (is_bnxt_in_error(bp))
		return;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < bp->nr_vnics; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		vnic = &bp->vnic_info[i];
		filter = STAILQ_FIRST(&vnic->filter);
		while (filter) {
			temp_filter = STAILQ_NEXT(filter, next);
			if (filter->mac_index == index) {
				STAILQ_REMOVE(&vnic->filter, filter,
						bnxt_filter_info, next);
				bnxt_hwrm_clear_l2_filter(bp, filter);
				bnxt_free_filter(bp, filter);
			}
			filter = temp_filter;
		}
	}
}

static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			       struct rte_ether_addr *mac_addr, uint32_t index,
			       uint32_t pool)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			PMD_DRV_LOG(DEBUG,
				    "MAC addr already existed for pool %d\n",
				    pool);
			return 0;
		}
	}

	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
		return -ENODEV;
	}

	/* bnxt_alloc_filter copies default MAC to filter->l2_addr. So,
	 * if the MAC that's been programmed now is a different one, then,
	 * copy that addr to filter->l2_addr
	 */
	if (mac_addr)
		memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;

	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
	if (!rc) {
		filter->mac_index = index;
		if (filter->mac_index == 0)
			STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
		else
			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	} else {
		bnxt_free_filter(bp, filter);
	}

	return rc;
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool);

	return rc;
}

int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete,
		     bool exp_link_status)
{
	int rc = 0;
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_link new;
	int cnt = exp_link_status ? BNXT_LINK_UP_WAIT_CNT :
		  BNXT_LINK_DOWN_WAIT_CNT;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR,
				"Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}

		if (!wait_to_complete || new.link_status == exp_link_status)
			break;

		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
	} while (cnt--);

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		rte_eth_linkstatus_set(eth_dev, &new);

		_rte_eth_dev_callback_process(eth_dev,
					      RTE_ETH_EVENT_INTR_LSC,
					      NULL);

		bnxt_print_link_info(eth_dev);
	}

	return rc;
}

int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
			int wait_to_complete)
{
	return bnxt_link_update(eth_dev, wait_to_complete, ETH_LINK_UP);
}

static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	if (BNXT_TRUFLOW_EN(bp) && bp->rep_info != NULL)
		bnxt_create_df_rules(bp);

	return rc;
}

static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

/* Return bnxt_rx_queue pointer corresponding to a given rxq. */
static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
{
	if (qid >= bp->rx_nr_rings)
		return NULL;

	return bp->eth_dev->data->rx_queues[qid];
}

/* Return rxq corresponding to a given rss table ring/group ID. */
static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
{
	struct bnxt_rx_queue *rxq;
	unsigned int i;

	if (!BNXT_HAS_RING_GRPS(bp)) {
		for (i = 0; i < bp->rx_nr_rings; i++) {
			rxq = bp->eth_dev->data->rx_queues[i];
			if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
				return rxq->index;
		}
	} else {
		for (i = 0; i < bp->rx_nr_rings; i++) {
			if (bp->grp_info[i].fw_grp_id == fwr)
				return i;
		}
	}

	return INVALID_HW_RING_ID;
}

static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
	uint16_t idx, sft;
	int i, rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (!vnic->rss_table)
		return -EINVAL;

	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
		return -EINVAL;

	if (reta_size != tbl_size) {
		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
			    "(%d) must equal the size supported by the hardware "
			    "(%d)\n", reta_size, tbl_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		struct bnxt_rx_queue *rxq;

		idx = i / RTE_RETA_GROUP_SIZE;
		sft = i % RTE_RETA_GROUP_SIZE;

		if (!(reta_conf[idx].mask & (1ULL << sft)))
			continue;

		rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
		if (!rxq) {
			PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
			return -EINVAL;
		}

		if (BNXT_CHIP_THOR(bp)) {
			vnic->rss_table[i * 2] =
				rxq->rx_ring->rx_ring_struct->fw_ring_id;
			vnic->rss_table[i * 2 + 1] =
				rxq->cp_ring->cp_ring_struct->fw_ring_id;
		} else {
			vnic->rss_table[i] =
			    vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
		}
	}

	bnxt_hwrm_vnic_rss_cfg(bp, vnic);
	return 0;
}

static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
	uint16_t idx, sft, i;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Retrieve from the default VNIC */
	if (!vnic)
		return -EINVAL;
	if (!vnic->rss_table)
		return -EINVAL;

	if (reta_size != tbl_size) {
		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
			    "(%d) must equal the size supported by the hardware "
			    "(%d)\n", reta_size, tbl_size);
		return -EINVAL;
	}

	for (idx = 0, i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		sft = i % RTE_RETA_GROUP_SIZE;

		if (reta_conf[idx].mask & (1ULL << sft)) {
			uint16_t qid;

			if (BNXT_CHIP_THOR(bp))
				qid = bnxt_rss_to_qid(bp,
						      vnic->rss_table[i * 2]);
			else
				qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);

			if (qid == INVALID_HW_RING_ID) {
				PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
				return -EINVAL;
			}
			reta_conf[idx].reta[sft] = qid;
		}
	}

	return 0;
}

static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
				   struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/*
	 * If RSS enablement were different than dev_configure,
	 * then return -EINVAL
	 */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (!rss_conf->rss_hf)
			PMD_DRV_LOG(ERR, "Hash type NONE\n");
	} else {
		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
			return -EINVAL;
	}

	bp->flags |= BNXT_FLAG_UPDATE_HASH;
	memcpy(&eth_dev->data->dev_conf.rx_adv_conf.rss_conf,
	       rss_conf,
	       sizeof(*rss_conf));

	/* Update the default RSS VNIC(s) */
	vnic = BNXT_GET_DEFAULT_VNIC(bp);
	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);

	/*
	 * If hashkey is not specified, use the previously configured
	 * hashkey
	 */
	if (!rss_conf->rss_key)
		goto rss_config;

	if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
		PMD_DRV_LOG(ERR,
			    "Invalid hashkey length, should be 16 bytes\n");
		return -EINVAL;
	}
	memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);

rss_config:
	bnxt_hwrm_vnic_rss_cfg(bp, vnic);
	return 0;
}

static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
	int len, rc;
	uint32_t hash_types;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* RSS configuration is the same for all VNICs */
	if (vnic && vnic->rss_hash_key) {
		if (rss_conf->rss_key) {
			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
		}

		hash_types = vnic->hash_type;
		rss_conf->rss_hf = 0;
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_IPV4;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_IPV6;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
		}
		if (hash_types) {
			PMD_DRV_LOG(ERR,
				"Unknown RSS config from firmware (%08x), RSS disabled",
				vnic->hash_type);
			return -ENOTSUP;
		}
	} else {
		rss_conf->rss_hf = 0;
	}
	return 0;
}

static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
			   struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_link link_info;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	rc = bnxt_get_hwrm_link_config(bp, &link_info);
	if (rc)
		return rc;

	memset(fc_conf, 0, sizeof(*fc_conf));
	if (bp->link_info->auto_pause)
		fc_conf->autoneg = 1;
	switch (bp->link_info->pause) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
		fc_conf->mode = RTE_FC_FULL;
		break;
	}
	return 0;
}

static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
			   struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = dev->data->dev_private;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
		PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
		return -ENOTSUP;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		bp->link_info->auto_pause = 0;
		bp->link_info->force_pause = 0;
		break;
	case RTE_FC_RX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info->auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
			bp->link_info->force_pause = 0;
		} else {
			bp->link_info->auto_pause = 0;
			bp->link_info->force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
		}
		break;
	case RTE_FC_TX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info->auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
			bp->link_info->force_pause = 0;
		} else {
			bp->link_info->auto_pause = 0;
= 2053 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 2054 } 2055 break; 2056 case RTE_FC_FULL: 2057 if (fc_conf->autoneg) { 2058 bp->link_info->auto_pause = 2059 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 2060 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2061 bp->link_info->force_pause = 0; 2062 } else { 2063 bp->link_info->auto_pause = 0; 2064 bp->link_info->force_pause = 2065 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 2066 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2067 } 2068 break; 2069 } 2070 return bnxt_set_hwrm_link_config(bp, true); 2071 } 2072 2073 /* Add UDP tunneling port */ 2074 static int 2075 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 2076 struct rte_eth_udp_tunnel *udp_tunnel) 2077 { 2078 struct bnxt *bp = eth_dev->data->dev_private; 2079 uint16_t tunnel_type = 0; 2080 int rc = 0; 2081 2082 rc = is_bnxt_in_error(bp); 2083 if (rc) 2084 return rc; 2085 2086 switch (udp_tunnel->prot_type) { 2087 case RTE_TUNNEL_TYPE_VXLAN: 2088 if (bp->vxlan_port_cnt) { 2089 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2090 udp_tunnel->udp_port); 2091 if (bp->vxlan_port != udp_tunnel->udp_port) { 2092 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2093 return -ENOSPC; 2094 } 2095 bp->vxlan_port_cnt++; 2096 return 0; 2097 } 2098 tunnel_type = 2099 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 2100 bp->vxlan_port_cnt++; 2101 break; 2102 case RTE_TUNNEL_TYPE_GENEVE: 2103 if (bp->geneve_port_cnt) { 2104 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2105 udp_tunnel->udp_port); 2106 if (bp->geneve_port != udp_tunnel->udp_port) { 2107 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2108 return -ENOSPC; 2109 } 2110 bp->geneve_port_cnt++; 2111 return 0; 2112 } 2113 tunnel_type = 2114 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 2115 bp->geneve_port_cnt++; 2116 break; 2117 default: 2118 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2119 return -ENOTSUP; 2120 } 2121 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 2122 tunnel_type); 2123 return rc; 2124 } 2125 2126 static int 2127 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 2128 struct rte_eth_udp_tunnel *udp_tunnel) 2129 { 2130 struct bnxt *bp = eth_dev->data->dev_private; 2131 uint16_t tunnel_type = 0; 2132 uint16_t port = 0; 2133 int rc = 0; 2134 2135 rc = is_bnxt_in_error(bp); 2136 if (rc) 2137 return rc; 2138 2139 switch (udp_tunnel->prot_type) { 2140 case RTE_TUNNEL_TYPE_VXLAN: 2141 if (!bp->vxlan_port_cnt) { 2142 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2143 return -EINVAL; 2144 } 2145 if (bp->vxlan_port != udp_tunnel->udp_port) { 2146 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2147 udp_tunnel->udp_port, bp->vxlan_port); 2148 return -EINVAL; 2149 } 2150 if (--bp->vxlan_port_cnt) 2151 return 0; 2152 2153 tunnel_type = 2154 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 2155 port = bp->vxlan_fw_dst_port_id; 2156 break; 2157 case RTE_TUNNEL_TYPE_GENEVE: 2158 if (!bp->geneve_port_cnt) { 2159 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2160 return -EINVAL; 2161 } 2162 if (bp->geneve_port != udp_tunnel->udp_port) { 2163 PMD_DRV_LOG(ERR, "Req Port: %d. 
Configured port: %d\n", 2164 udp_tunnel->udp_port, bp->geneve_port); 2165 return -EINVAL; 2166 } 2167 if (--bp->geneve_port_cnt) 2168 return 0; 2169 2170 tunnel_type = 2171 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 2172 port = bp->geneve_fw_dst_port_id; 2173 break; 2174 default: 2175 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2176 return -ENOTSUP; 2177 } 2178 2179 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 2180 if (!rc) { 2181 if (tunnel_type == 2182 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) 2183 bp->vxlan_port = 0; 2184 if (tunnel_type == 2185 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) 2186 bp->geneve_port = 0; 2187 } 2188 return rc; 2189 } 2190 2191 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2192 { 2193 struct bnxt_filter_info *filter; 2194 struct bnxt_vnic_info *vnic; 2195 int rc = 0; 2196 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2197 2198 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2199 filter = STAILQ_FIRST(&vnic->filter); 2200 while (filter) { 2201 /* Search for this matching MAC+VLAN filter */ 2202 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) { 2203 /* Delete the filter */ 2204 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2205 if (rc) 2206 return rc; 2207 STAILQ_REMOVE(&vnic->filter, filter, 2208 bnxt_filter_info, next); 2209 bnxt_free_filter(bp, filter); 2210 PMD_DRV_LOG(INFO, 2211 "Deleted vlan filter for %d\n", 2212 vlan_id); 2213 return 0; 2214 } 2215 filter = STAILQ_NEXT(filter, next); 2216 } 2217 return -ENOENT; 2218 } 2219 2220 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2221 { 2222 struct bnxt_filter_info *filter; 2223 struct bnxt_vnic_info *vnic; 2224 int rc = 0; 2225 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 2226 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 2227 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2228 2229 /* Implementation notes on the use of VNIC in this command: 2230 * 2231 * By default, these filters belong to default vnic for the function. 2232 * Once these filters are set up, only destination VNIC can be modified. 2233 * If the destination VNIC is not specified in this command, 2234 * then the HWRM shall only create an l2 context id. 2235 */ 2236 2237 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2238 filter = STAILQ_FIRST(&vnic->filter); 2239 /* Check if the VLAN has already been added */ 2240 while (filter) { 2241 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) 2242 return -EEXIST; 2243 2244 filter = STAILQ_NEXT(filter, next); 2245 } 2246 2247 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 2248 * command to create MAC+VLAN filter with the right flags, enables set. 2249 */ 2250 filter = bnxt_alloc_filter(bp); 2251 if (!filter) { 2252 PMD_DRV_LOG(ERR, 2253 "MAC/VLAN filter alloc failed\n"); 2254 return -ENOMEM; 2255 } 2256 /* MAC + VLAN ID filter */ 2257 /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only 2258 * untagged packets are received 2259 * 2260 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged 2261 * packets and only the programmed vlan's packets are received 2262 */ 2263 filter->l2_ivlan = vlan_id; 2264 filter->l2_ivlan_mask = 0x0FFF; 2265 filter->enables |= en; 2266 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 2267 2268 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 2269 if (rc) { 2270 /* Free the newly allocated filter as we were 2271 * not able to create the filter in hardware. 
2272 */ 2273 bnxt_free_filter(bp, filter); 2274 return rc; 2275 } 2276 2277 filter->mac_index = 0; 2278 /* Add this new filter to the list */ 2279 if (vlan_id == 0) 2280 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 2281 else 2282 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2283 2284 PMD_DRV_LOG(INFO, 2285 "Added Vlan filter for %d\n", vlan_id); 2286 return rc; 2287 } 2288 2289 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 2290 uint16_t vlan_id, int on) 2291 { 2292 struct bnxt *bp = eth_dev->data->dev_private; 2293 int rc; 2294 2295 rc = is_bnxt_in_error(bp); 2296 if (rc) 2297 return rc; 2298 2299 if (!eth_dev->data->dev_started) { 2300 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); 2301 return -EINVAL; 2302 } 2303 2304 /* These operations apply to ALL existing MAC/VLAN filters */ 2305 if (on) 2306 return bnxt_add_vlan_filter(bp, vlan_id); 2307 else 2308 return bnxt_del_vlan_filter(bp, vlan_id); 2309 } 2310 2311 static int bnxt_del_dflt_mac_filter(struct bnxt *bp, 2312 struct bnxt_vnic_info *vnic) 2313 { 2314 struct bnxt_filter_info *filter; 2315 int rc; 2316 2317 filter = STAILQ_FIRST(&vnic->filter); 2318 while (filter) { 2319 if (filter->mac_index == 0 && 2320 !memcmp(filter->l2_addr, bp->mac_addr, 2321 RTE_ETHER_ADDR_LEN)) { 2322 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2323 if (!rc) { 2324 STAILQ_REMOVE(&vnic->filter, filter, 2325 bnxt_filter_info, next); 2326 bnxt_free_filter(bp, filter); 2327 } 2328 return rc; 2329 } 2330 filter = STAILQ_NEXT(filter, next); 2331 } 2332 return 0; 2333 } 2334 2335 static int 2336 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) 2337 { 2338 struct bnxt_vnic_info *vnic; 2339 unsigned int i; 2340 int rc; 2341 2342 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2343 if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { 2344 /* Remove any VLAN filters programmed */ 2345 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2346 bnxt_del_vlan_filter(bp, i); 2347 2348 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2349 if (rc) 2350 return rc; 2351 } else { 2352 /* Default filter will allow packets that match the 2353 * dest mac. 
So, it has to be deleted, otherwise, we
                 * will end up receiving vlan packets for which the
                 * filter is not programmed, when hw-vlan-filter
                 * configuration is ON
                 */
                bnxt_del_dflt_mac_filter(bp, vnic);
                /* This filter will allow only untagged packets */
                bnxt_add_vlan_filter(bp, 0);
        }
        PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
                    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));

        return 0;
}

/* Remove the filters attached to a VNIC and free the VNIC in firmware. */
static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
{
        struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
        unsigned int i;
        int rc;

        /* Destroy vnic filters and vnic */
        if (bp->eth_dev->data->dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_VLAN_FILTER) {
                for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
                        bnxt_del_vlan_filter(bp, i);
        }
        bnxt_del_dflt_mac_filter(bp, vnic);

        rc = bnxt_hwrm_vnic_free(bp, vnic);
        if (rc)
                return rc;

        rte_free(vnic->fw_grp_ids);
        vnic->fw_grp_ids = NULL;

        vnic->rx_queue_cnt = 0;

        return 0;
}

static int
bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
{
        struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
        int rc;

        /* Destroy, recreate and reconfigure the default vnic */
        rc = bnxt_free_one_vnic(bp, 0);
        if (rc)
                return rc;

        /* default vnic 0 */
        rc = bnxt_setup_one_vnic(bp, 0);
        if (rc)
                return rc;

        if (bp->eth_dev->data->dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_VLAN_FILTER) {
                rc = bnxt_add_vlan_filter(bp, 0);
                if (rc)
                        return rc;
                rc = bnxt_restore_vlan_filters(bp);
                if (rc)
                        return rc;
        } else {
                rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
                if (rc)
                        return rc;
        }

        rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
        if (rc)
                return rc;

        PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
                    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));

        return rc;
}

static int
bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
{
        uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
        struct bnxt *bp = dev->data->dev_private;
        int rc;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        /* Filter settings will get applied when port is started */
        if (!dev->data->dev_started)
                return 0;

        if (mask & ETH_VLAN_FILTER_MASK) {
                /* Enable or disable VLAN filtering */
                rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
                if (rc)
                        return rc;
        }

        if (mask & ETH_VLAN_STRIP_MASK) {
                /* Enable or disable VLAN stripping */
                rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
                if (rc)
                        return rc;
        }

        if (mask & ETH_VLAN_EXTEND_MASK) {
                if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
                        PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
                else
                        PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
        }

        return 0;
}

static int
bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
                      uint16_t tpid)
{
        struct bnxt *bp = dev->data->dev_private;
        int qinq = dev->data->dev_conf.rxmode.offloads &
                   DEV_RX_OFFLOAD_VLAN_EXTEND;

        if (vlan_type != ETH_VLAN_TYPE_INNER &&
            vlan_type != ETH_VLAN_TYPE_OUTER) {
                PMD_DRV_LOG(ERR,
                            "Unsupported vlan type.");
                return -EINVAL;
        }
        if (!qinq) {
                PMD_DRV_LOG(ERR,
2489 "QinQ not enabled. Needs to be ON as we can " 2490 "accelerate only outer vlan\n"); 2491 return -EINVAL; 2492 } 2493 2494 if (vlan_type == ETH_VLAN_TYPE_OUTER) { 2495 switch (tpid) { 2496 case RTE_ETHER_TYPE_QINQ: 2497 bp->outer_tpid_bd = 2498 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; 2499 break; 2500 case RTE_ETHER_TYPE_VLAN: 2501 bp->outer_tpid_bd = 2502 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; 2503 break; 2504 case RTE_ETHER_TYPE_QINQ1: 2505 bp->outer_tpid_bd = 2506 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; 2507 break; 2508 case RTE_ETHER_TYPE_QINQ2: 2509 bp->outer_tpid_bd = 2510 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; 2511 break; 2512 case RTE_ETHER_TYPE_QINQ3: 2513 bp->outer_tpid_bd = 2514 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; 2515 break; 2516 default: 2517 PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); 2518 return -EINVAL; 2519 } 2520 bp->outer_tpid_bd |= tpid; 2521 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); 2522 } else if (vlan_type == ETH_VLAN_TYPE_INNER) { 2523 PMD_DRV_LOG(ERR, 2524 "Can accelerate only outer vlan in QinQ\n"); 2525 return -EINVAL; 2526 } 2527 2528 return 0; 2529 } 2530 2531 static int 2532 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 2533 struct rte_ether_addr *addr) 2534 { 2535 struct bnxt *bp = dev->data->dev_private; 2536 /* Default Filter is tied to VNIC 0 */ 2537 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2538 int rc; 2539 2540 rc = is_bnxt_in_error(bp); 2541 if (rc) 2542 return rc; 2543 2544 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 2545 return -EPERM; 2546 2547 if (rte_is_zero_ether_addr(addr)) 2548 return -EINVAL; 2549 2550 /* Filter settings will get applied when port is started */ 2551 if (!dev->data->dev_started) 2552 return 0; 2553 2554 /* Check if the requested MAC is already added */ 2555 if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0) 2556 return 0; 2557 2558 /* Destroy filter and re-create it */ 2559 bnxt_del_dflt_mac_filter(bp, vnic); 2560 2561 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 2562 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 2563 /* This filter will allow only untagged packets */ 2564 rc = bnxt_add_vlan_filter(bp, 0); 2565 } else { 2566 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0); 2567 } 2568 2569 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 2570 return rc; 2571 } 2572 2573 static int 2574 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 2575 struct rte_ether_addr *mc_addr_set, 2576 uint32_t nb_mc_addr) 2577 { 2578 struct bnxt *bp = eth_dev->data->dev_private; 2579 char *mc_addr_list = (char *)mc_addr_set; 2580 struct bnxt_vnic_info *vnic; 2581 uint32_t off = 0, i = 0; 2582 int rc; 2583 2584 rc = is_bnxt_in_error(bp); 2585 if (rc) 2586 return rc; 2587 2588 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2589 2590 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 2591 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 2592 goto allmulti; 2593 } 2594 2595 /* TODO Check for Duplicate mcast addresses */ 2596 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 2597 for (i = 0; i < nb_mc_addr; i++) { 2598 memcpy(vnic->mc_list + off, &mc_addr_list[i], 2599 RTE_ETHER_ADDR_LEN); 2600 off += RTE_ETHER_ADDR_LEN; 2601 } 2602 2603 vnic->mc_addr_cnt = i; 2604 if (vnic->mc_addr_cnt) 2605 vnic->flags |= BNXT_VNIC_INFO_MCAST; 2606 else 2607 vnic->flags &= ~BNXT_VNIC_INFO_MCAST; 2608 2609 allmulti: 2610 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2611 } 2612 2613 static int 2614 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 2615 { 2616 struct bnxt *bp = 
dev->data->dev_private; 2617 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 2618 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 2619 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 2620 uint8_t fw_rsvd = bp->fw_ver & 0xff; 2621 int ret; 2622 2623 ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", 2624 fw_major, fw_minor, fw_updt, fw_rsvd); 2625 2626 ret += 1; /* add the size of '\0' */ 2627 if (fw_size < (uint32_t)ret) 2628 return ret; 2629 else 2630 return 0; 2631 } 2632 2633 static void 2634 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2635 struct rte_eth_rxq_info *qinfo) 2636 { 2637 struct bnxt *bp = dev->data->dev_private; 2638 struct bnxt_rx_queue *rxq; 2639 2640 if (is_bnxt_in_error(bp)) 2641 return; 2642 2643 rxq = dev->data->rx_queues[queue_id]; 2644 2645 qinfo->mp = rxq->mb_pool; 2646 qinfo->scattered_rx = dev->data->scattered_rx; 2647 qinfo->nb_desc = rxq->nb_rx_desc; 2648 2649 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 2650 qinfo->conf.rx_drop_en = 0; 2651 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 2652 } 2653 2654 static void 2655 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2656 struct rte_eth_txq_info *qinfo) 2657 { 2658 struct bnxt *bp = dev->data->dev_private; 2659 struct bnxt_tx_queue *txq; 2660 2661 if (is_bnxt_in_error(bp)) 2662 return; 2663 2664 txq = dev->data->tx_queues[queue_id]; 2665 2666 qinfo->nb_desc = txq->nb_tx_desc; 2667 2668 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 2669 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 2670 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 2671 2672 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 2673 qinfo->conf.tx_rs_thresh = 0; 2674 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 2675 } 2676 2677 static int 2678 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2679 struct rte_eth_burst_mode *mode) 2680 { 2681 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 2682 2683 if (pkt_burst == bnxt_recv_pkts) { 2684 snprintf(mode->info, sizeof(mode->info), "%s", 2685 "Scalar"); 2686 return 0; 2687 } 2688 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 2689 if (pkt_burst == bnxt_recv_pkts_vec) { 2690 snprintf(mode->info, sizeof(mode->info), "%s", 2691 "Vector SSE"); 2692 return 0; 2693 } 2694 #endif 2695 2696 return -EINVAL; 2697 } 2698 2699 static int 2700 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2701 struct rte_eth_burst_mode *mode) 2702 { 2703 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 2704 2705 if (pkt_burst == bnxt_xmit_pkts) { 2706 snprintf(mode->info, sizeof(mode->info), "%s", 2707 "Scalar"); 2708 return 0; 2709 } 2710 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 2711 if (pkt_burst == bnxt_xmit_pkts_vec) { 2712 snprintf(mode->info, sizeof(mode->info), "%s", 2713 "Vector SSE"); 2714 return 0; 2715 } 2716 #endif 2717 2718 return -EINVAL; 2719 } 2720 2721 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 2722 { 2723 struct bnxt *bp = eth_dev->data->dev_private; 2724 uint32_t new_pkt_size; 2725 uint32_t rc = 0; 2726 uint32_t i; 2727 2728 rc = is_bnxt_in_error(bp); 2729 if (rc) 2730 return rc; 2731 2732 /* Exit if receive queues are not configured yet */ 2733 if (!eth_dev->data->nb_rx_queues) 2734 return rc; 2735 2736 new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 2737 VLAN_TAG_SIZE * BNXT_NUM_VLANS; 2738 2739 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 2740 /* 2741 * If vector-mode tx/rx is active, disallow any MTU change that would 2742 * 
require scattered receive support. 2743 */ 2744 if (eth_dev->data->dev_started && 2745 (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec || 2746 eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) && 2747 (new_pkt_size > 2748 eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 2749 PMD_DRV_LOG(ERR, 2750 "MTU change would require scattered rx support. "); 2751 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n"); 2752 return -EINVAL; 2753 } 2754 #endif 2755 2756 if (new_mtu > RTE_ETHER_MTU) { 2757 bp->flags |= BNXT_FLAG_JUMBO; 2758 bp->eth_dev->data->dev_conf.rxmode.offloads |= 2759 DEV_RX_OFFLOAD_JUMBO_FRAME; 2760 } else { 2761 bp->eth_dev->data->dev_conf.rxmode.offloads &= 2762 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 2763 bp->flags &= ~BNXT_FLAG_JUMBO; 2764 } 2765 2766 /* Is there a change in mtu setting? */ 2767 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size) 2768 return rc; 2769 2770 for (i = 0; i < bp->nr_vnics; i++) { 2771 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2772 uint16_t size = 0; 2773 2774 vnic->mru = BNXT_VNIC_MRU(new_mtu); 2775 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 2776 if (rc) 2777 break; 2778 2779 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); 2780 size -= RTE_PKTMBUF_HEADROOM; 2781 2782 if (size < new_mtu) { 2783 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 2784 if (rc) 2785 return rc; 2786 } 2787 } 2788 2789 if (!rc) 2790 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size; 2791 2792 PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); 2793 2794 return rc; 2795 } 2796 2797 static int 2798 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 2799 { 2800 struct bnxt *bp = dev->data->dev_private; 2801 uint16_t vlan = bp->vlan; 2802 int rc; 2803 2804 rc = is_bnxt_in_error(bp); 2805 if (rc) 2806 return rc; 2807 2808 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { 2809 PMD_DRV_LOG(ERR, 2810 "PVID cannot be modified for this function\n"); 2811 return -ENOTSUP; 2812 } 2813 bp->vlan = on ? 
pvid : 0; 2814 2815 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 2816 if (rc) 2817 bp->vlan = vlan; 2818 return rc; 2819 } 2820 2821 static int 2822 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 2823 { 2824 struct bnxt *bp = dev->data->dev_private; 2825 int rc; 2826 2827 rc = is_bnxt_in_error(bp); 2828 if (rc) 2829 return rc; 2830 2831 return bnxt_hwrm_port_led_cfg(bp, true); 2832 } 2833 2834 static int 2835 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 2836 { 2837 struct bnxt *bp = dev->data->dev_private; 2838 int rc; 2839 2840 rc = is_bnxt_in_error(bp); 2841 if (rc) 2842 return rc; 2843 2844 return bnxt_hwrm_port_led_cfg(bp, false); 2845 } 2846 2847 static uint32_t 2848 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id) 2849 { 2850 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2851 uint32_t desc = 0, raw_cons = 0, cons; 2852 struct bnxt_cp_ring_info *cpr; 2853 struct bnxt_rx_queue *rxq; 2854 struct rx_pkt_cmpl *rxcmp; 2855 int rc; 2856 2857 rc = is_bnxt_in_error(bp); 2858 if (rc) 2859 return rc; 2860 2861 rxq = dev->data->rx_queues[rx_queue_id]; 2862 cpr = rxq->cp_ring; 2863 raw_cons = cpr->cp_raw_cons; 2864 2865 while (1) { 2866 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 2867 rte_prefetch0(&cpr->cp_desc_ring[cons]); 2868 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 2869 2870 if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) { 2871 break; 2872 } else { 2873 raw_cons++; 2874 desc++; 2875 } 2876 } 2877 2878 return desc; 2879 } 2880 2881 static int 2882 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 2883 { 2884 struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue; 2885 struct bnxt_rx_ring_info *rxr; 2886 struct bnxt_cp_ring_info *cpr; 2887 struct bnxt_sw_rx_bd *rx_buf; 2888 struct rx_pkt_cmpl *rxcmp; 2889 uint32_t cons, cp_cons; 2890 int rc; 2891 2892 if (!rxq) 2893 return -EINVAL; 2894 2895 rc = is_bnxt_in_error(rxq->bp); 2896 if (rc) 2897 return rc; 2898 2899 cpr = rxq->cp_ring; 2900 rxr = rxq->rx_ring; 2901 2902 if (offset >= rxq->nb_rx_desc) 2903 return -EINVAL; 2904 2905 cons = RING_CMP(cpr->cp_ring_struct, offset); 2906 cp_cons = cpr->cp_raw_cons; 2907 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 2908 2909 if (cons > cp_cons) { 2910 if (CMPL_VALID(rxcmp, cpr->valid)) 2911 return RTE_ETH_RX_DESC_DONE; 2912 } else { 2913 if (CMPL_VALID(rxcmp, !cpr->valid)) 2914 return RTE_ETH_RX_DESC_DONE; 2915 } 2916 rx_buf = &rxr->rx_buf_ring[cons]; 2917 if (rx_buf->mbuf == NULL) 2918 return RTE_ETH_RX_DESC_UNAVAIL; 2919 2920 2921 return RTE_ETH_RX_DESC_AVAIL; 2922 } 2923 2924 static int 2925 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 2926 { 2927 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 2928 struct bnxt_tx_ring_info *txr; 2929 struct bnxt_cp_ring_info *cpr; 2930 struct bnxt_sw_tx_bd *tx_buf; 2931 struct tx_pkt_cmpl *txcmp; 2932 uint32_t cons, cp_cons; 2933 int rc; 2934 2935 if (!txq) 2936 return -EINVAL; 2937 2938 rc = is_bnxt_in_error(txq->bp); 2939 if (rc) 2940 return rc; 2941 2942 cpr = txq->cp_ring; 2943 txr = txq->tx_ring; 2944 2945 if (offset >= txq->nb_tx_desc) 2946 return -EINVAL; 2947 2948 cons = RING_CMP(cpr->cp_ring_struct, offset); 2949 txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 2950 cp_cons = cpr->cp_raw_cons; 2951 2952 if (cons > cp_cons) { 2953 if (CMPL_VALID(txcmp, cpr->valid)) 2954 return RTE_ETH_TX_DESC_UNAVAIL; 2955 } else { 2956 if (CMPL_VALID(txcmp, !cpr->valid)) 2957 return RTE_ETH_TX_DESC_UNAVAIL; 2958 } 2959 tx_buf = &txr->tx_buf_ring[cons]; 2960 
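        /* Descriptive note (assumes the behavior of the Tx completion path in
         * bnxt_txr.c): the completion handler frees the transmitted mbuf and
         * clears tx_buf->mbuf once a descriptor has been reclaimed, so a NULL
         * mbuf below means the slot is done; a non-NULL mbuf means it is
         * still in flight.
         */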
if (tx_buf->mbuf == NULL) 2961 return RTE_ETH_TX_DESC_DONE; 2962 2963 return RTE_ETH_TX_DESC_FULL; 2964 } 2965 2966 static struct bnxt_filter_info * 2967 bnxt_match_and_validate_ether_filter(struct bnxt *bp, 2968 struct rte_eth_ethertype_filter *efilter, 2969 struct bnxt_vnic_info *vnic0, 2970 struct bnxt_vnic_info *vnic, 2971 int *ret) 2972 { 2973 struct bnxt_filter_info *mfilter = NULL; 2974 int match = 0; 2975 *ret = 0; 2976 2977 if (efilter->ether_type == RTE_ETHER_TYPE_IPV4 || 2978 efilter->ether_type == RTE_ETHER_TYPE_IPV6) { 2979 PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in" 2980 " ethertype filter.", efilter->ether_type); 2981 *ret = -EINVAL; 2982 goto exit; 2983 } 2984 if (efilter->queue >= bp->rx_nr_rings) { 2985 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue); 2986 *ret = -EINVAL; 2987 goto exit; 2988 } 2989 2990 vnic0 = BNXT_GET_DEFAULT_VNIC(bp); 2991 vnic = &bp->vnic_info[efilter->queue]; 2992 if (vnic == NULL) { 2993 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue); 2994 *ret = -EINVAL; 2995 goto exit; 2996 } 2997 2998 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { 2999 STAILQ_FOREACH(mfilter, &vnic0->filter, next) { 3000 if ((!memcmp(efilter->mac_addr.addr_bytes, 3001 mfilter->l2_addr, RTE_ETHER_ADDR_LEN) && 3002 mfilter->flags == 3003 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP && 3004 mfilter->ethertype == efilter->ether_type)) { 3005 match = 1; 3006 break; 3007 } 3008 } 3009 } else { 3010 STAILQ_FOREACH(mfilter, &vnic->filter, next) 3011 if ((!memcmp(efilter->mac_addr.addr_bytes, 3012 mfilter->l2_addr, RTE_ETHER_ADDR_LEN) && 3013 mfilter->ethertype == efilter->ether_type && 3014 mfilter->flags == 3015 HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) { 3016 match = 1; 3017 break; 3018 } 3019 } 3020 3021 if (match) 3022 *ret = -EEXIST; 3023 3024 exit: 3025 return mfilter; 3026 } 3027 3028 static int 3029 bnxt_ethertype_filter(struct rte_eth_dev *dev, 3030 enum rte_filter_op filter_op, 3031 void *arg) 3032 { 3033 struct bnxt *bp = dev->data->dev_private; 3034 struct rte_eth_ethertype_filter *efilter = 3035 (struct rte_eth_ethertype_filter *)arg; 3036 struct bnxt_filter_info *bfilter, *filter1; 3037 struct bnxt_vnic_info *vnic, *vnic0; 3038 int ret; 3039 3040 if (filter_op == RTE_ETH_FILTER_NOP) 3041 return 0; 3042 3043 if (arg == NULL) { 3044 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 3045 filter_op); 3046 return -EINVAL; 3047 } 3048 3049 vnic0 = BNXT_GET_DEFAULT_VNIC(bp); 3050 vnic = &bp->vnic_info[efilter->queue]; 3051 3052 switch (filter_op) { 3053 case RTE_ETH_FILTER_ADD: 3054 bnxt_match_and_validate_ether_filter(bp, efilter, 3055 vnic0, vnic, &ret); 3056 if (ret < 0) 3057 return ret; 3058 3059 bfilter = bnxt_get_unused_filter(bp); 3060 if (bfilter == NULL) { 3061 PMD_DRV_LOG(ERR, 3062 "Not enough resources for a new filter.\n"); 3063 return -ENOMEM; 3064 } 3065 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; 3066 memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes, 3067 RTE_ETHER_ADDR_LEN); 3068 memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes, 3069 RTE_ETHER_ADDR_LEN); 3070 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; 3071 bfilter->ethertype = efilter->ether_type; 3072 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3073 3074 filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0); 3075 if (filter1 == NULL) { 3076 ret = -EINVAL; 3077 goto cleanup; 3078 } 3079 bfilter->enables |= 3080 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 3081 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; 3082 3083 
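                /* Deliver matching packets to the VNIC backing the requested
                 * queue; for RTE_ETHTYPE_FLAGS_DROP the flag set below marks
                 * the filter as a drop filter instead.
                 */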
bfilter->dst_id = vnic->fw_vnic_id; 3084 3085 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { 3086 bfilter->flags = 3087 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 3088 } 3089 3090 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); 3091 if (ret) 3092 goto cleanup; 3093 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); 3094 break; 3095 case RTE_ETH_FILTER_DELETE: 3096 filter1 = bnxt_match_and_validate_ether_filter(bp, efilter, 3097 vnic0, vnic, &ret); 3098 if (ret == -EEXIST) { 3099 ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1); 3100 3101 STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info, 3102 next); 3103 bnxt_free_filter(bp, filter1); 3104 } else if (ret == 0) { 3105 PMD_DRV_LOG(ERR, "No matching filter found\n"); 3106 } 3107 break; 3108 default: 3109 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 3110 ret = -EINVAL; 3111 goto error; 3112 } 3113 return ret; 3114 cleanup: 3115 bnxt_free_filter(bp, bfilter); 3116 error: 3117 return ret; 3118 } 3119 3120 static inline int 3121 parse_ntuple_filter(struct bnxt *bp, 3122 struct rte_eth_ntuple_filter *nfilter, 3123 struct bnxt_filter_info *bfilter) 3124 { 3125 uint32_t en = 0; 3126 3127 if (nfilter->queue >= bp->rx_nr_rings) { 3128 PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue); 3129 return -EINVAL; 3130 } 3131 3132 switch (nfilter->dst_port_mask) { 3133 case UINT16_MAX: 3134 bfilter->dst_port_mask = -1; 3135 bfilter->dst_port = nfilter->dst_port; 3136 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT | 3137 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 3138 break; 3139 default: 3140 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 3141 return -EINVAL; 3142 } 3143 3144 bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 3145 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3146 3147 switch (nfilter->proto_mask) { 3148 case UINT8_MAX: 3149 if (nfilter->proto == 17) /* IPPROTO_UDP */ 3150 bfilter->ip_protocol = 17; 3151 else if (nfilter->proto == 6) /* IPPROTO_TCP */ 3152 bfilter->ip_protocol = 6; 3153 else 3154 return -EINVAL; 3155 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3156 break; 3157 default: 3158 PMD_DRV_LOG(ERR, "invalid protocol mask."); 3159 return -EINVAL; 3160 } 3161 3162 switch (nfilter->dst_ip_mask) { 3163 case UINT32_MAX: 3164 bfilter->dst_ipaddr_mask[0] = -1; 3165 bfilter->dst_ipaddr[0] = nfilter->dst_ip; 3166 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR | 3167 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3168 break; 3169 default: 3170 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 3171 return -EINVAL; 3172 } 3173 3174 switch (nfilter->src_ip_mask) { 3175 case UINT32_MAX: 3176 bfilter->src_ipaddr_mask[0] = -1; 3177 bfilter->src_ipaddr[0] = nfilter->src_ip; 3178 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | 3179 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3180 break; 3181 default: 3182 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 3183 return -EINVAL; 3184 } 3185 3186 switch (nfilter->src_port_mask) { 3187 case UINT16_MAX: 3188 bfilter->src_port_mask = -1; 3189 bfilter->src_port = nfilter->src_port; 3190 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | 3191 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 3192 break; 3193 default: 3194 PMD_DRV_LOG(ERR, "invalid src_port mask."); 3195 return -EINVAL; 3196 } 3197 3198 bfilter->enables = en; 3199 return 0; 3200 } 3201 3202 static struct bnxt_filter_info* 3203 bnxt_match_ntuple_filter(struct bnxt *bp, 3204 struct bnxt_filter_info *bfilter, 3205 struct bnxt_vnic_info **mvnic) 3206 { 3207 struct bnxt_filter_info *mfilter = NULL; 3208 int i; 3209 3210 for (i = bp->nr_vnics - 1; i 
>= 0; i--) { 3211 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3212 STAILQ_FOREACH(mfilter, &vnic->filter, next) { 3213 if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] && 3214 bfilter->src_ipaddr_mask[0] == 3215 mfilter->src_ipaddr_mask[0] && 3216 bfilter->src_port == mfilter->src_port && 3217 bfilter->src_port_mask == mfilter->src_port_mask && 3218 bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] && 3219 bfilter->dst_ipaddr_mask[0] == 3220 mfilter->dst_ipaddr_mask[0] && 3221 bfilter->dst_port == mfilter->dst_port && 3222 bfilter->dst_port_mask == mfilter->dst_port_mask && 3223 bfilter->flags == mfilter->flags && 3224 bfilter->enables == mfilter->enables) { 3225 if (mvnic) 3226 *mvnic = vnic; 3227 return mfilter; 3228 } 3229 } 3230 } 3231 return NULL; 3232 } 3233 3234 static int 3235 bnxt_cfg_ntuple_filter(struct bnxt *bp, 3236 struct rte_eth_ntuple_filter *nfilter, 3237 enum rte_filter_op filter_op) 3238 { 3239 struct bnxt_filter_info *bfilter, *mfilter, *filter1; 3240 struct bnxt_vnic_info *vnic, *vnic0, *mvnic; 3241 int ret; 3242 3243 if (nfilter->flags != RTE_5TUPLE_FLAGS) { 3244 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 3245 return -EINVAL; 3246 } 3247 3248 if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { 3249 PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n"); 3250 return -EINVAL; 3251 } 3252 3253 bfilter = bnxt_get_unused_filter(bp); 3254 if (bfilter == NULL) { 3255 PMD_DRV_LOG(ERR, 3256 "Not enough resources for a new filter.\n"); 3257 return -ENOMEM; 3258 } 3259 ret = parse_ntuple_filter(bp, nfilter, bfilter); 3260 if (ret < 0) 3261 goto free_filter; 3262 3263 vnic = &bp->vnic_info[nfilter->queue]; 3264 vnic0 = BNXT_GET_DEFAULT_VNIC(bp); 3265 filter1 = STAILQ_FIRST(&vnic0->filter); 3266 if (filter1 == NULL) { 3267 ret = -EINVAL; 3268 goto free_filter; 3269 } 3270 3271 bfilter->dst_id = vnic->fw_vnic_id; 3272 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; 3273 bfilter->enables |= 3274 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 3275 bfilter->ethertype = 0x800; 3276 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3277 3278 mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic); 3279 3280 if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && 3281 bfilter->dst_id == mfilter->dst_id) { 3282 PMD_DRV_LOG(ERR, "filter exists.\n"); 3283 ret = -EEXIST; 3284 goto free_filter; 3285 } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && 3286 bfilter->dst_id != mfilter->dst_id) { 3287 mfilter->dst_id = vnic->fw_vnic_id; 3288 ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter); 3289 STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next); 3290 STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next); 3291 PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n"); 3292 PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n"); 3293 goto free_filter; 3294 } 3295 if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) { 3296 PMD_DRV_LOG(ERR, "filter doesn't exist."); 3297 ret = -ENOENT; 3298 goto free_filter; 3299 } 3300 3301 if (filter_op == RTE_ETH_FILTER_ADD) { 3302 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; 3303 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); 3304 if (ret) 3305 goto free_filter; 3306 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); 3307 } else { 3308 if (mfilter == NULL) { 3309 /* This should not happen. But for Coverity! 
*/ 3310 ret = -ENOENT; 3311 goto free_filter; 3312 } 3313 ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter); 3314 3315 STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next); 3316 bnxt_free_filter(bp, mfilter); 3317 bnxt_free_filter(bp, bfilter); 3318 } 3319 3320 return 0; 3321 free_filter: 3322 bnxt_free_filter(bp, bfilter); 3323 return ret; 3324 } 3325 3326 static int 3327 bnxt_ntuple_filter(struct rte_eth_dev *dev, 3328 enum rte_filter_op filter_op, 3329 void *arg) 3330 { 3331 struct bnxt *bp = dev->data->dev_private; 3332 int ret; 3333 3334 if (filter_op == RTE_ETH_FILTER_NOP) 3335 return 0; 3336 3337 if (arg == NULL) { 3338 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 3339 filter_op); 3340 return -EINVAL; 3341 } 3342 3343 switch (filter_op) { 3344 case RTE_ETH_FILTER_ADD: 3345 ret = bnxt_cfg_ntuple_filter(bp, 3346 (struct rte_eth_ntuple_filter *)arg, 3347 filter_op); 3348 break; 3349 case RTE_ETH_FILTER_DELETE: 3350 ret = bnxt_cfg_ntuple_filter(bp, 3351 (struct rte_eth_ntuple_filter *)arg, 3352 filter_op); 3353 break; 3354 default: 3355 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 3356 ret = -EINVAL; 3357 break; 3358 } 3359 return ret; 3360 } 3361 3362 static int 3363 bnxt_parse_fdir_filter(struct bnxt *bp, 3364 struct rte_eth_fdir_filter *fdir, 3365 struct bnxt_filter_info *filter) 3366 { 3367 enum rte_fdir_mode fdir_mode = 3368 bp->eth_dev->data->dev_conf.fdir_conf.mode; 3369 struct bnxt_vnic_info *vnic0, *vnic; 3370 struct bnxt_filter_info *filter1; 3371 uint32_t en = 0; 3372 int i; 3373 3374 if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 3375 return -EINVAL; 3376 3377 filter->l2_ovlan = fdir->input.flow_ext.vlan_tci; 3378 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; 3379 3380 switch (fdir->input.flow_type) { 3381 case RTE_ETH_FLOW_IPV4: 3382 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 3383 /* FALLTHROUGH */ 3384 filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip; 3385 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3386 filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip; 3387 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3388 filter->ip_protocol = fdir->input.flow.ip4_flow.proto; 3389 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3390 filter->ip_addr_type = 3391 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 3392 filter->src_ipaddr_mask[0] = 0xffffffff; 3393 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3394 filter->dst_ipaddr_mask[0] = 0xffffffff; 3395 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3396 filter->ethertype = 0x800; 3397 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3398 break; 3399 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 3400 filter->src_port = fdir->input.flow.tcp4_flow.src_port; 3401 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 3402 filter->dst_port = fdir->input.flow.tcp4_flow.dst_port; 3403 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 3404 filter->dst_port_mask = 0xffff; 3405 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 3406 filter->src_port_mask = 0xffff; 3407 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 3408 filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip; 3409 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3410 filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip; 3411 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3412 filter->ip_protocol = 6; 3413 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3414 filter->ip_addr_type = 3415 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 3416 filter->src_ipaddr_mask[0] = 0xffffffff; 3417 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3418 filter->dst_ipaddr_mask[0] = 
0xffffffff; 3419 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3420 filter->ethertype = 0x800; 3421 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3422 break; 3423 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 3424 filter->src_port = fdir->input.flow.udp4_flow.src_port; 3425 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 3426 filter->dst_port = fdir->input.flow.udp4_flow.dst_port; 3427 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 3428 filter->dst_port_mask = 0xffff; 3429 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 3430 filter->src_port_mask = 0xffff; 3431 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 3432 filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip; 3433 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3434 filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip; 3435 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3436 filter->ip_protocol = 17; 3437 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3438 filter->ip_addr_type = 3439 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 3440 filter->src_ipaddr_mask[0] = 0xffffffff; 3441 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3442 filter->dst_ipaddr_mask[0] = 0xffffffff; 3443 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3444 filter->ethertype = 0x800; 3445 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3446 break; 3447 case RTE_ETH_FLOW_IPV6: 3448 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 3449 /* FALLTHROUGH */ 3450 filter->ip_addr_type = 3451 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 3452 filter->ip_protocol = fdir->input.flow.ipv6_flow.proto; 3453 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3454 rte_memcpy(filter->src_ipaddr, 3455 fdir->input.flow.ipv6_flow.src_ip, 16); 3456 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3457 rte_memcpy(filter->dst_ipaddr, 3458 fdir->input.flow.ipv6_flow.dst_ip, 16); 3459 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3460 memset(filter->dst_ipaddr_mask, 0xff, 16); 3461 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3462 memset(filter->src_ipaddr_mask, 0xff, 16); 3463 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3464 filter->ethertype = 0x86dd; 3465 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3466 break; 3467 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 3468 filter->src_port = fdir->input.flow.tcp6_flow.src_port; 3469 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 3470 filter->dst_port = fdir->input.flow.tcp6_flow.dst_port; 3471 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 3472 filter->dst_port_mask = 0xffff; 3473 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 3474 filter->src_port_mask = 0xffff; 3475 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 3476 filter->ip_addr_type = 3477 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 3478 filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto; 3479 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3480 rte_memcpy(filter->src_ipaddr, 3481 fdir->input.flow.tcp6_flow.ip.src_ip, 16); 3482 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3483 rte_memcpy(filter->dst_ipaddr, 3484 fdir->input.flow.tcp6_flow.ip.dst_ip, 16); 3485 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3486 memset(filter->dst_ipaddr_mask, 0xff, 16); 3487 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3488 memset(filter->src_ipaddr_mask, 0xff, 16); 3489 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3490 filter->ethertype = 0x86dd; 3491 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3492 break; 3493 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 3494 filter->src_port = fdir->input.flow.udp6_flow.src_port; 3495 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 3496 filter->dst_port = 
fdir->input.flow.udp6_flow.dst_port; 3497 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 3498 filter->dst_port_mask = 0xffff; 3499 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 3500 filter->src_port_mask = 0xffff; 3501 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 3502 filter->ip_addr_type = 3503 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 3504 filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto; 3505 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 3506 rte_memcpy(filter->src_ipaddr, 3507 fdir->input.flow.udp6_flow.ip.src_ip, 16); 3508 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 3509 rte_memcpy(filter->dst_ipaddr, 3510 fdir->input.flow.udp6_flow.ip.dst_ip, 16); 3511 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 3512 memset(filter->dst_ipaddr_mask, 0xff, 16); 3513 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 3514 memset(filter->src_ipaddr_mask, 0xff, 16); 3515 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 3516 filter->ethertype = 0x86dd; 3517 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3518 break; 3519 case RTE_ETH_FLOW_L2_PAYLOAD: 3520 filter->ethertype = fdir->input.flow.l2_flow.ether_type; 3521 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 3522 break; 3523 case RTE_ETH_FLOW_VXLAN: 3524 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 3525 return -EINVAL; 3526 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 3527 filter->tunnel_type = 3528 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 3529 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 3530 break; 3531 case RTE_ETH_FLOW_NVGRE: 3532 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 3533 return -EINVAL; 3534 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 3535 filter->tunnel_type = 3536 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 3537 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 3538 break; 3539 case RTE_ETH_FLOW_UNKNOWN: 3540 case RTE_ETH_FLOW_RAW: 3541 case RTE_ETH_FLOW_FRAG_IPV4: 3542 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: 3543 case RTE_ETH_FLOW_FRAG_IPV6: 3544 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: 3545 case RTE_ETH_FLOW_IPV6_EX: 3546 case RTE_ETH_FLOW_IPV6_TCP_EX: 3547 case RTE_ETH_FLOW_IPV6_UDP_EX: 3548 case RTE_ETH_FLOW_GENEVE: 3549 /* FALLTHROUGH */ 3550 default: 3551 return -EINVAL; 3552 } 3553 3554 vnic0 = BNXT_GET_DEFAULT_VNIC(bp); 3555 vnic = &bp->vnic_info[fdir->action.rx_queue]; 3556 if (vnic == NULL) { 3557 PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue); 3558 return -EINVAL; 3559 } 3560 3561 if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 3562 rte_memcpy(filter->dst_macaddr, 3563 fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6); 3564 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; 3565 } 3566 3567 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) { 3568 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 3569 filter1 = STAILQ_FIRST(&vnic0->filter); 3570 //filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 3571 } else { 3572 filter->dst_id = vnic->fw_vnic_id; 3573 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 3574 if (filter->dst_macaddr[i] == 0x00) 3575 filter1 = STAILQ_FIRST(&vnic0->filter); 3576 else 3577 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 3578 } 3579 3580 if (filter1 == NULL) 3581 return -EINVAL; 3582 3583 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 3584 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 3585 3586 filter->enables = en; 3587 3588 return 0; 3589 } 3590 3591 static struct bnxt_filter_info * 3592 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf, 3593 struct bnxt_vnic_info **mvnic) 
3594 { 3595 struct bnxt_filter_info *mf = NULL; 3596 int i; 3597 3598 for (i = bp->nr_vnics - 1; i >= 0; i--) { 3599 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3600 3601 STAILQ_FOREACH(mf, &vnic->filter, next) { 3602 if (mf->filter_type == nf->filter_type && 3603 mf->flags == nf->flags && 3604 mf->src_port == nf->src_port && 3605 mf->src_port_mask == nf->src_port_mask && 3606 mf->dst_port == nf->dst_port && 3607 mf->dst_port_mask == nf->dst_port_mask && 3608 mf->ip_protocol == nf->ip_protocol && 3609 mf->ip_addr_type == nf->ip_addr_type && 3610 mf->ethertype == nf->ethertype && 3611 mf->vni == nf->vni && 3612 mf->tunnel_type == nf->tunnel_type && 3613 mf->l2_ovlan == nf->l2_ovlan && 3614 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 3615 mf->l2_ivlan == nf->l2_ivlan && 3616 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 3617 !memcmp(mf->l2_addr, nf->l2_addr, 3618 RTE_ETHER_ADDR_LEN) && 3619 !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, 3620 RTE_ETHER_ADDR_LEN) && 3621 !memcmp(mf->src_macaddr, nf->src_macaddr, 3622 RTE_ETHER_ADDR_LEN) && 3623 !memcmp(mf->dst_macaddr, nf->dst_macaddr, 3624 RTE_ETHER_ADDR_LEN) && 3625 !memcmp(mf->src_ipaddr, nf->src_ipaddr, 3626 sizeof(nf->src_ipaddr)) && 3627 !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, 3628 sizeof(nf->src_ipaddr_mask)) && 3629 !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, 3630 sizeof(nf->dst_ipaddr)) && 3631 !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, 3632 sizeof(nf->dst_ipaddr_mask))) { 3633 if (mvnic) 3634 *mvnic = vnic; 3635 return mf; 3636 } 3637 } 3638 } 3639 return NULL; 3640 } 3641 3642 static int 3643 bnxt_fdir_filter(struct rte_eth_dev *dev, 3644 enum rte_filter_op filter_op, 3645 void *arg) 3646 { 3647 struct bnxt *bp = dev->data->dev_private; 3648 struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg; 3649 struct bnxt_filter_info *filter, *match; 3650 struct bnxt_vnic_info *vnic, *mvnic; 3651 int ret = 0, i; 3652 3653 if (filter_op == RTE_ETH_FILTER_NOP) 3654 return 0; 3655 3656 if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) 3657 return -EINVAL; 3658 3659 switch (filter_op) { 3660 case RTE_ETH_FILTER_ADD: 3661 case RTE_ETH_FILTER_DELETE: 3662 /* FALLTHROUGH */ 3663 filter = bnxt_get_unused_filter(bp); 3664 if (filter == NULL) { 3665 PMD_DRV_LOG(ERR, 3666 "Not enough resources for a new flow.\n"); 3667 return -ENOMEM; 3668 } 3669 3670 ret = bnxt_parse_fdir_filter(bp, fdir, filter); 3671 if (ret != 0) 3672 goto free_filter; 3673 filter->filter_type = HWRM_CFA_NTUPLE_FILTER; 3674 3675 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 3676 vnic = &bp->vnic_info[0]; 3677 else 3678 vnic = &bp->vnic_info[fdir->action.rx_queue]; 3679 3680 match = bnxt_match_fdir(bp, filter, &mvnic); 3681 if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) { 3682 if (match->dst_id == vnic->fw_vnic_id) { 3683 PMD_DRV_LOG(ERR, "Flow already exists.\n"); 3684 ret = -EEXIST; 3685 goto free_filter; 3686 } else { 3687 match->dst_id = vnic->fw_vnic_id; 3688 ret = bnxt_hwrm_set_ntuple_filter(bp, 3689 match->dst_id, 3690 match); 3691 STAILQ_REMOVE(&mvnic->filter, match, 3692 bnxt_filter_info, next); 3693 STAILQ_INSERT_TAIL(&vnic->filter, match, next); 3694 PMD_DRV_LOG(ERR, 3695 "Filter with matching pattern exist\n"); 3696 PMD_DRV_LOG(ERR, 3697 "Updated it to new destination q\n"); 3698 goto free_filter; 3699 } 3700 } 3701 if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) { 3702 PMD_DRV_LOG(ERR, "Flow does not exist.\n"); 3703 ret = -ENOENT; 3704 goto free_filter; 3705 } 3706 3707 if (filter_op == RTE_ETH_FILTER_ADD) { 3708 ret = 
bnxt_hwrm_set_ntuple_filter(bp, 3709 filter->dst_id, 3710 filter); 3711 if (ret) 3712 goto free_filter; 3713 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 3714 } else { 3715 ret = bnxt_hwrm_clear_ntuple_filter(bp, match); 3716 STAILQ_REMOVE(&vnic->filter, match, 3717 bnxt_filter_info, next); 3718 bnxt_free_filter(bp, match); 3719 bnxt_free_filter(bp, filter); 3720 } 3721 break; 3722 case RTE_ETH_FILTER_FLUSH: 3723 for (i = bp->nr_vnics - 1; i >= 0; i--) { 3724 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3725 3726 STAILQ_FOREACH(filter, &vnic->filter, next) { 3727 if (filter->filter_type == 3728 HWRM_CFA_NTUPLE_FILTER) { 3729 ret = 3730 bnxt_hwrm_clear_ntuple_filter(bp, 3731 filter); 3732 STAILQ_REMOVE(&vnic->filter, filter, 3733 bnxt_filter_info, next); 3734 } 3735 } 3736 } 3737 return ret; 3738 case RTE_ETH_FILTER_UPDATE: 3739 case RTE_ETH_FILTER_STATS: 3740 case RTE_ETH_FILTER_INFO: 3741 PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op); 3742 break; 3743 default: 3744 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); 3745 ret = -EINVAL; 3746 break; 3747 } 3748 return ret; 3749 3750 free_filter: 3751 bnxt_free_filter(bp, filter); 3752 return ret; 3753 } 3754 3755 int 3756 bnxt_filter_ctrl_op(struct rte_eth_dev *dev, 3757 enum rte_filter_type filter_type, 3758 enum rte_filter_op filter_op, void *arg) 3759 { 3760 struct bnxt *bp = dev->data->dev_private; 3761 int ret = 0; 3762 3763 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) { 3764 struct bnxt_vf_representor *vfr = dev->data->dev_private; 3765 bp = vfr->parent_dev->data->dev_private; 3766 } 3767 3768 ret = is_bnxt_in_error(bp); 3769 if (ret) 3770 return ret; 3771 3772 switch (filter_type) { 3773 case RTE_ETH_FILTER_TUNNEL: 3774 PMD_DRV_LOG(ERR, 3775 "filter type: %d: To be implemented\n", filter_type); 3776 break; 3777 case RTE_ETH_FILTER_FDIR: 3778 ret = bnxt_fdir_filter(dev, filter_op, arg); 3779 break; 3780 case RTE_ETH_FILTER_NTUPLE: 3781 ret = bnxt_ntuple_filter(dev, filter_op, arg); 3782 break; 3783 case RTE_ETH_FILTER_ETHERTYPE: 3784 ret = bnxt_ethertype_filter(dev, filter_op, arg); 3785 break; 3786 case RTE_ETH_FILTER_GENERIC: 3787 if (filter_op != RTE_ETH_FILTER_GET) 3788 return -EINVAL; 3789 if (BNXT_TRUFLOW_EN(bp)) 3790 *(const void **)arg = &bnxt_ulp_rte_flow_ops; 3791 else 3792 *(const void **)arg = &bnxt_flow_ops; 3793 break; 3794 default: 3795 PMD_DRV_LOG(ERR, 3796 "Filter type (%d) not supported", filter_type); 3797 ret = -EINVAL; 3798 break; 3799 } 3800 return ret; 3801 } 3802 3803 static const uint32_t * 3804 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 3805 { 3806 static const uint32_t ptypes[] = { 3807 RTE_PTYPE_L2_ETHER_VLAN, 3808 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 3809 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 3810 RTE_PTYPE_L4_ICMP, 3811 RTE_PTYPE_L4_TCP, 3812 RTE_PTYPE_L4_UDP, 3813 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 3814 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 3815 RTE_PTYPE_INNER_L4_ICMP, 3816 RTE_PTYPE_INNER_L4_TCP, 3817 RTE_PTYPE_INNER_L4_UDP, 3818 RTE_PTYPE_UNKNOWN 3819 }; 3820 3821 if (!dev->rx_pkt_burst) 3822 return NULL; 3823 3824 return ptypes; 3825 } 3826 3827 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 3828 int reg_win) 3829 { 3830 uint32_t reg_base = *reg_arr & 0xfffff000; 3831 uint32_t win_off; 3832 int i; 3833 3834 for (i = 0; i < count; i++) { 3835 if ((reg_arr[i] & 0xfffff000) != reg_base) 3836 return -ERANGE; 3837 } 3838 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 3839 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 3840 return 0; 3841 } 
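/*
 * The PTP timestamp registers are reached through GRC windows in BAR0:
 * bnxt_map_ptp_regs() below programs window 5 for the Rx timestamp registers
 * and window 6 for the Tx timestamp registers, after which each register is
 * accessed at BAR0 offset 0x5000/0x6000 plus the low 12 bits of its address.
 */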
3842 3843 static int bnxt_map_ptp_regs(struct bnxt *bp) 3844 { 3845 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3846 uint32_t *reg_arr; 3847 int rc, i; 3848 3849 reg_arr = ptp->rx_regs; 3850 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 3851 if (rc) 3852 return rc; 3853 3854 reg_arr = ptp->tx_regs; 3855 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 3856 if (rc) 3857 return rc; 3858 3859 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 3860 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 3861 3862 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 3863 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 3864 3865 return 0; 3866 } 3867 3868 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 3869 { 3870 rte_write32(0, (uint8_t *)bp->bar0 + 3871 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 3872 rte_write32(0, (uint8_t *)bp->bar0 + 3873 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 3874 } 3875 3876 static uint64_t bnxt_cc_read(struct bnxt *bp) 3877 { 3878 uint64_t ns; 3879 3880 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3881 BNXT_GRCPF_REG_SYNC_TIME)); 3882 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3883 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 3884 return ns; 3885 } 3886 3887 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 3888 { 3889 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3890 uint32_t fifo; 3891 3892 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3893 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3894 if (fifo & BNXT_PTP_TX_FIFO_EMPTY) 3895 return -EAGAIN; 3896 3897 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3898 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3899 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3900 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 3901 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3902 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 3903 3904 return 0; 3905 } 3906 3907 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 3908 { 3909 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3910 struct bnxt_pf_info *pf = bp->pf; 3911 uint16_t port_id; 3912 uint32_t fifo; 3913 3914 if (!ptp) 3915 return -ENODEV; 3916 3917 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3918 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3919 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 3920 return -EAGAIN; 3921 3922 port_id = pf->port_id; 3923 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3924 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3925 3926 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3927 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3928 if (fifo & BNXT_PTP_RX_FIFO_PENDING) { 3929 /* bnxt_clr_rx_ts(bp); TBD */ 3930 return -EBUSY; 3931 } 3932 3933 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3934 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3935 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3936 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3937 3938 return 0; 3939 } 3940 3941 static int 3942 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 3943 { 3944 uint64_t ns; 3945 struct bnxt *bp = dev->data->dev_private; 3946 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3947 3948 if (!ptp) 3949 return 0; 3950 3951 ns = rte_timespec_to_ns(ts); 3952 /* Set the timecounters to a new value. 
*/ 3953 ptp->tc.nsec = ns; 3954 3955 return 0; 3956 } 3957 3958 static int 3959 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 3960 { 3961 struct bnxt *bp = dev->data->dev_private; 3962 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3963 uint64_t ns, systime_cycles = 0; 3964 int rc = 0; 3965 3966 if (!ptp) 3967 return 0; 3968 3969 if (BNXT_CHIP_THOR(bp)) 3970 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 3971 &systime_cycles); 3972 else 3973 systime_cycles = bnxt_cc_read(bp); 3974 3975 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3976 *ts = rte_ns_to_timespec(ns); 3977 3978 return rc; 3979 } 3980 static int 3981 bnxt_timesync_enable(struct rte_eth_dev *dev) 3982 { 3983 struct bnxt *bp = dev->data->dev_private; 3984 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3985 uint32_t shift = 0; 3986 int rc; 3987 3988 if (!ptp) 3989 return 0; 3990 3991 ptp->rx_filter = 1; 3992 ptp->tx_tstamp_en = 1; 3993 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3994 3995 rc = bnxt_hwrm_ptp_cfg(bp); 3996 if (rc) 3997 return rc; 3998 3999 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 4000 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 4001 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 4002 4003 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 4004 ptp->tc.cc_shift = shift; 4005 ptp->tc.nsec_mask = (1ULL << shift) - 1; 4006 4007 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 4008 ptp->rx_tstamp_tc.cc_shift = shift; 4009 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 4010 4011 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 4012 ptp->tx_tstamp_tc.cc_shift = shift; 4013 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 4014 4015 if (!BNXT_CHIP_THOR(bp)) 4016 bnxt_map_ptp_regs(bp); 4017 4018 return 0; 4019 } 4020 4021 static int 4022 bnxt_timesync_disable(struct rte_eth_dev *dev) 4023 { 4024 struct bnxt *bp = dev->data->dev_private; 4025 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 4026 4027 if (!ptp) 4028 return 0; 4029 4030 ptp->rx_filter = 0; 4031 ptp->tx_tstamp_en = 0; 4032 ptp->rxctl = 0; 4033 4034 bnxt_hwrm_ptp_cfg(bp); 4035 4036 if (!BNXT_CHIP_THOR(bp)) 4037 bnxt_unmap_ptp_regs(bp); 4038 4039 return 0; 4040 } 4041 4042 static int 4043 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 4044 struct timespec *timestamp, 4045 uint32_t flags __rte_unused) 4046 { 4047 struct bnxt *bp = dev->data->dev_private; 4048 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 4049 uint64_t rx_tstamp_cycles = 0; 4050 uint64_t ns; 4051 4052 if (!ptp) 4053 return 0; 4054 4055 if (BNXT_CHIP_THOR(bp)) 4056 rx_tstamp_cycles = ptp->rx_timestamp; 4057 else 4058 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 4059 4060 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 4061 *timestamp = rte_ns_to_timespec(ns); 4062 return 0; 4063 } 4064 4065 static int 4066 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 4067 struct timespec *timestamp) 4068 { 4069 struct bnxt *bp = dev->data->dev_private; 4070 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 4071 uint64_t tx_tstamp_cycles = 0; 4072 uint64_t ns; 4073 int rc = 0; 4074 4075 if (!ptp) 4076 return 0; 4077 4078 if (BNXT_CHIP_THOR(bp)) 4079 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, 4080 &tx_tstamp_cycles); 4081 else 4082 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 4083 4084 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 4085 *timestamp = rte_ns_to_timespec(ns); 4086 4087 return rc; 4088 } 4089 4090 static int 4091 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 4092 { 4093 
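        /* The PHC is modelled with a software timecounter: the delta is
         * folded into the accumulated nanosecond count below; the
         * free-running hardware counter is left untouched.
         */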
struct bnxt *bp = dev->data->dev_private; 4094 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 4095 4096 if (!ptp) 4097 return 0; 4098 4099 ptp->tc.nsec += delta; 4100 4101 return 0; 4102 } 4103 4104 static int 4105 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 4106 { 4107 struct bnxt *bp = dev->data->dev_private; 4108 int rc; 4109 uint32_t dir_entries; 4110 uint32_t entry_length; 4111 4112 rc = is_bnxt_in_error(bp); 4113 if (rc) 4114 return rc; 4115 4116 PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n", 4117 bp->pdev->addr.domain, bp->pdev->addr.bus, 4118 bp->pdev->addr.devid, bp->pdev->addr.function); 4119 4120 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 4121 if (rc != 0) 4122 return rc; 4123 4124 return dir_entries * entry_length; 4125 } 4126 4127 static int 4128 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 4129 struct rte_dev_eeprom_info *in_eeprom) 4130 { 4131 struct bnxt *bp = dev->data->dev_private; 4132 uint32_t index; 4133 uint32_t offset; 4134 int rc; 4135 4136 rc = is_bnxt_in_error(bp); 4137 if (rc) 4138 return rc; 4139 4140 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 4141 bp->pdev->addr.domain, bp->pdev->addr.bus, 4142 bp->pdev->addr.devid, bp->pdev->addr.function, 4143 in_eeprom->offset, in_eeprom->length); 4144 4145 if (in_eeprom->offset == 0) /* special offset value to get directory */ 4146 return bnxt_get_nvram_directory(bp, in_eeprom->length, 4147 in_eeprom->data); 4148 4149 index = in_eeprom->offset >> 24; 4150 offset = in_eeprom->offset & 0xffffff; 4151 4152 if (index != 0) 4153 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 4154 in_eeprom->length, in_eeprom->data); 4155 4156 return 0; 4157 } 4158 4159 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 4160 { 4161 switch (dir_type) { 4162 case BNX_DIR_TYPE_CHIMP_PATCH: 4163 case BNX_DIR_TYPE_BOOTCODE: 4164 case BNX_DIR_TYPE_BOOTCODE_2: 4165 case BNX_DIR_TYPE_APE_FW: 4166 case BNX_DIR_TYPE_APE_PATCH: 4167 case BNX_DIR_TYPE_KONG_FW: 4168 case BNX_DIR_TYPE_KONG_PATCH: 4169 case BNX_DIR_TYPE_BONO_FW: 4170 case BNX_DIR_TYPE_BONO_PATCH: 4171 /* FALLTHROUGH */ 4172 return true; 4173 } 4174 4175 return false; 4176 } 4177 4178 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 4179 { 4180 switch (dir_type) { 4181 case BNX_DIR_TYPE_AVS: 4182 case BNX_DIR_TYPE_EXP_ROM_MBA: 4183 case BNX_DIR_TYPE_PCIE: 4184 case BNX_DIR_TYPE_TSCF_UCODE: 4185 case BNX_DIR_TYPE_EXT_PHY: 4186 case BNX_DIR_TYPE_CCM: 4187 case BNX_DIR_TYPE_ISCSI_BOOT: 4188 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 4189 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 4190 /* FALLTHROUGH */ 4191 return true; 4192 } 4193 4194 return false; 4195 } 4196 4197 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 4198 { 4199 return bnxt_dir_type_is_ape_bin_format(dir_type) || 4200 bnxt_dir_type_is_other_exec_format(dir_type); 4201 } 4202 4203 static int 4204 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 4205 struct rte_dev_eeprom_info *in_eeprom) 4206 { 4207 struct bnxt *bp = dev->data->dev_private; 4208 uint8_t index, dir_op; 4209 uint16_t type, ext, ordinal, attr; 4210 int rc; 4211 4212 rc = is_bnxt_in_error(bp); 4213 if (rc) 4214 return rc; 4215 4216 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 4217 bp->pdev->addr.domain, bp->pdev->addr.bus, 4218 bp->pdev->addr.devid, bp->pdev->addr.function, 4219 in_eeprom->offset, in_eeprom->length); 4220 4221 if (!BNXT_PF(bp)) { 4222 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 4223 return -EINVAL; 4224 } 4225 4226 type = in_eeprom->magic >> 16; 4227 4228 
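/*
 * Encoding handled below: in_eeprom->magic carries the NVM item type in
 * its upper 16 bits. A type of 0xffff selects a directory operation, with
 * the opcode in bits 8-15 (0x0e = erase) and the 1-based directory index
 * in bits 0-7. For all other types the low 16 bits of magic are the type
 * extension, while in_eeprom->offset packs the ordinal (upper 16 bits) and
 * attributes (lower 16 bits) passed to bnxt_hwrm_flash_nvram().
 */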
if (type == 0xffff) { /* special value for directory operations */ 4229 index = in_eeprom->magic & 0xff; 4230 dir_op = in_eeprom->magic >> 8; 4231 if (index == 0) 4232 return -EINVAL; 4233 switch (dir_op) { 4234 case 0x0e: /* erase */ 4235 if (in_eeprom->offset != ~in_eeprom->magic) 4236 return -EINVAL; 4237 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 4238 default: 4239 return -EINVAL; 4240 } 4241 } 4242 4243 /* Create or re-write an NVM item: */ 4244 if (bnxt_dir_type_is_executable(type) == true) 4245 return -EOPNOTSUPP; 4246 ext = in_eeprom->magic & 0xffff; 4247 ordinal = in_eeprom->offset >> 16; 4248 attr = in_eeprom->offset & 0xffff; 4249 4250 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 4251 in_eeprom->data, in_eeprom->length); 4252 } 4253 4254 /* 4255 * Initialization 4256 */ 4257 4258 static const struct eth_dev_ops bnxt_dev_ops = { 4259 .dev_infos_get = bnxt_dev_info_get_op, 4260 .dev_close = bnxt_dev_close_op, 4261 .dev_configure = bnxt_dev_configure_op, 4262 .dev_start = bnxt_dev_start_op, 4263 .dev_stop = bnxt_dev_stop_op, 4264 .dev_set_link_up = bnxt_dev_set_link_up_op, 4265 .dev_set_link_down = bnxt_dev_set_link_down_op, 4266 .stats_get = bnxt_stats_get_op, 4267 .stats_reset = bnxt_stats_reset_op, 4268 .rx_queue_setup = bnxt_rx_queue_setup_op, 4269 .rx_queue_release = bnxt_rx_queue_release_op, 4270 .tx_queue_setup = bnxt_tx_queue_setup_op, 4271 .tx_queue_release = bnxt_tx_queue_release_op, 4272 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 4273 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 4274 .reta_update = bnxt_reta_update_op, 4275 .reta_query = bnxt_reta_query_op, 4276 .rss_hash_update = bnxt_rss_hash_update_op, 4277 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 4278 .link_update = bnxt_link_update_op, 4279 .promiscuous_enable = bnxt_promiscuous_enable_op, 4280 .promiscuous_disable = bnxt_promiscuous_disable_op, 4281 .allmulticast_enable = bnxt_allmulticast_enable_op, 4282 .allmulticast_disable = bnxt_allmulticast_disable_op, 4283 .mac_addr_add = bnxt_mac_addr_add_op, 4284 .mac_addr_remove = bnxt_mac_addr_remove_op, 4285 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 4286 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 4287 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 4288 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 4289 .vlan_filter_set = bnxt_vlan_filter_set_op, 4290 .vlan_offload_set = bnxt_vlan_offload_set_op, 4291 .vlan_tpid_set = bnxt_vlan_tpid_set_op, 4292 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 4293 .mtu_set = bnxt_mtu_set_op, 4294 .mac_addr_set = bnxt_set_default_mac_addr_op, 4295 .xstats_get = bnxt_dev_xstats_get_op, 4296 .xstats_get_names = bnxt_dev_xstats_get_names_op, 4297 .xstats_reset = bnxt_dev_xstats_reset_op, 4298 .fw_version_get = bnxt_fw_version_get, 4299 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 4300 .rxq_info_get = bnxt_rxq_info_get_op, 4301 .txq_info_get = bnxt_txq_info_get_op, 4302 .rx_burst_mode_get = bnxt_rx_burst_mode_get, 4303 .tx_burst_mode_get = bnxt_tx_burst_mode_get, 4304 .dev_led_on = bnxt_dev_led_on_op, 4305 .dev_led_off = bnxt_dev_led_off_op, 4306 .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op, 4307 .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op, 4308 .rx_queue_count = bnxt_rx_queue_count_op, 4309 .rx_descriptor_status = bnxt_rx_descriptor_status_op, 4310 .tx_descriptor_status = bnxt_tx_descriptor_status_op, 4311 .rx_queue_start = bnxt_rx_queue_start, 4312 .rx_queue_stop = bnxt_rx_queue_stop, 4313 .tx_queue_start = bnxt_tx_queue_start, 4314 .tx_queue_stop = 
bnxt_tx_queue_stop, 4315 .filter_ctrl = bnxt_filter_ctrl_op, 4316 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 4317 .get_eeprom_length = bnxt_get_eeprom_length_op, 4318 .get_eeprom = bnxt_get_eeprom_op, 4319 .set_eeprom = bnxt_set_eeprom_op, 4320 .timesync_enable = bnxt_timesync_enable, 4321 .timesync_disable = bnxt_timesync_disable, 4322 .timesync_read_time = bnxt_timesync_read_time, 4323 .timesync_write_time = bnxt_timesync_write_time, 4324 .timesync_adjust_time = bnxt_timesync_adjust_time, 4325 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 4326 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 4327 }; 4328 4329 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) 4330 { 4331 uint32_t offset; 4332 4333 /* Only pre-map the reset GRC registers using window 3 */ 4334 rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + 4335 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); 4336 4337 offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); 4338 4339 return offset; 4340 } 4341 4342 int bnxt_map_fw_health_status_regs(struct bnxt *bp) 4343 { 4344 struct bnxt_error_recovery_info *info = bp->recovery_info; 4345 uint32_t reg_base = 0xffffffff; 4346 int i; 4347 4348 /* Only pre-map the monitoring GRC registers using window 2 */ 4349 for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { 4350 uint32_t reg = info->status_regs[i]; 4351 4352 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) 4353 continue; 4354 4355 if (reg_base == 0xffffffff) 4356 reg_base = reg & 0xfffff000; 4357 if ((reg & 0xfffff000) != reg_base) 4358 return -ERANGE; 4359 4360 /* Use mask 0xffc as the Lower 2 bits indicates 4361 * address space location 4362 */ 4363 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + 4364 (reg & 0xffc); 4365 } 4366 4367 if (reg_base == 0xffffffff) 4368 return 0; 4369 4370 rte_write32(reg_base, (uint8_t *)bp->bar0 + 4371 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 4372 4373 return 0; 4374 } 4375 4376 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) 4377 { 4378 struct bnxt_error_recovery_info *info = bp->recovery_info; 4379 uint32_t delay = info->delay_after_reset[index]; 4380 uint32_t val = info->reset_reg_val[index]; 4381 uint32_t reg = info->reset_reg[index]; 4382 uint32_t type, offset; 4383 4384 type = BNXT_FW_STATUS_REG_TYPE(reg); 4385 offset = BNXT_FW_STATUS_REG_OFF(reg); 4386 4387 switch (type) { 4388 case BNXT_FW_STATUS_REG_TYPE_CFG: 4389 rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); 4390 break; 4391 case BNXT_FW_STATUS_REG_TYPE_GRC: 4392 offset = bnxt_map_reset_regs(bp, offset); 4393 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4394 break; 4395 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4396 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4397 break; 4398 } 4399 /* wait on a specific interval of time until core reset is complete */ 4400 if (delay) 4401 rte_delay_ms(delay); 4402 } 4403 4404 static void bnxt_dev_cleanup(struct bnxt *bp) 4405 { 4406 bnxt_set_hwrm_link_config(bp, false); 4407 bp->link_info->link_up = 0; 4408 if (bp->eth_dev->data->dev_started) 4409 bnxt_dev_stop_op(bp->eth_dev); 4410 4411 bnxt_uninit_resources(bp, true); 4412 } 4413 4414 static int bnxt_restore_vlan_filters(struct bnxt *bp) 4415 { 4416 struct rte_eth_dev *dev = bp->eth_dev; 4417 struct rte_vlan_filter_conf *vfc; 4418 int vidx, vbit, rc; 4419 uint16_t vlan_id; 4420 4421 for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) { 4422 vfc = &dev->data->vlan_filter_conf; 4423 vidx = vlan_id / 64; 4424 vbit = vlan_id % 64; 4425 4426 /* Each 
bit corresponds to a VLAN id */ 4427 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) { 4428 rc = bnxt_add_vlan_filter(bp, vlan_id); 4429 if (rc) 4430 return rc; 4431 } 4432 } 4433 4434 return 0; 4435 } 4436 4437 static int bnxt_restore_mac_filters(struct bnxt *bp) 4438 { 4439 struct rte_eth_dev *dev = bp->eth_dev; 4440 struct rte_eth_dev_info dev_info; 4441 struct rte_ether_addr *addr; 4442 uint64_t pool_mask; 4443 uint32_t pool = 0; 4444 uint16_t i; 4445 int rc; 4446 4447 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4448 return 0; 4449 4450 rc = bnxt_dev_info_get_op(dev, &dev_info); 4451 if (rc) 4452 return rc; 4453 4454 /* replay MAC address configuration */ 4455 for (i = 1; i < dev_info.max_mac_addrs; i++) { 4456 addr = &dev->data->mac_addrs[i]; 4457 4458 /* skip zero address */ 4459 if (rte_is_zero_ether_addr(addr)) 4460 continue; 4461 4462 pool = 0; 4463 pool_mask = dev->data->mac_pool_sel[i]; 4464 4465 do { 4466 if (pool_mask & 1ULL) { 4467 rc = bnxt_mac_addr_add_op(dev, addr, i, pool); 4468 if (rc) 4469 return rc; 4470 } 4471 pool_mask >>= 1; 4472 pool++; 4473 } while (pool_mask); 4474 } 4475 4476 return 0; 4477 } 4478 4479 static int bnxt_restore_filters(struct bnxt *bp) 4480 { 4481 struct rte_eth_dev *dev = bp->eth_dev; 4482 int ret = 0; 4483 4484 if (dev->data->all_multicast) { 4485 ret = bnxt_allmulticast_enable_op(dev); 4486 if (ret) 4487 return ret; 4488 } 4489 if (dev->data->promiscuous) { 4490 ret = bnxt_promiscuous_enable_op(dev); 4491 if (ret) 4492 return ret; 4493 } 4494 4495 ret = bnxt_restore_mac_filters(bp); 4496 if (ret) 4497 return ret; 4498 4499 ret = bnxt_restore_vlan_filters(bp); 4500 /* TODO restore other filters as well */ 4501 return ret; 4502 } 4503 4504 static void bnxt_dev_recover(void *arg) 4505 { 4506 struct bnxt *bp = arg; 4507 int timeout = bp->fw_reset_max_msecs; 4508 int rc = 0; 4509 4510 /* Clear Error flag so that device re-init should happen */ 4511 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 4512 4513 do { 4514 rc = bnxt_hwrm_ver_get(bp, SHORT_HWRM_CMD_TIMEOUT); 4515 if (rc == 0) 4516 break; 4517 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); 4518 timeout -= BNXT_FW_READY_WAIT_INTERVAL; 4519 } while (rc && timeout); 4520 4521 if (rc) { 4522 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); 4523 goto err; 4524 } 4525 4526 rc = bnxt_init_resources(bp, true); 4527 if (rc) { 4528 PMD_DRV_LOG(ERR, 4529 "Failed to initialize resources after reset\n"); 4530 goto err; 4531 } 4532 /* clear reset flag as the device is initialized now */ 4533 bp->flags &= ~BNXT_FLAG_FW_RESET; 4534 4535 rc = bnxt_dev_start_op(bp->eth_dev); 4536 if (rc) { 4537 PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); 4538 goto err_start; 4539 } 4540 4541 rc = bnxt_restore_filters(bp); 4542 if (rc) 4543 goto err_start; 4544 4545 PMD_DRV_LOG(INFO, "Recovered from FW reset\n"); 4546 return; 4547 err_start: 4548 bnxt_dev_stop_op(bp->eth_dev); 4549 err: 4550 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4551 bnxt_uninit_resources(bp, false); 4552 PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n"); 4553 } 4554 4555 void bnxt_dev_reset_and_resume(void *arg) 4556 { 4557 struct bnxt *bp = arg; 4558 int rc; 4559 4560 bnxt_dev_cleanup(bp); 4561 4562 bnxt_wait_for_device_shutdown(bp); 4563 4564 rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs, 4565 bnxt_dev_recover, (void *)bp); 4566 if (rc) 4567 PMD_DRV_LOG(ERR, "Error setting recovery alarm"); 4568 } 4569 4570 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) 4571 { 4572 struct bnxt_error_recovery_info *info = bp->recovery_info; 4573
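/*
 * Each status register descriptor encodes the address space in its low
 * two bits (PCI config space, GRC or BAR0) and the register offset in the
 * remaining bits; GRC registers are read through the window that
 * bnxt_map_fw_health_status_regs() pre-mapped earlier.
 */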
uint32_t reg = info->status_regs[index]; 4574 uint32_t type, offset, val = 0; 4575 4576 type = BNXT_FW_STATUS_REG_TYPE(reg); 4577 offset = BNXT_FW_STATUS_REG_OFF(reg); 4578 4579 switch (type) { 4580 case BNXT_FW_STATUS_REG_TYPE_CFG: 4581 rte_pci_read_config(bp->pdev, &val, sizeof(val), offset); 4582 break; 4583 case BNXT_FW_STATUS_REG_TYPE_GRC: 4584 offset = info->mapped_status_regs[index]; 4585 /* FALLTHROUGH */ 4586 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4587 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 4588 offset)); 4589 break; 4590 } 4591 4592 return val; 4593 } 4594 4595 static int bnxt_fw_reset_all(struct bnxt *bp) 4596 { 4597 struct bnxt_error_recovery_info *info = bp->recovery_info; 4598 uint32_t i; 4599 int rc = 0; 4600 4601 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4602 /* Reset through master function driver */ 4603 for (i = 0; i < info->reg_array_cnt; i++) 4604 bnxt_write_fw_reset_reg(bp, i); 4605 /* Wait for time specified by FW after triggering reset */ 4606 rte_delay_ms(info->master_func_wait_period_after_reset); 4607 } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) { 4608 /* Reset with the help of Kong processor */ 4609 rc = bnxt_hwrm_fw_reset(bp); 4610 if (rc) 4611 PMD_DRV_LOG(ERR, "Failed to reset FW\n"); 4612 } 4613 4614 return rc; 4615 } 4616 4617 static void bnxt_fw_reset_cb(void *arg) 4618 { 4619 struct bnxt *bp = arg; 4620 struct bnxt_error_recovery_info *info = bp->recovery_info; 4621 int rc = 0; 4622 4623 /* Only Master function can do FW reset */ 4624 if (bnxt_is_master_func(bp) && 4625 bnxt_is_recovery_enabled(bp)) { 4626 rc = bnxt_fw_reset_all(bp); 4627 if (rc) { 4628 PMD_DRV_LOG(ERR, "Adapter recovery failed\n"); 4629 return; 4630 } 4631 } 4632 4633 /* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send 4634 * EXCEPTION_FATAL_ASYNC event to all the functions 4635 * (including MASTER FUNC). After receiving this Async, all the active 4636 * drivers should treat this case as FW initiated recovery 4637 */ 4638 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4639 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT; 4640 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT; 4641 4642 /* To recover from error */ 4643 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, 4644 (void *)bp); 4645 } 4646 } 4647 4648 /* Driver should poll FW heartbeat, reset_counter with the frequency 4649 * advertised by FW in HWRM_ERROR_RECOVERY_QCFG. 4650 * When the driver detects heartbeat stop or change in reset_counter, 4651 * it has to trigger a reset to recover from the error condition. 4652 * A “master PF” is the function who will have the privilege to 4653 * initiate the chimp reset. The master PF will be elected by the 4654 * firmware and will be notified through async message. 
4655 */ 4656 static void bnxt_check_fw_health(void *arg) 4657 { 4658 struct bnxt *bp = arg; 4659 struct bnxt_error_recovery_info *info = bp->recovery_info; 4660 uint32_t val = 0, wait_msec; 4661 4662 if (!info || !bnxt_is_recovery_enabled(bp) || 4663 is_bnxt_in_error(bp)) 4664 return; 4665 4666 val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); 4667 if (val == info->last_heart_beat) 4668 goto reset; 4669 4670 info->last_heart_beat = val; 4671 4672 val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); 4673 if (val != info->last_reset_counter) 4674 goto reset; 4675 4676 info->last_reset_counter = val; 4677 4678 rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq, 4679 bnxt_check_fw_health, (void *)bp); 4680 4681 return; 4682 reset: 4683 /* Stop DMA to/from device */ 4684 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4685 bp->flags |= BNXT_FLAG_FW_RESET; 4686 4687 PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); 4688 4689 if (bnxt_is_master_func(bp)) 4690 wait_msec = info->master_func_wait_period; 4691 else 4692 wait_msec = info->normal_func_wait_period; 4693 4694 rte_eal_alarm_set(US_PER_MS * wait_msec, 4695 bnxt_fw_reset_cb, (void *)bp); 4696 } 4697 4698 void bnxt_schedule_fw_health_check(struct bnxt *bp) 4699 { 4700 uint32_t polling_freq; 4701 4702 if (!bnxt_is_recovery_enabled(bp)) 4703 return; 4704 4705 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) 4706 return; 4707 4708 polling_freq = bp->recovery_info->driver_polling_freq; 4709 4710 rte_eal_alarm_set(US_PER_MS * polling_freq, 4711 bnxt_check_fw_health, (void *)bp); 4712 bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4713 } 4714 4715 static void bnxt_cancel_fw_health_check(struct bnxt *bp) 4716 { 4717 if (!bnxt_is_recovery_enabled(bp)) 4718 return; 4719 4720 rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp); 4721 bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4722 } 4723 4724 static bool bnxt_vf_pciid(uint16_t device_id) 4725 { 4726 switch (device_id) { 4727 case BROADCOM_DEV_ID_57304_VF: 4728 case BROADCOM_DEV_ID_57406_VF: 4729 case BROADCOM_DEV_ID_5731X_VF: 4730 case BROADCOM_DEV_ID_5741X_VF: 4731 case BROADCOM_DEV_ID_57414_VF: 4732 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4733 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4734 case BROADCOM_DEV_ID_58802_VF: 4735 case BROADCOM_DEV_ID_57500_VF1: 4736 case BROADCOM_DEV_ID_57500_VF2: 4737 /* FALLTHROUGH */ 4738 return true; 4739 default: 4740 return false; 4741 } 4742 } 4743 4744 static bool bnxt_thor_device(uint16_t device_id) 4745 { 4746 switch (device_id) { 4747 case BROADCOM_DEV_ID_57508: 4748 case BROADCOM_DEV_ID_57504: 4749 case BROADCOM_DEV_ID_57502: 4750 case BROADCOM_DEV_ID_57508_MF1: 4751 case BROADCOM_DEV_ID_57504_MF1: 4752 case BROADCOM_DEV_ID_57502_MF1: 4753 case BROADCOM_DEV_ID_57508_MF2: 4754 case BROADCOM_DEV_ID_57504_MF2: 4755 case BROADCOM_DEV_ID_57502_MF2: 4756 case BROADCOM_DEV_ID_57500_VF1: 4757 case BROADCOM_DEV_ID_57500_VF2: 4758 /* FALLTHROUGH */ 4759 return true; 4760 default: 4761 return false; 4762 } 4763 } 4764 4765 bool bnxt_stratus_device(struct bnxt *bp) 4766 { 4767 uint16_t device_id = bp->pdev->id.device_id; 4768 4769 switch (device_id) { 4770 case BROADCOM_DEV_ID_STRATUS_NIC: 4771 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4772 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4773 /* FALLTHROUGH */ 4774 return true; 4775 default: 4776 return false; 4777 } 4778 } 4779 4780 static int bnxt_init_board(struct rte_eth_dev *eth_dev) 4781 { 4782 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 4783 struct bnxt *bp = eth_dev->data->dev_private; 
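/*
 * BAR 0 (mem_resource[0]) exposes the GRC registers accessed through
 * bp->bar0 throughout this file, and BAR 2 (mem_resource[2]) holds the
 * doorbell region; both must already have been mapped by the PCI bus
 * driver for the port to be usable.
 */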
4784 4785 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 4786 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 4787 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 4788 if (!bp->bar0 || !bp->doorbell_base) { 4789 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 4790 return -ENODEV; 4791 } 4792 4793 bp->eth_dev = eth_dev; 4794 bp->pdev = pci_dev; 4795 4796 return 0; 4797 } 4798 4799 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 4800 struct bnxt_ctx_pg_info *ctx_pg, 4801 uint32_t mem_size, 4802 const char *suffix, 4803 uint16_t idx) 4804 { 4805 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 4806 const struct rte_memzone *mz = NULL; 4807 char mz_name[RTE_MEMZONE_NAMESIZE]; 4808 rte_iova_t mz_phys_addr; 4809 uint64_t valid_bits = 0; 4810 uint32_t sz; 4811 int i; 4812 4813 if (!mem_size) 4814 return 0; 4815 4816 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 4817 BNXT_PAGE_SIZE; 4818 rmem->page_size = BNXT_PAGE_SIZE; 4819 rmem->pg_arr = ctx_pg->ctx_pg_arr; 4820 rmem->dma_arr = ctx_pg->ctx_dma_arr; 4821 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 4822 4823 valid_bits = PTU_PTE_VALID; 4824 4825 if (rmem->nr_pages > 1) { 4826 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4827 "bnxt_ctx_pg_tbl%s_%x_%d", 4828 suffix, idx, bp->eth_dev->data->port_id); 4829 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4830 mz = rte_memzone_lookup(mz_name); 4831 if (!mz) { 4832 mz = rte_memzone_reserve_aligned(mz_name, 4833 rmem->nr_pages * 8, 4834 SOCKET_ID_ANY, 4835 RTE_MEMZONE_2MB | 4836 RTE_MEMZONE_SIZE_HINT_ONLY | 4837 RTE_MEMZONE_IOVA_CONTIG, 4838 BNXT_PAGE_SIZE); 4839 if (mz == NULL) 4840 return -ENOMEM; 4841 } 4842 4843 memset(mz->addr, 0, mz->len); 4844 mz_phys_addr = mz->iova; 4845 4846 rmem->pg_tbl = mz->addr; 4847 rmem->pg_tbl_map = mz_phys_addr; 4848 rmem->pg_tbl_mz = mz; 4849 } 4850 4851 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", 4852 suffix, idx, bp->eth_dev->data->port_id); 4853 mz = rte_memzone_lookup(mz_name); 4854 if (!mz) { 4855 mz = rte_memzone_reserve_aligned(mz_name, 4856 mem_size, 4857 SOCKET_ID_ANY, 4858 RTE_MEMZONE_1GB | 4859 RTE_MEMZONE_SIZE_HINT_ONLY | 4860 RTE_MEMZONE_IOVA_CONTIG, 4861 BNXT_PAGE_SIZE); 4862 if (mz == NULL) 4863 return -ENOMEM; 4864 } 4865 4866 memset(mz->addr, 0, mz->len); 4867 mz_phys_addr = mz->iova; 4868 4869 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 4870 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 4871 rmem->dma_arr[i] = mz_phys_addr + sz; 4872 4873 if (rmem->nr_pages > 1) { 4874 if (i == rmem->nr_pages - 2 && 4875 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4876 valid_bits |= PTU_PTE_NEXT_TO_LAST; 4877 else if (i == rmem->nr_pages - 1 && 4878 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4879 valid_bits |= PTU_PTE_LAST; 4880 4881 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 4882 valid_bits); 4883 } 4884 } 4885 4886 rmem->mz = mz; 4887 if (rmem->vmem_size) 4888 rmem->vmem = (void **)mz->addr; 4889 rmem->dma_arr[0] = mz_phys_addr; 4890 return 0; 4891 } 4892 4893 static void bnxt_free_ctx_mem(struct bnxt *bp) 4894 { 4895 int i; 4896 4897 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 4898 return; 4899 4900 bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; 4901 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 4902 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 4903 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 4904 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 4905 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 4906 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 4907 
rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 4908 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 4909 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 4910 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 4911 4912 for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { 4913 if (bp->ctx->tqm_mem[i]) 4914 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 4915 } 4916 4917 rte_free(bp->ctx); 4918 bp->ctx = NULL; 4919 } 4920 4921 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 4922 4923 #define min_t(type, x, y) ({ \ 4924 type __min1 = (x); \ 4925 type __min2 = (y); \ 4926 __min1 < __min2 ? __min1 : __min2; }) 4927 4928 #define max_t(type, x, y) ({ \ 4929 type __max1 = (x); \ 4930 type __max2 = (y); \ 4931 __max1 > __max2 ? __max1 : __max2; }) 4932 4933 #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) 4934 4935 int bnxt_alloc_ctx_mem(struct bnxt *bp) 4936 { 4937 struct bnxt_ctx_pg_info *ctx_pg; 4938 struct bnxt_ctx_mem_info *ctx; 4939 uint32_t mem_size, ena, entries; 4940 uint32_t entries_sp, min; 4941 int i, rc; 4942 4943 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 4944 if (rc) { 4945 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 4946 return rc; 4947 } 4948 ctx = bp->ctx; 4949 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 4950 return 0; 4951 4952 ctx_pg = &ctx->qp_mem; 4953 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 4954 mem_size = ctx->qp_entry_size * ctx_pg->entries; 4955 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 4956 if (rc) 4957 return rc; 4958 4959 ctx_pg = &ctx->srq_mem; 4960 ctx_pg->entries = ctx->srq_max_l2_entries; 4961 mem_size = ctx->srq_entry_size * ctx_pg->entries; 4962 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 4963 if (rc) 4964 return rc; 4965 4966 ctx_pg = &ctx->cq_mem; 4967 ctx_pg->entries = ctx->cq_max_l2_entries; 4968 mem_size = ctx->cq_entry_size * ctx_pg->entries; 4969 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 4970 if (rc) 4971 return rc; 4972 4973 ctx_pg = &ctx->vnic_mem; 4974 ctx_pg->entries = ctx->vnic_max_vnic_entries + 4975 ctx->vnic_max_ring_table_entries; 4976 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 4977 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 4978 if (rc) 4979 return rc; 4980 4981 ctx_pg = &ctx->stat_mem; 4982 ctx_pg->entries = ctx->stat_max_entries; 4983 mem_size = ctx->stat_entry_size * ctx_pg->entries; 4984 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 4985 if (rc) 4986 return rc; 4987 4988 min = ctx->tqm_min_entries_per_ring; 4989 4990 entries_sp = ctx->qp_max_l2_entries + 4991 ctx->vnic_max_vnic_entries + 4992 2 * ctx->qp_min_qp1_entries + min; 4993 entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple); 4994 4995 entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries; 4996 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 4997 entries = clamp_t(uint32_t, entries, min, 4998 ctx->tqm_max_entries_per_ring); 4999 for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 5000 ctx_pg = ctx->tqm_mem[i]; 5001 ctx_pg->entries = i ? 
entries : entries_sp; 5002 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 5003 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i); 5004 if (rc) 5005 return rc; 5006 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 5007 } 5008 5009 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 5010 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 5011 if (rc) 5012 PMD_DRV_LOG(ERR, 5013 "Failed to configure context mem: rc = %d\n", rc); 5014 else 5015 ctx->flags |= BNXT_CTX_FLAG_INITED; 5016 5017 return rc; 5018 } 5019 5020 static int bnxt_alloc_stats_mem(struct bnxt *bp) 5021 { 5022 struct rte_pci_device *pci_dev = bp->pdev; 5023 char mz_name[RTE_MEMZONE_NAMESIZE]; 5024 const struct rte_memzone *mz = NULL; 5025 uint32_t total_alloc_len; 5026 rte_iova_t mz_phys_addr; 5027 5028 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 5029 return 0; 5030 5031 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 5032 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 5033 pci_dev->addr.bus, pci_dev->addr.devid, 5034 pci_dev->addr.function, "rx_port_stats"); 5035 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 5036 mz = rte_memzone_lookup(mz_name); 5037 total_alloc_len = 5038 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 5039 sizeof(struct rx_port_stats_ext) + 512); 5040 if (!mz) { 5041 mz = rte_memzone_reserve(mz_name, total_alloc_len, 5042 SOCKET_ID_ANY, 5043 RTE_MEMZONE_2MB | 5044 RTE_MEMZONE_SIZE_HINT_ONLY | 5045 RTE_MEMZONE_IOVA_CONTIG); 5046 if (mz == NULL) 5047 return -ENOMEM; 5048 } 5049 memset(mz->addr, 0, mz->len); 5050 mz_phys_addr = mz->iova; 5051 5052 bp->rx_mem_zone = (const void *)mz; 5053 bp->hw_rx_port_stats = mz->addr; 5054 bp->hw_rx_port_stats_map = mz_phys_addr; 5055 5056 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 5057 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 5058 pci_dev->addr.bus, pci_dev->addr.devid, 5059 pci_dev->addr.function, "tx_port_stats"); 5060 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 5061 mz = rte_memzone_lookup(mz_name); 5062 total_alloc_len = 5063 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 5064 sizeof(struct tx_port_stats_ext) + 512); 5065 if (!mz) { 5066 mz = rte_memzone_reserve(mz_name, 5067 total_alloc_len, 5068 SOCKET_ID_ANY, 5069 RTE_MEMZONE_2MB | 5070 RTE_MEMZONE_SIZE_HINT_ONLY | 5071 RTE_MEMZONE_IOVA_CONTIG); 5072 if (mz == NULL) 5073 return -ENOMEM; 5074 } 5075 memset(mz->addr, 0, mz->len); 5076 mz_phys_addr = mz->iova; 5077 5078 bp->tx_mem_zone = (const void *)mz; 5079 bp->hw_tx_port_stats = mz->addr; 5080 bp->hw_tx_port_stats_map = mz_phys_addr; 5081 bp->flags |= BNXT_FLAG_PORT_STATS; 5082 5083 /* Display extended statistics if FW supports it */ 5084 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 5085 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 5086 !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED)) 5087 return 0; 5088 5089 bp->hw_rx_port_stats_ext = (void *) 5090 ((uint8_t *)bp->hw_rx_port_stats + 5091 sizeof(struct rx_port_stats)); 5092 bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + 5093 sizeof(struct rx_port_stats); 5094 bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; 5095 5096 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 || 5097 bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) { 5098 bp->hw_tx_port_stats_ext = (void *) 5099 ((uint8_t *)bp->hw_tx_port_stats + 5100 sizeof(struct tx_port_stats)); 5101 bp->hw_tx_port_stats_ext_map = 5102 bp->hw_tx_port_stats_map + 5103 sizeof(struct tx_port_stats); 5104 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS; 5105 } 5106 5107 return 0; 5108 } 5109 5110 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) 
5111 { 5112 struct bnxt *bp = eth_dev->data->dev_private; 5113 int rc = 0; 5114 5115 eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl", 5116 RTE_ETHER_ADDR_LEN * 5117 bp->max_l2_ctx, 5118 0); 5119 if (eth_dev->data->mac_addrs == NULL) { 5120 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n"); 5121 return -ENOMEM; 5122 } 5123 5124 if (!BNXT_HAS_DFLT_MAC_SET(bp)) { 5125 if (BNXT_PF(bp)) 5126 return -EINVAL; 5127 5128 /* Generate a random MAC address, if none was assigned by PF */ 5129 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n"); 5130 bnxt_eth_hw_addr_random(bp->mac_addr); 5131 PMD_DRV_LOG(INFO, 5132 "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n", 5133 bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2], 5134 bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]); 5135 5136 rc = bnxt_hwrm_set_mac(bp); 5137 if (rc) 5138 return rc; 5139 } 5140 5141 /* Copy the permanent MAC from the FUNC_QCAPS response */ 5142 memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN); 5143 5144 return rc; 5145 } 5146 5147 static int bnxt_restore_dflt_mac(struct bnxt *bp) 5148 { 5149 int rc = 0; 5150 5151 /* MAC is already configured in FW */ 5152 if (BNXT_HAS_DFLT_MAC_SET(bp)) 5153 return 0; 5154 5155 /* Restore the old MAC configured */ 5156 rc = bnxt_hwrm_set_mac(bp); 5157 if (rc) 5158 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n"); 5159 5160 return rc; 5161 } 5162 5163 static void bnxt_config_vf_req_fwd(struct bnxt *bp) 5164 { 5165 if (!BNXT_PF(bp)) 5166 return; 5167 5168 #define ALLOW_FUNC(x) \ 5169 { \ 5170 uint32_t arg = (x); \ 5171 bp->pf->vf_req_fwd[((arg) >> 5)] &= \ 5172 ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \ 5173 } 5174 5175 /* Forward all requests if firmware is new enough */ 5176 if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) && 5177 (bp->fw_ver < ((20 << 24) | (7 << 16)))) || 5178 ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) { 5179 memset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd)); 5180 } else { 5181 PMD_DRV_LOG(WARNING, 5182 "Firmware too old for VF mailbox functionality\n"); 5183 memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd)); 5184 } 5185 5186 /* 5187 * The following are used for driver cleanup. If we disallow these, 5188 * VF drivers can't clean up cleanly. 5189 */ 5190 ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR); 5191 ALLOW_FUNC(HWRM_VNIC_FREE); 5192 ALLOW_FUNC(HWRM_RING_FREE); 5193 ALLOW_FUNC(HWRM_RING_GRP_FREE); 5194 ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE); 5195 ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE); 5196 ALLOW_FUNC(HWRM_STAT_CTX_FREE); 5197 ALLOW_FUNC(HWRM_PORT_PHY_QCFG); 5198 ALLOW_FUNC(HWRM_VNIC_TPA_CFG); 5199 } 5200 5201 uint16_t 5202 bnxt_get_svif(uint16_t port_id, bool func_svif, 5203 enum bnxt_ulp_intf_type type) 5204 { 5205 struct rte_eth_dev *eth_dev; 5206 struct bnxt *bp; 5207 5208 eth_dev = &rte_eth_devices[port_id]; 5209 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5210 struct bnxt_vf_representor *vfr = eth_dev->data->dev_private; 5211 if (!vfr) 5212 return 0; 5213 5214 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5215 return vfr->svif; 5216 5217 eth_dev = vfr->parent_dev; 5218 } 5219 5220 bp = eth_dev->data->dev_private; 5221 5222 return func_svif ?
bp->func_svif : bp->port_svif; 5223 } 5224 5225 uint16_t 5226 bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type) 5227 { 5228 struct rte_eth_dev *eth_dev; 5229 struct bnxt_vnic_info *vnic; 5230 struct bnxt *bp; 5231 5232 eth_dev = &rte_eth_devices[port]; 5233 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5234 struct bnxt_vf_representor *vfr = eth_dev->data->dev_private; 5235 if (!vfr) 5236 return 0; 5237 5238 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5239 return vfr->dflt_vnic_id; 5240 5241 eth_dev = vfr->parent_dev; 5242 } 5243 5244 bp = eth_dev->data->dev_private; 5245 5246 vnic = BNXT_GET_DEFAULT_VNIC(bp); 5247 5248 return vnic->fw_vnic_id; 5249 } 5250 5251 uint16_t 5252 bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type) 5253 { 5254 struct rte_eth_dev *eth_dev; 5255 struct bnxt *bp; 5256 5257 eth_dev = &rte_eth_devices[port]; 5258 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5259 struct bnxt_vf_representor *vfr = eth_dev->data->dev_private; 5260 if (!vfr) 5261 return 0; 5262 5263 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5264 return vfr->fw_fid; 5265 5266 eth_dev = vfr->parent_dev; 5267 } 5268 5269 bp = eth_dev->data->dev_private; 5270 5271 return bp->fw_fid; 5272 } 5273 5274 enum bnxt_ulp_intf_type 5275 bnxt_get_interface_type(uint16_t port) 5276 { 5277 struct rte_eth_dev *eth_dev; 5278 struct bnxt *bp; 5279 5280 eth_dev = &rte_eth_devices[port]; 5281 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) 5282 return BNXT_ULP_INTF_TYPE_VF_REP; 5283 5284 bp = eth_dev->data->dev_private; 5285 if (BNXT_PF(bp)) 5286 return BNXT_ULP_INTF_TYPE_PF; 5287 else if (BNXT_VF_IS_TRUSTED(bp)) 5288 return BNXT_ULP_INTF_TYPE_TRUSTED_VF; 5289 else if (BNXT_VF(bp)) 5290 return BNXT_ULP_INTF_TYPE_VF; 5291 5292 return BNXT_ULP_INTF_TYPE_INVALID; 5293 } 5294 5295 uint16_t 5296 bnxt_get_phy_port_id(uint16_t port_id) 5297 { 5298 struct bnxt_vf_representor *vfr; 5299 struct rte_eth_dev *eth_dev; 5300 struct bnxt *bp; 5301 5302 eth_dev = &rte_eth_devices[port_id]; 5303 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5304 vfr = eth_dev->data->dev_private; 5305 if (!vfr) 5306 return 0; 5307 5308 eth_dev = vfr->parent_dev; 5309 } 5310 5311 bp = eth_dev->data->dev_private; 5312 5313 return BNXT_PF(bp) ? bp->pf->port_id : bp->parent->port_id; 5314 } 5315 5316 uint16_t 5317 bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type) 5318 { 5319 struct rte_eth_dev *eth_dev; 5320 struct bnxt *bp; 5321 5322 eth_dev = &rte_eth_devices[port_id]; 5323 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5324 struct bnxt_vf_representor *vfr = eth_dev->data->dev_private; 5325 if (!vfr) 5326 return 0; 5327 5328 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5329 return vfr->fw_fid - 1; 5330 5331 eth_dev = vfr->parent_dev; 5332 } 5333 5334 bp = eth_dev->data->dev_private; 5335 5336 return BNXT_PF(bp) ? 
bp->fw_fid - 1 : bp->parent->fid - 1; 5337 } 5338 5339 uint16_t 5340 bnxt_get_vport(uint16_t port_id) 5341 { 5342 return (1 << bnxt_get_phy_port_id(port_id)); 5343 } 5344 5345 static void bnxt_alloc_error_recovery_info(struct bnxt *bp) 5346 { 5347 struct bnxt_error_recovery_info *info = bp->recovery_info; 5348 5349 if (info) { 5350 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)) 5351 memset(info, 0, sizeof(*info)); 5352 return; 5353 } 5354 5355 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5356 return; 5357 5358 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5359 sizeof(*info), 0); 5360 if (!info) 5361 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5362 5363 bp->recovery_info = info; 5364 } 5365 5366 static void bnxt_check_fw_status(struct bnxt *bp) 5367 { 5368 uint32_t fw_status; 5369 5370 if (!(bp->recovery_info && 5371 (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) 5372 return; 5373 5374 fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); 5375 if (fw_status != BNXT_FW_STATUS_HEALTHY) 5376 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", 5377 fw_status); 5378 } 5379 5380 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) 5381 { 5382 struct bnxt_error_recovery_info *info = bp->recovery_info; 5383 uint32_t status_loc; 5384 uint32_t sig_ver; 5385 5386 rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + 5387 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5388 sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5389 BNXT_GRCP_WINDOW_2_BASE + 5390 offsetof(struct hcomm_status, 5391 sig_ver))); 5392 /* If the signature is absent, then FW does not support this feature */ 5393 if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != 5394 HCOMM_STATUS_SIGNATURE_VAL) 5395 return 0; 5396 5397 if (!info) { 5398 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5399 sizeof(*info), 0); 5400 if (!info) 5401 return -ENOMEM; 5402 bp->recovery_info = info; 5403 } else { 5404 memset(info, 0, sizeof(*info)); 5405 } 5406 5407 status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5408 BNXT_GRCP_WINDOW_2_BASE + 5409 offsetof(struct hcomm_status, 5410 fw_status_loc))); 5411 5412 /* Only pre-map the FW health status GRC register */ 5413 if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) 5414 return 0; 5415 5416 info->status_regs[BNXT_FW_STATUS_REG] = status_loc; 5417 info->mapped_status_regs[BNXT_FW_STATUS_REG] = 5418 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); 5419 5420 rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + 5421 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5422 5423 bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; 5424 5425 return 0; 5426 } 5427 5428 static int bnxt_init_fw(struct bnxt *bp) 5429 { 5430 uint16_t mtu; 5431 int rc = 0; 5432 5433 bp->fw_cap = 0; 5434 5435 rc = bnxt_map_hcomm_fw_status_reg(bp); 5436 if (rc) 5437 return rc; 5438 5439 rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); 5440 if (rc) { 5441 bnxt_check_fw_status(bp); 5442 return rc; 5443 } 5444 5445 rc = bnxt_hwrm_func_reset(bp); 5446 if (rc) 5447 return -EIO; 5448 5449 rc = bnxt_hwrm_vnic_qcaps(bp); 5450 if (rc) 5451 return rc; 5452 5453 rc = bnxt_hwrm_queue_qportcfg(bp); 5454 if (rc) 5455 return rc; 5456 5457 /* Get the MAX capabilities for this function. 5458 * This function also allocates context memory for TQM rings and 5459 * informs the firmware about this allocated backing store memory. 
5460 */ 5461 rc = bnxt_hwrm_func_qcaps(bp); 5462 if (rc) 5463 return rc; 5464 5465 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 5466 if (rc) 5467 return rc; 5468 5469 bnxt_hwrm_port_mac_qcfg(bp); 5470 5471 bnxt_hwrm_parent_pf_qcfg(bp); 5472 5473 bnxt_hwrm_port_phy_qcaps(bp); 5474 5475 bnxt_alloc_error_recovery_info(bp); 5476 /* Get the adapter error recovery support info */ 5477 rc = bnxt_hwrm_error_recovery_qcfg(bp); 5478 if (rc) 5479 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5480 5481 bnxt_hwrm_port_led_qcaps(bp); 5482 5483 return 0; 5484 } 5485 5486 static int 5487 bnxt_init_locks(struct bnxt *bp) 5488 { 5489 int err; 5490 5491 err = pthread_mutex_init(&bp->flow_lock, NULL); 5492 if (err) { 5493 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); 5494 return err; 5495 } 5496 5497 err = pthread_mutex_init(&bp->def_cp_lock, NULL); 5498 if (err) 5499 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); 5500 return err; 5501 } 5502 5503 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) 5504 { 5505 int rc = 0; 5506 5507 rc = bnxt_init_fw(bp); 5508 if (rc) 5509 return rc; 5510 5511 if (!reconfig_dev) { 5512 rc = bnxt_setup_mac_addr(bp->eth_dev); 5513 if (rc) 5514 return rc; 5515 } else { 5516 rc = bnxt_restore_dflt_mac(bp); 5517 if (rc) 5518 return rc; 5519 } 5520 5521 bnxt_config_vf_req_fwd(bp); 5522 5523 rc = bnxt_hwrm_func_driver_register(bp); 5524 if (rc) { 5525 PMD_DRV_LOG(ERR, "Failed to register driver"); 5526 return -EBUSY; 5527 } 5528 5529 if (BNXT_PF(bp)) { 5530 if (bp->pdev->max_vfs) { 5531 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 5532 if (rc) { 5533 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 5534 return rc; 5535 } 5536 } else { 5537 rc = bnxt_hwrm_allocate_pf_only(bp); 5538 if (rc) { 5539 PMD_DRV_LOG(ERR, 5540 "Failed to allocate PF resources"); 5541 return rc; 5542 } 5543 } 5544 } 5545 5546 rc = bnxt_alloc_mem(bp, reconfig_dev); 5547 if (rc) 5548 return rc; 5549 5550 rc = bnxt_setup_int(bp); 5551 if (rc) 5552 return rc; 5553 5554 rc = bnxt_request_int(bp); 5555 if (rc) 5556 return rc; 5557 5558 rc = bnxt_init_ctx_mem(bp); 5559 if (rc) { 5560 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n"); 5561 return rc; 5562 } 5563 5564 rc = bnxt_init_locks(bp); 5565 if (rc) 5566 return rc; 5567 5568 return 0; 5569 } 5570 5571 static int 5572 bnxt_parse_devarg_truflow(__rte_unused const char *key, 5573 const char *value, void *opaque_arg) 5574 { 5575 struct bnxt *bp = opaque_arg; 5576 unsigned long truflow; 5577 char *end = NULL; 5578 5579 if (!value || !opaque_arg) { 5580 PMD_DRV_LOG(ERR, 5581 "Invalid parameter passed to truflow devargs.\n"); 5582 return -EINVAL; 5583 } 5584 5585 truflow = strtoul(value, &end, 10); 5586 if (end == NULL || *end != '\0' || 5587 (truflow == ULONG_MAX && errno == ERANGE)) { 5588 PMD_DRV_LOG(ERR, 5589 "Invalid parameter passed to truflow devargs.\n"); 5590 return -EINVAL; 5591 } 5592 5593 if (BNXT_DEVARG_TRUFLOW_INVALID(truflow)) { 5594 PMD_DRV_LOG(ERR, 5595 "Invalid value passed to truflow devargs.\n"); 5596 return -EINVAL; 5597 } 5598 5599 bp->flags |= BNXT_FLAG_TRUFLOW_EN; 5600 if (BNXT_TRUFLOW_EN(bp)) 5601 PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n"); 5602 5603 return 0; 5604 } 5605 5606 static int 5607 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key, 5608 const char *value, void *opaque_arg) 5609 { 5610 struct bnxt *bp = opaque_arg; 5611 unsigned long flow_xstat; 5612 char *end = NULL; 5613 5614 if (!value || !opaque_arg) { 5615 PMD_DRV_LOG(ERR, 5616 "Invalid parameter passed to flow_xstat 
devarg.\n"); 5617 return -EINVAL; 5618 } 5619 5620 flow_xstat = strtoul(value, &end, 10); 5621 if (end == NULL || *end != '\0' || 5622 (flow_xstat == ULONG_MAX && errno == ERANGE)) { 5623 PMD_DRV_LOG(ERR, 5624 "Invalid parameter passed to flow_xstat devarg.\n"); 5625 return -EINVAL; 5626 } 5627 5628 if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) { 5629 PMD_DRV_LOG(ERR, 5630 "Invalid value passed to flow_xstat devarg.\n"); 5631 return -EINVAL; 5632 } 5633 5634 bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; 5635 if (BNXT_FLOW_XSTATS_EN(bp)) 5636 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n"); 5637 5638 return 0; 5639 } 5640 5641 static int 5642 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key, 5643 const char *value, void *opaque_arg) 5644 { 5645 struct bnxt *bp = opaque_arg; 5646 unsigned long max_num_kflows; 5647 char *end = NULL; 5648 5649 if (!value || !opaque_arg) { 5650 PMD_DRV_LOG(ERR, 5651 "Invalid parameter passed to max_num_kflows devarg.\n"); 5652 return -EINVAL; 5653 } 5654 5655 max_num_kflows = strtoul(value, &end, 10); 5656 if (end == NULL || *end != '\0' || 5657 (max_num_kflows == ULONG_MAX && errno == ERANGE)) { 5658 PMD_DRV_LOG(ERR, 5659 "Invalid parameter passed to max_num_kflows devarg.\n"); 5660 return -EINVAL; 5661 } 5662 5663 if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) { 5664 PMD_DRV_LOG(ERR, 5665 "Invalid value passed to max_num_kflows devarg.\n"); 5666 return -EINVAL; 5667 } 5668 5669 bp->max_num_kflows = max_num_kflows; 5670 if (bp->max_num_kflows) 5671 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n", 5672 max_num_kflows); 5673 5674 return 0; 5675 } 5676 5677 static void 5678 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) 5679 { 5680 struct rte_kvargs *kvlist; 5681 5682 if (devargs == NULL) 5683 return; 5684 5685 kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args); 5686 if (kvlist == NULL) 5687 return; 5688 5689 /* 5690 * Handler for "truflow" devarg. 5691 * Invoked as for ex: "-w 0000:00:0d.0,host-based-truflow=1" 5692 */ 5693 rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW, 5694 bnxt_parse_devarg_truflow, bp); 5695 5696 /* 5697 * Handler for "flow_xstat" devarg. 5698 * Invoked as for ex: "-w 0000:00:0d.0,flow_xstat=1" 5699 */ 5700 rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT, 5701 bnxt_parse_devarg_flow_xstat, bp); 5702 5703 /* 5704 * Handler for "max_num_kflows" devarg. 
5705 * Invoked as for ex: "-w 000:00:0d.0,max_num_kflows=32" 5706 */ 5707 rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS, 5708 bnxt_parse_devarg_max_num_kflows, bp); 5709 5710 rte_kvargs_free(kvlist); 5711 } 5712 5713 static int bnxt_alloc_switch_domain(struct bnxt *bp) 5714 { 5715 int rc = 0; 5716 5717 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 5718 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id); 5719 if (rc) 5720 PMD_DRV_LOG(ERR, 5721 "Failed to alloc switch domain: %d\n", rc); 5722 else 5723 PMD_DRV_LOG(INFO, 5724 "Switch domain allocated %d\n", 5725 bp->switch_domain_id); 5726 } 5727 5728 return rc; 5729 } 5730 5731 static int 5732 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) 5733 { 5734 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5735 static int version_printed; 5736 struct bnxt *bp; 5737 int rc; 5738 5739 if (version_printed++ == 0) 5740 PMD_DRV_LOG(INFO, "%s\n", bnxt_version); 5741 5742 eth_dev->dev_ops = &bnxt_dev_ops; 5743 eth_dev->rx_pkt_burst = &bnxt_recv_pkts; 5744 eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; 5745 5746 /* 5747 * For secondary processes, we don't initialise any further 5748 * as primary has already done this work. 5749 */ 5750 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5751 return 0; 5752 5753 rte_eth_copy_pci_info(eth_dev, pci_dev); 5754 5755 bp = eth_dev->data->dev_private; 5756 5757 /* Parse dev arguments passed on when starting the DPDK application. */ 5758 bnxt_parse_dev_args(bp, pci_dev->device.devargs); 5759 5760 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 5761 5762 if (bnxt_vf_pciid(pci_dev->id.device_id)) 5763 bp->flags |= BNXT_FLAG_VF; 5764 5765 if (bnxt_thor_device(pci_dev->id.device_id)) 5766 bp->flags |= BNXT_FLAG_THOR_CHIP; 5767 5768 if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 || 5769 pci_dev->id.device_id == BROADCOM_DEV_ID_58804 || 5770 pci_dev->id.device_id == BROADCOM_DEV_ID_58808 || 5771 pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF) 5772 bp->flags |= BNXT_FLAG_STINGRAY; 5773 5774 rc = bnxt_init_board(eth_dev); 5775 if (rc) { 5776 PMD_DRV_LOG(ERR, 5777 "Failed to initialize board rc: %x\n", rc); 5778 return rc; 5779 } 5780 5781 rc = bnxt_alloc_pf_info(bp); 5782 if (rc) 5783 goto error_free; 5784 5785 rc = bnxt_alloc_link_info(bp); 5786 if (rc) 5787 goto error_free; 5788 5789 rc = bnxt_alloc_parent_info(bp); 5790 if (rc) 5791 goto error_free; 5792 5793 rc = bnxt_alloc_hwrm_resources(bp); 5794 if (rc) { 5795 PMD_DRV_LOG(ERR, 5796 "Failed to allocate hwrm resource rc: %x\n", rc); 5797 goto error_free; 5798 } 5799 rc = bnxt_alloc_leds_info(bp); 5800 if (rc) 5801 goto error_free; 5802 5803 rc = bnxt_alloc_cos_queues(bp); 5804 if (rc) 5805 goto error_free; 5806 5807 rc = bnxt_init_resources(bp, false); 5808 if (rc) 5809 goto error_free; 5810 5811 rc = bnxt_alloc_stats_mem(bp); 5812 if (rc) 5813 goto error_free; 5814 5815 bnxt_alloc_switch_domain(bp); 5816 5817 /* Pass the information to the rte_eth_dev_close() that it should also 5818 * release the private port resources. 
5819 */ 5820 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 5821 5822 PMD_DRV_LOG(INFO, 5823 DRV_MODULE_NAME "found at mem %" PRIX64 ", node addr %pM\n", 5824 pci_dev->mem_resource[0].phys_addr, 5825 pci_dev->mem_resource[0].addr); 5826 5827 return 0; 5828 5829 error_free: 5830 bnxt_dev_uninit(eth_dev); 5831 return rc; 5832 } 5833 5834 5835 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx) 5836 { 5837 if (!ctx) 5838 return; 5839 5840 if (ctx->va) 5841 rte_free(ctx->va); 5842 5843 ctx->va = NULL; 5844 ctx->dma = RTE_BAD_IOVA; 5845 ctx->ctx_id = BNXT_CTX_VAL_INVAL; 5846 } 5847 5848 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp) 5849 { 5850 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 5851 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 5852 bp->flow_stat->rx_fc_out_tbl.ctx_id, 5853 bp->flow_stat->max_fc, 5854 false); 5855 5856 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 5857 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 5858 bp->flow_stat->tx_fc_out_tbl.ctx_id, 5859 bp->flow_stat->max_fc, 5860 false); 5861 5862 if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5863 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id); 5864 bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5865 5866 if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5867 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id); 5868 bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5869 5870 if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5871 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id); 5872 bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5873 5874 if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5875 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id); 5876 bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5877 } 5878 5879 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp) 5880 { 5881 bnxt_unregister_fc_ctx_mem(bp); 5882 5883 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl); 5884 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl); 5885 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl); 5886 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl); 5887 } 5888 5889 static void bnxt_uninit_ctx_mem(struct bnxt *bp) 5890 { 5891 if (BNXT_FLOW_XSTATS_EN(bp)) 5892 bnxt_uninit_fc_ctx_mem(bp); 5893 } 5894 5895 static void 5896 bnxt_free_error_recovery_info(struct bnxt *bp) 5897 { 5898 rte_free(bp->recovery_info); 5899 bp->recovery_info = NULL; 5900 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5901 } 5902 5903 static void 5904 bnxt_uninit_locks(struct bnxt *bp) 5905 { 5906 pthread_mutex_destroy(&bp->flow_lock); 5907 pthread_mutex_destroy(&bp->def_cp_lock); 5908 if (bp->rep_info) 5909 pthread_mutex_destroy(&bp->rep_info->vfr_lock); 5910 } 5911 5912 static int 5913 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) 5914 { 5915 int rc; 5916 5917 bnxt_free_int(bp); 5918 bnxt_free_mem(bp, reconfig_dev); 5919 bnxt_hwrm_func_buf_unrgtr(bp); 5920 rc = bnxt_hwrm_func_driver_unregister(bp, 0); 5921 bp->flags &= ~BNXT_FLAG_REGISTERED; 5922 bnxt_free_ctx_mem(bp); 5923 if (!reconfig_dev) { 5924 bnxt_free_hwrm_resources(bp); 5925 bnxt_free_error_recovery_info(bp); 5926 } 5927 5928 bnxt_uninit_ctx_mem(bp); 5929 5930 bnxt_uninit_locks(bp); 5931 bnxt_free_flow_stats_info(bp); 5932 bnxt_free_rep_info(bp); 5933 rte_free(bp->ptp_cfg); 5934 bp->ptp_cfg = NULL; 5935 return rc; 5936 } 5937 5938 static int 5939 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) 5940 { 5941 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 
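/* device teardown below is restricted to the primary process */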
5942 return -EPERM; 5943 5944 PMD_DRV_LOG(DEBUG, "Calling Device uninit\n"); 5945 5946 if (eth_dev->state != RTE_ETH_DEV_UNUSED) 5947 bnxt_dev_close_op(eth_dev); 5948 5949 return 0; 5950 } 5951 5952 static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev) 5953 { 5954 struct bnxt *bp = eth_dev->data->dev_private; 5955 struct rte_eth_dev *vf_rep_eth_dev; 5956 int ret = 0, i; 5957 5958 if (!bp) 5959 return -EINVAL; 5960 5961 for (i = 0; i < bp->num_reps; i++) { 5962 vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev; 5963 if (!vf_rep_eth_dev) 5964 continue; 5965 rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_vf_representor_uninit); 5966 } 5967 ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit); 5968 5969 return ret; 5970 } 5971 5972 static void bnxt_free_rep_info(struct bnxt *bp) 5973 { 5974 rte_free(bp->rep_info); 5975 bp->rep_info = NULL; 5976 rte_free(bp->cfa_code_map); 5977 bp->cfa_code_map = NULL; 5978 } 5979 5980 static int bnxt_init_rep_info(struct bnxt *bp) 5981 { 5982 int i = 0, rc; 5983 5984 if (bp->rep_info) 5985 return 0; 5986 5987 bp->rep_info = rte_zmalloc("bnxt_rep_info", 5988 sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS, 5989 0); 5990 if (!bp->rep_info) { 5991 PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n"); 5992 return -ENOMEM; 5993 } 5994 bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map", 5995 sizeof(*bp->cfa_code_map) * 5996 BNXT_MAX_CFA_CODE, 0); 5997 if (!bp->cfa_code_map) { 5998 PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n"); 5999 bnxt_free_rep_info(bp); 6000 return -ENOMEM; 6001 } 6002 6003 for (i = 0; i < BNXT_MAX_CFA_CODE; i++) 6004 bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID; 6005 6006 rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL); 6007 if (rc) { 6008 PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n"); 6009 bnxt_free_rep_info(bp); 6010 return rc; 6011 } 6012 return rc; 6013 } 6014 6015 static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev, 6016 struct rte_eth_devargs eth_da, 6017 struct rte_eth_dev *backing_eth_dev) 6018 { 6019 struct rte_eth_dev *vf_rep_eth_dev; 6020 char name[RTE_ETH_NAME_MAX_LEN]; 6021 struct bnxt *backing_bp; 6022 uint16_t num_rep; 6023 int i, ret = 0; 6024 6025 num_rep = eth_da.nb_representor_ports; 6026 if (num_rep > BNXT_MAX_VF_REPS) { 6027 PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n", 6028 num_rep, BNXT_MAX_VF_REPS); 6029 return -EINVAL; 6030 } 6031 6032 if (num_rep > RTE_MAX_ETHPORTS) { 6033 PMD_DRV_LOG(ERR, 6034 "nb_representor_ports = %d > %d MAX ETHPORTS\n", 6035 num_rep, RTE_MAX_ETHPORTS); 6036 return -EINVAL; 6037 } 6038 6039 backing_bp = backing_eth_dev->data->dev_private; 6040 6041 if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) { 6042 PMD_DRV_LOG(ERR, 6043 "Not a PF or trusted VF. No Representor support\n"); 6044 /* Returning an error is not an option. 
6045 * Applications are not handling this correctly 6046 */ 6047 return 0; 6048 } 6049 6050 if (bnxt_init_rep_info(backing_bp)) 6051 return 0; 6052 6053 for (i = 0; i < num_rep; i++) { 6054 struct bnxt_vf_representor representor = { 6055 .vf_id = eth_da.representor_ports[i], 6056 .switch_domain_id = backing_bp->switch_domain_id, 6057 .parent_dev = backing_eth_dev 6058 }; 6059 6060 if (representor.vf_id >= BNXT_MAX_VF_REPS) { 6061 PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n", 6062 representor.vf_id, BNXT_MAX_VF_REPS); 6063 continue; 6064 } 6065 6066 /* representor port net_bdf_port */ 6067 snprintf(name, sizeof(name), "net_%s_representor_%d", 6068 pci_dev->device.name, eth_da.representor_ports[i]); 6069 6070 ret = rte_eth_dev_create(&pci_dev->device, name, 6071 sizeof(struct bnxt_vf_representor), 6072 NULL, NULL, 6073 bnxt_vf_representor_init, 6074 &representor); 6075 6076 if (!ret) { 6077 vf_rep_eth_dev = rte_eth_dev_allocated(name); 6078 if (!vf_rep_eth_dev) { 6079 PMD_DRV_LOG(ERR, "Failed to find the eth_dev" 6080 " for VF-Rep: %s.", name); 6081 bnxt_pci_remove_dev_with_reps(backing_eth_dev); 6082 ret = -ENODEV; 6083 return ret; 6084 } 6085 backing_bp->rep_info[representor.vf_id].vfr_eth_dev = 6086 vf_rep_eth_dev; 6087 backing_bp->num_reps++; 6088 } else { 6089 PMD_DRV_LOG(ERR, "failed to create bnxt vf " 6090 "representor %s.", name); 6091 bnxt_pci_remove_dev_with_reps(backing_eth_dev); 6092 } 6093 } 6094 6095 return ret; 6096 } 6097 6098 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 6099 struct rte_pci_device *pci_dev) 6100 { 6101 struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 }; 6102 struct rte_eth_dev *backing_eth_dev; 6103 uint16_t num_rep; 6104 int ret = 0; 6105 6106 if (pci_dev->device.devargs) { 6107 ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, 6108 &eth_da); 6109 if (ret) 6110 return ret; 6111 } 6112 6113 num_rep = eth_da.nb_representor_ports; 6114 PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n", 6115 num_rep); 6116 6117 /* We could come here after first level of probe is already invoked 6118 * as part of an application bringup(OVS-DPDK vswitchd), so first check 6119 * for already allocated eth_dev for the backing device (PF/Trusted VF) 6120 */ 6121 backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 6122 if (backing_eth_dev == NULL) { 6123 ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, 6124 sizeof(struct bnxt), 6125 eth_dev_pci_specific_init, pci_dev, 6126 bnxt_dev_init, NULL); 6127 6128 if (ret || !num_rep) 6129 return ret; 6130 6131 backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 6132 } 6133 6134 /* probe representor ports now */ 6135 ret = bnxt_rep_port_probe(pci_dev, eth_da, backing_eth_dev); 6136 6137 return ret; 6138 } 6139 6140 static int bnxt_pci_remove(struct rte_pci_device *pci_dev) 6141 { 6142 struct rte_eth_dev *eth_dev; 6143 6144 eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 6145 if (!eth_dev) 6146 return 0; /* Invoked typically only by OVS-DPDK, by the 6147 * time it comes here the eth_dev is already 6148 * deleted by rte_eth_dev_close(), so returning 6149 * +ve value will at least help in proper cleanup 6150 */ 6151 6152 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 6153 if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) 6154 return rte_eth_dev_destroy(eth_dev, 6155 bnxt_vf_representor_uninit); 6156 else 6157 return rte_eth_dev_destroy(eth_dev, 6158 bnxt_dev_uninit); 6159 } else { 6160 return rte_eth_dev_pci_generic_remove(pci_dev, NULL); 6161 }
6162 } 6163 6164 static struct rte_pci_driver bnxt_rte_pmd = { 6165 .id_table = bnxt_pci_id_map, 6166 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 6167 RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs 6168 * and OVS-DPDK 6169 */ 6170 .probe = bnxt_pci_probe, 6171 .remove = bnxt_pci_remove, 6172 }; 6173 6174 static bool 6175 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 6176 { 6177 if (strcmp(dev->device->driver->name, drv->driver.name)) 6178 return false; 6179 6180 return true; 6181 } 6182 6183 bool is_bnxt_supported(struct rte_eth_dev *dev) 6184 { 6185 return is_device_supported(dev, &bnxt_rte_pmd); 6186 } 6187 6188 RTE_LOG_REGISTER(bnxt_logtype_driver, pmd.net.bnxt.driver, NOTICE); 6189 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd); 6190 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map); 6191 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci"); 6192
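/*
 * Example (illustrative): the backing PF port and its VF representors can
 * be created together by combining the standard "representor" devarg with
 * the driver-specific devargs parsed in bnxt_parse_dev_args(), e.g.
 *   -w 0000:00:0d.0,host-based-truflow=1,representor=[0-3]
 * bnxt_pci_probe() first creates (or finds) the backing eth_dev and then
 * calls bnxt_rep_port_probe(), which instantiates one representor eth_dev
 * per requested id.
 */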