/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_util.h"

#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
	"Broadcom NetXtreme driver " DRV_MODULE_NAME;
int bnxt_logtype_driver;

#define PCI_VENDOR_ID_BROADCOM 0x14E4

#define BROADCOM_DEV_ID_STRATUS_NIC_VF1 0x1606
#define BROADCOM_DEV_ID_STRATUS_NIC_VF2 0x1609
#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
#define BROADCOM_DEV_ID_57414_VF 0x16c1
#define BROADCOM_DEV_ID_57301 0x16c8
#define BROADCOM_DEV_ID_57302 0x16c9
#define BROADCOM_DEV_ID_57304_PF 0x16ca
#define BROADCOM_DEV_ID_57304_VF 0x16cb
#define BROADCOM_DEV_ID_57417_MF 0x16cc
#define BROADCOM_DEV_ID_NS2 0x16cd
#define BROADCOM_DEV_ID_57311 0x16ce
#define BROADCOM_DEV_ID_57312 0x16cf
#define BROADCOM_DEV_ID_57402 0x16d0
#define BROADCOM_DEV_ID_57404 0x16d1
#define BROADCOM_DEV_ID_57406_PF 0x16d2
#define BROADCOM_DEV_ID_57406_VF 0x16d3
#define BROADCOM_DEV_ID_57402_MF 0x16d4
#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
#define BROADCOM_DEV_ID_57412 0x16d6
#define BROADCOM_DEV_ID_57414 0x16d7
#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
#define BROADCOM_DEV_ID_5741X_VF 0x16dc
#define BROADCOM_DEV_ID_57412_MF 0x16de
#define BROADCOM_DEV_ID_57314 0x16df
#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
#define BROADCOM_DEV_ID_5731X_VF 0x16e1
#define BROADCOM_DEV_ID_57417_SFP 0x16e2
#define BROADCOM_DEV_ID_57416_SFP 0x16e3
#define BROADCOM_DEV_ID_57317_SFP 0x16e4
#define BROADCOM_DEV_ID_57404_MF 0x16e7
#define BROADCOM_DEV_ID_57406_MF 0x16e8
#define BROADCOM_DEV_ID_57407_SFP 0x16e9
#define BROADCOM_DEV_ID_57407_MF 0x16ea
#define BROADCOM_DEV_ID_57414_MF 0x16ec
#define BROADCOM_DEV_ID_57416_MF 0x16ee
#define BROADCOM_DEV_ID_57508 0x1750
#define BROADCOM_DEV_ID_57504 0x1751
#define BROADCOM_DEV_ID_57502 0x1752
#define BROADCOM_DEV_ID_57500_VF1 0x1806
#define BROADCOM_DEV_ID_57500_VF2 0x1807
#define BROADCOM_DEV_ID_58802 0xd802
#define BROADCOM_DEV_ID_58804 0xd804
#define BROADCOM_DEV_ID_58808 0x16f0
#define BROADCOM_DEV_ID_58802_VF 0xd800

static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{
RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) }, 97 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) }, 98 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) }, 99 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) }, 100 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) }, 101 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) }, 102 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) }, 103 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) }, 104 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) }, 105 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) }, 106 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) }, 107 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) }, 108 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) }, 109 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) }, 110 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) }, 111 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) }, 112 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) }, 113 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) }, 114 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) }, 115 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) }, 116 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) }, 117 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) }, 118 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) }, 119 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) }, 120 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) }, 121 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) }, 122 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) }, 123 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) }, 124 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) }, 125 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) }, 126 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) }, 127 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) }, 128 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) }, 129 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) }, 130 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) }, 131 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) }, 132 { .vendor_id = 0, /* sentinel */ }, 133 }; 134 135 #define BNXT_ETH_RSS_SUPPORT ( \ 136 ETH_RSS_IPV4 | \ 137 ETH_RSS_NONFRAG_IPV4_TCP | \ 138 ETH_RSS_NONFRAG_IPV4_UDP | \ 139 ETH_RSS_IPV6 | \ 140 ETH_RSS_NONFRAG_IPV6_TCP | \ 141 ETH_RSS_NONFRAG_IPV6_UDP) 142 143 #define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \ 144 DEV_TX_OFFLOAD_IPV4_CKSUM | \ 145 DEV_TX_OFFLOAD_TCP_CKSUM | \ 146 DEV_TX_OFFLOAD_UDP_CKSUM | \ 147 DEV_TX_OFFLOAD_TCP_TSO | \ 148 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ 149 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \ 150 DEV_TX_OFFLOAD_GRE_TNL_TSO | \ 151 DEV_TX_OFFLOAD_IPIP_TNL_TSO | \ 152 DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \ 153 DEV_TX_OFFLOAD_MULTI_SEGS) 154 155 #define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \ 156 DEV_RX_OFFLOAD_VLAN_STRIP | \ 157 DEV_RX_OFFLOAD_IPV4_CKSUM | \ 158 DEV_RX_OFFLOAD_UDP_CKSUM | \ 159 DEV_RX_OFFLOAD_TCP_CKSUM | \ 160 
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \ 161 DEV_RX_OFFLOAD_JUMBO_FRAME | \ 162 DEV_RX_OFFLOAD_KEEP_CRC | \ 163 DEV_RX_OFFLOAD_TCP_LRO) 164 165 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); 166 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev); 167 static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu); 168 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); 169 170 /***********************/ 171 172 /* 173 * High level utility functions 174 */ 175 176 static uint16_t bnxt_rss_ctxts(const struct bnxt *bp) 177 { 178 if (!BNXT_CHIP_THOR(bp)) 179 return 1; 180 181 return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings, 182 BNXT_RSS_ENTRIES_PER_CTX_THOR) / 183 BNXT_RSS_ENTRIES_PER_CTX_THOR; 184 } 185 186 static uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp) 187 { 188 if (!BNXT_CHIP_THOR(bp)) 189 return HW_HASH_INDEX_SIZE; 190 191 return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR; 192 } 193 194 static void bnxt_free_mem(struct bnxt *bp) 195 { 196 bnxt_free_filter_mem(bp); 197 bnxt_free_vnic_attributes(bp); 198 bnxt_free_vnic_mem(bp); 199 200 bnxt_free_stats(bp); 201 bnxt_free_tx_rings(bp); 202 bnxt_free_rx_rings(bp); 203 bnxt_free_async_cp_ring(bp); 204 } 205 206 static int bnxt_alloc_mem(struct bnxt *bp) 207 { 208 int rc; 209 210 rc = bnxt_alloc_async_ring_struct(bp); 211 if (rc) 212 goto alloc_mem_err; 213 214 rc = bnxt_alloc_vnic_mem(bp); 215 if (rc) 216 goto alloc_mem_err; 217 218 rc = bnxt_alloc_vnic_attributes(bp); 219 if (rc) 220 goto alloc_mem_err; 221 222 rc = bnxt_alloc_filter_mem(bp); 223 if (rc) 224 goto alloc_mem_err; 225 226 rc = bnxt_alloc_async_cp_ring(bp); 227 if (rc) 228 goto alloc_mem_err; 229 230 return 0; 231 232 alloc_mem_err: 233 bnxt_free_mem(bp); 234 return rc; 235 } 236 237 static int bnxt_init_chip(struct bnxt *bp) 238 { 239 struct bnxt_rx_queue *rxq; 240 struct rte_eth_link new; 241 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); 242 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 243 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 244 uint64_t rx_offloads = dev_conf->rxmode.offloads; 245 uint32_t intr_vector = 0; 246 uint32_t queue_id, base = BNXT_MISC_VEC_ID; 247 uint32_t vec = BNXT_MISC_VEC_ID; 248 unsigned int i, j; 249 int rc; 250 251 if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) { 252 bp->eth_dev->data->dev_conf.rxmode.offloads |= 253 DEV_RX_OFFLOAD_JUMBO_FRAME; 254 bp->flags |= BNXT_FLAG_JUMBO; 255 } else { 256 bp->eth_dev->data->dev_conf.rxmode.offloads &= 257 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 258 bp->flags &= ~BNXT_FLAG_JUMBO; 259 } 260 261 /* THOR does not support ring groups. 262 * But we will use the array to save RSS context IDs. 
263 */ 264 if (BNXT_CHIP_THOR(bp)) 265 bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR; 266 267 rc = bnxt_alloc_all_hwrm_stat_ctxs(bp); 268 if (rc) { 269 PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc); 270 goto err_out; 271 } 272 273 rc = bnxt_alloc_hwrm_rings(bp); 274 if (rc) { 275 PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc); 276 goto err_out; 277 } 278 279 rc = bnxt_alloc_all_hwrm_ring_grps(bp); 280 if (rc) { 281 PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc); 282 goto err_out; 283 } 284 285 rc = bnxt_mq_rx_configure(bp); 286 if (rc) { 287 PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc); 288 goto err_out; 289 } 290 291 /* VNIC configuration */ 292 for (i = 0; i < bp->nr_vnics; i++) { 293 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 294 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 295 uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps; 296 297 vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0); 298 if (!vnic->fw_grp_ids) { 299 PMD_DRV_LOG(ERR, 300 "Failed to alloc %d bytes for group ids\n", 301 size); 302 rc = -ENOMEM; 303 goto err_out; 304 } 305 memset(vnic->fw_grp_ids, -1, size); 306 307 PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 308 i, vnic, vnic->fw_grp_ids); 309 310 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 311 if (rc) { 312 PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n", 313 i, rc); 314 goto err_out; 315 } 316 317 /* Alloc RSS context only if RSS mode is enabled */ 318 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) { 319 int j, nr_ctxs = bnxt_rss_ctxts(bp); 320 321 rc = 0; 322 for (j = 0; j < nr_ctxs; j++) { 323 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j); 324 if (rc) 325 break; 326 } 327 if (rc) { 328 PMD_DRV_LOG(ERR, 329 "HWRM vnic %d ctx %d alloc failure rc: %x\n", 330 i, j, rc); 331 goto err_out; 332 } 333 vnic->num_lb_ctxts = nr_ctxs; 334 } 335 336 /* 337 * Firmware sets pf pair in default vnic cfg. If the VLAN strip 338 * setting is not available at this time, it will not be 339 * configured correctly in the CFA. 
340 */ 341 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 342 vnic->vlan_strip = true; 343 else 344 vnic->vlan_strip = false; 345 346 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 347 if (rc) { 348 PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n", 349 i, rc); 350 goto err_out; 351 } 352 353 rc = bnxt_set_hwrm_vnic_filters(bp, vnic); 354 if (rc) { 355 PMD_DRV_LOG(ERR, 356 "HWRM vnic %d filter failure rc: %x\n", 357 i, rc); 358 goto err_out; 359 } 360 361 for (j = 0; j < bp->rx_nr_rings; j++) { 362 rxq = bp->eth_dev->data->rx_queues[j]; 363 364 PMD_DRV_LOG(DEBUG, 365 "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n", 366 j, rxq->vnic, rxq->vnic->fw_grp_ids); 367 368 if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start) 369 rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; 370 } 371 372 rc = bnxt_vnic_rss_configure(bp, vnic); 373 if (rc) { 374 PMD_DRV_LOG(ERR, 375 "HWRM vnic set RSS failure rc: %x\n", rc); 376 goto err_out; 377 } 378 379 bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 380 381 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 382 DEV_RX_OFFLOAD_TCP_LRO) 383 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1); 384 else 385 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0); 386 } 387 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); 388 if (rc) { 389 PMD_DRV_LOG(ERR, 390 "HWRM cfa l2 rx mask failure rc: %x\n", rc); 391 goto err_out; 392 } 393 394 /* check and configure queue intr-vector mapping */ 395 if ((rte_intr_cap_multiple(intr_handle) || 396 !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && 397 bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { 398 intr_vector = bp->eth_dev->data->nb_rx_queues; 399 PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector); 400 if (intr_vector > bp->rx_cp_nr_rings) { 401 PMD_DRV_LOG(ERR, "At most %d intr queues supported", 402 bp->rx_cp_nr_rings); 403 return -ENOTSUP; 404 } 405 rc = rte_intr_efd_enable(intr_handle, intr_vector); 406 if (rc) 407 return rc; 408 } 409 410 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 411 intr_handle->intr_vec = 412 rte_zmalloc("intr_vec", 413 bp->eth_dev->data->nb_rx_queues * 414 sizeof(int), 0); 415 if (intr_handle->intr_vec == NULL) { 416 PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" 417 " intr_vec", bp->eth_dev->data->nb_rx_queues); 418 rc = -ENOMEM; 419 goto err_disable; 420 } 421 PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p " 422 "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n", 423 intr_handle->intr_vec, intr_handle->nb_efd, 424 intr_handle->max_intr); 425 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; 426 queue_id++) { 427 intr_handle->intr_vec[queue_id] = 428 vec + BNXT_RX_VEC_START; 429 if (vec < base + intr_handle->nb_efd - 1) 430 vec++; 431 } 432 } 433 434 /* enable uio/vfio intr/eventfd mapping */ 435 rc = rte_intr_enable(intr_handle); 436 if (rc) 437 goto err_free; 438 439 rc = bnxt_get_hwrm_link_config(bp, &new); 440 if (rc) { 441 PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc); 442 goto err_free; 443 } 444 445 if (!bp->link_info.link_up) { 446 rc = bnxt_set_hwrm_link_config(bp, true); 447 if (rc) { 448 PMD_DRV_LOG(ERR, 449 "HWRM link config failure rc: %x\n", rc); 450 goto err_free; 451 } 452 } 453 bnxt_print_link_info(bp->eth_dev); 454 455 return 0; 456 457 err_free: 458 rte_free(intr_handle->intr_vec); 459 err_disable: 460 rte_intr_efd_disable(intr_handle); 461 err_out: 462 /* Some of the error status returned by FW may not be from errno.h */ 463 if (rc > 0) 464 rc = -EIO; 465 466 return rc; 467 } 468 469 static int bnxt_shutdown_nic(struct bnxt *bp) 470 { 471 
bnxt_free_all_hwrm_resources(bp); 472 bnxt_free_all_filters(bp); 473 bnxt_free_all_vnics(bp); 474 return 0; 475 } 476 477 static int bnxt_init_nic(struct bnxt *bp) 478 { 479 int rc; 480 481 if (BNXT_HAS_RING_GRPS(bp)) { 482 rc = bnxt_init_ring_grps(bp); 483 if (rc) 484 return rc; 485 } 486 487 bnxt_init_vnics(bp); 488 bnxt_init_filters(bp); 489 490 return 0; 491 } 492 493 /* 494 * Device configuration and status function 495 */ 496 497 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, 498 struct rte_eth_dev_info *dev_info) 499 { 500 struct bnxt *bp = eth_dev->data->dev_private; 501 uint16_t max_vnics, i, j, vpool, vrxq; 502 unsigned int max_rx_rings; 503 504 /* MAC Specifics */ 505 dev_info->max_mac_addrs = bp->max_l2_ctx; 506 dev_info->max_hash_mac_addrs = 0; 507 508 /* PF/VF specifics */ 509 if (BNXT_PF(bp)) 510 dev_info->max_vfs = bp->pdev->max_vfs; 511 max_rx_rings = RTE_MIN(bp->max_rx_rings, bp->max_stat_ctx); 512 /* For the sake of symmetry, max_rx_queues = max_tx_queues */ 513 dev_info->max_rx_queues = max_rx_rings; 514 dev_info->max_tx_queues = max_rx_rings; 515 dev_info->reta_size = bnxt_rss_hash_tbl_size(bp); 516 dev_info->hash_key_size = 40; 517 max_vnics = bp->max_vnics; 518 519 /* Fast path specifics */ 520 dev_info->min_rx_bufsize = 1; 521 dev_info->max_rx_pktlen = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN + 522 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2; 523 524 dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT; 525 if (bp->flags & BNXT_FLAG_PTP_SUPPORTED) 526 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; 527 dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT; 528 dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT; 529 530 /* *INDENT-OFF* */ 531 dev_info->default_rxconf = (struct rte_eth_rxconf) { 532 .rx_thresh = { 533 .pthresh = 8, 534 .hthresh = 8, 535 .wthresh = 0, 536 }, 537 .rx_free_thresh = 32, 538 /* If no descriptors available, pkts are dropped by default */ 539 .rx_drop_en = 1, 540 }; 541 542 dev_info->default_txconf = (struct rte_eth_txconf) { 543 .tx_thresh = { 544 .pthresh = 32, 545 .hthresh = 0, 546 .wthresh = 0, 547 }, 548 .tx_free_thresh = 32, 549 .tx_rs_thresh = 32, 550 }; 551 eth_dev->data->dev_conf.intr_conf.lsc = 1; 552 553 eth_dev->data->dev_conf.intr_conf.rxq = 1; 554 dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 555 dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; 556 dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 557 dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC; 558 559 /* *INDENT-ON* */ 560 561 /* 562 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim 563 * need further investigation. 
 */

	/* VMDq resources */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;

	return 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int rc;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
		rc = bnxt_hwrm_check_vf_rings(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
			return -ENOSPC;
		}

		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
			return -ENOSPC;
		}
	} else {
		/* legacy driver needs to get updated values */
		rc = bnxt_hwrm_func_qcaps(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
			return rc;
		}
	}

	/* Inherit new configurations */
	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
	    + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
	    bp->max_stat_ctx)
		goto resource_error;

	if (BNXT_HAS_RING_GRPS(bp) &&
	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
		goto resource_error;

	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
	    bp->max_vnics < eth_dev->data->nb_rx_queues)
		goto resource_error;

	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
			BNXT_NUM_VLANS;
		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
	}
	return 0;

resource_error:
	PMD_DRV_LOG(ERR,
		    "Insufficient resources to support requested config\n");
	PMD_DRV_LOG(ERR,
		    "Num Queues Requested: Tx %d, Rx %d\n",
		    eth_dev->data->nb_tx_queues,
		    eth_dev->data->nb_rx_queues);
	PMD_DRV_LOG(ERR,
		    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
		    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
		    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
	return -ENOSPC;
}

static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
			    eth_dev->data->port_id,
			    (uint32_t)link->link_speed,
			    (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
680 ("full-duplex") : ("half-duplex\n")); 681 else 682 PMD_DRV_LOG(INFO, "Port %d Link Down\n", 683 eth_dev->data->port_id); 684 } 685 686 /* 687 * Determine whether the current configuration requires support for scattered 688 * receive; return 1 if scattered receive is required and 0 if not. 689 */ 690 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev) 691 { 692 uint16_t buf_size; 693 int i; 694 695 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 696 struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i]; 697 698 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - 699 RTE_PKTMBUF_HEADROOM); 700 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) 701 return 1; 702 } 703 return 0; 704 } 705 706 static eth_rx_burst_t 707 bnxt_receive_function(__rte_unused struct rte_eth_dev *eth_dev) 708 { 709 #ifdef RTE_ARCH_X86 710 /* 711 * Vector mode receive can be enabled only if scatter rx is not 712 * in use and rx offloads are limited to VLAN stripping and 713 * CRC stripping. 714 */ 715 if (!eth_dev->data->scattered_rx && 716 !(eth_dev->data->dev_conf.rxmode.offloads & 717 ~(DEV_RX_OFFLOAD_VLAN_STRIP | 718 DEV_RX_OFFLOAD_KEEP_CRC | 719 DEV_RX_OFFLOAD_JUMBO_FRAME | 720 DEV_RX_OFFLOAD_IPV4_CKSUM | 721 DEV_RX_OFFLOAD_UDP_CKSUM | 722 DEV_RX_OFFLOAD_TCP_CKSUM | 723 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 724 DEV_RX_OFFLOAD_VLAN_FILTER))) { 725 PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n", 726 eth_dev->data->port_id); 727 return bnxt_recv_pkts_vec; 728 } 729 PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n", 730 eth_dev->data->port_id); 731 PMD_DRV_LOG(INFO, 732 "Port %d scatter: %d rx offload: %" PRIX64 "\n", 733 eth_dev->data->port_id, 734 eth_dev->data->scattered_rx, 735 eth_dev->data->dev_conf.rxmode.offloads); 736 #endif 737 return bnxt_recv_pkts; 738 } 739 740 static eth_tx_burst_t 741 bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev) 742 { 743 #ifdef RTE_ARCH_X86 744 /* 745 * Vector mode transmit can be enabled only if not using scatter rx 746 * or tx offloads. 
747 */ 748 if (!eth_dev->data->scattered_rx && 749 !eth_dev->data->dev_conf.txmode.offloads) { 750 PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n", 751 eth_dev->data->port_id); 752 return bnxt_xmit_pkts_vec; 753 } 754 PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n", 755 eth_dev->data->port_id); 756 PMD_DRV_LOG(INFO, 757 "Port %d scatter: %d tx offload: %" PRIX64 "\n", 758 eth_dev->data->port_id, 759 eth_dev->data->scattered_rx, 760 eth_dev->data->dev_conf.txmode.offloads); 761 #endif 762 return bnxt_xmit_pkts; 763 } 764 765 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) 766 { 767 struct bnxt *bp = eth_dev->data->dev_private; 768 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 769 int vlan_mask = 0; 770 int rc; 771 772 if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) { 773 PMD_DRV_LOG(ERR, 774 "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", 775 bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); 776 } 777 778 rc = bnxt_init_chip(bp); 779 if (rc) 780 goto error; 781 782 eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev); 783 784 bnxt_link_update_op(eth_dev, 1); 785 786 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 787 vlan_mask |= ETH_VLAN_FILTER_MASK; 788 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 789 vlan_mask |= ETH_VLAN_STRIP_MASK; 790 rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); 791 if (rc) 792 goto error; 793 794 eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev); 795 eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev); 796 bnxt_enable_int(bp); 797 bp->flags |= BNXT_FLAG_INIT_DONE; 798 bp->dev_stopped = 0; 799 return 0; 800 801 error: 802 bnxt_shutdown_nic(bp); 803 bnxt_free_tx_mbufs(bp); 804 bnxt_free_rx_mbufs(bp); 805 return rc; 806 } 807 808 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) 809 { 810 struct bnxt *bp = eth_dev->data->dev_private; 811 int rc = 0; 812 813 if (!bp->link_info.link_up) 814 rc = bnxt_set_hwrm_link_config(bp, true); 815 if (!rc) 816 eth_dev->data->dev_link.link_status = 1; 817 818 bnxt_print_link_info(eth_dev); 819 return 0; 820 } 821 822 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) 823 { 824 struct bnxt *bp = eth_dev->data->dev_private; 825 826 eth_dev->data->dev_link.link_status = 0; 827 bnxt_set_hwrm_link_config(bp, false); 828 bp->link_info.link_up = 0; 829 830 return 0; 831 } 832 833 /* Unload the driver, release resources */ 834 static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) 835 { 836 struct bnxt *bp = eth_dev->data->dev_private; 837 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 838 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 839 840 bnxt_disable_int(bp); 841 842 /* disable uio/vfio intr/eventfd mapping */ 843 rte_intr_disable(intr_handle); 844 845 bp->flags &= ~BNXT_FLAG_INIT_DONE; 846 if (bp->eth_dev->data->dev_started) { 847 /* TBD: STOP HW queues DMA */ 848 eth_dev->data->dev_link.link_status = 0; 849 } 850 bnxt_set_hwrm_link_config(bp, false); 851 852 /* Clean queue intr-vector mapping */ 853 rte_intr_efd_disable(intr_handle); 854 if (intr_handle->intr_vec != NULL) { 855 rte_free(intr_handle->intr_vec); 856 intr_handle->intr_vec = NULL; 857 } 858 859 bnxt_hwrm_port_clr_stats(bp); 860 bnxt_free_tx_mbufs(bp); 861 bnxt_free_rx_mbufs(bp); 862 bnxt_shutdown_nic(bp); 863 bp->dev_stopped = 1; 864 } 865 866 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) 867 { 868 struct bnxt *bp = eth_dev->data->dev_private; 869 870 if (bp->dev_stopped == 0) 871 bnxt_dev_stop_op(eth_dev); 872 
	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}

	bnxt_dev_uninit(eth_dev);
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	uint32_t i;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < bp->nr_vnics; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		vnic = &bp->vnic_info[i];
		filter = STAILQ_FIRST(&vnic->filter);
		while (filter) {
			temp_filter = STAILQ_NEXT(filter, next);
			if (filter->mac_index == index) {
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				bnxt_hwrm_clear_l2_filter(bp, filter);
				filter->mac_index = INVALID_MAC_INDEX;
				memset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN);
				STAILQ_INSERT_TAIL(&bp->free_filter_list,
						   filter, next);
			}
			filter = temp_filter;
		}
	}
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
	struct bnxt_filter_info *filter;
	int rc = 0;

	/* Adding a MAC address is allowed only on the PF or a trusted VF. */
	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}
	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			PMD_DRV_LOG(ERR,
				    "MAC addr already exists for pool %d\n",
				    pool);
			return 0;
		}
	}
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
		return -ENODEV;
	}

	filter->mac_index = index;
	memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);

	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
	if (!rc) {
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	} else {
		filter->mac_index = INVALID_MAC_INDEX;
		memset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN);
		bnxt_free_filter(bp, filter);
	}

	return rc;
}

int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_link new;
	unsigned int cnt = BNXT_LINK_WAIT_CNT;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR,
				    "Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}

		if (!wait_to_complete || new.link_status)
			break;

		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
	} while (cnt--);

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		memcpy(&eth_dev->data->dev_link, &new,
		       sizeof(struct
rte_eth_link)); 998 999 _rte_eth_dev_callback_process(eth_dev, 1000 RTE_ETH_EVENT_INTR_LSC, 1001 NULL); 1002 1003 bnxt_print_link_info(eth_dev); 1004 } 1005 1006 return rc; 1007 } 1008 1009 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) 1010 { 1011 struct bnxt *bp = eth_dev->data->dev_private; 1012 struct bnxt_vnic_info *vnic; 1013 uint32_t old_flags; 1014 int rc; 1015 1016 if (bp->vnic_info == NULL) 1017 return 0; 1018 1019 vnic = &bp->vnic_info[0]; 1020 1021 old_flags = vnic->flags; 1022 vnic->flags |= BNXT_VNIC_INFO_PROMISC; 1023 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1024 if (rc != 0) 1025 vnic->flags = old_flags; 1026 1027 return rc; 1028 } 1029 1030 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) 1031 { 1032 struct bnxt *bp = eth_dev->data->dev_private; 1033 struct bnxt_vnic_info *vnic; 1034 uint32_t old_flags; 1035 int rc; 1036 1037 if (bp->vnic_info == NULL) 1038 return 0; 1039 1040 vnic = &bp->vnic_info[0]; 1041 1042 old_flags = vnic->flags; 1043 vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; 1044 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1045 if (rc != 0) 1046 vnic->flags = old_flags; 1047 1048 return rc; 1049 } 1050 1051 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) 1052 { 1053 struct bnxt *bp = eth_dev->data->dev_private; 1054 struct bnxt_vnic_info *vnic; 1055 uint32_t old_flags; 1056 int rc; 1057 1058 if (bp->vnic_info == NULL) 1059 return 0; 1060 1061 vnic = &bp->vnic_info[0]; 1062 1063 old_flags = vnic->flags; 1064 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 1065 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1066 if (rc != 0) 1067 vnic->flags = old_flags; 1068 1069 return rc; 1070 } 1071 1072 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) 1073 { 1074 struct bnxt *bp = eth_dev->data->dev_private; 1075 struct bnxt_vnic_info *vnic; 1076 uint32_t old_flags; 1077 int rc; 1078 1079 if (bp->vnic_info == NULL) 1080 return 0; 1081 1082 vnic = &bp->vnic_info[0]; 1083 1084 old_flags = vnic->flags; 1085 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 1086 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1087 if (rc != 0) 1088 vnic->flags = old_flags; 1089 1090 return rc; 1091 } 1092 1093 /* Return bnxt_rx_queue pointer corresponding to a given rxq. */ 1094 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) 1095 { 1096 if (qid >= bp->rx_nr_rings) 1097 return NULL; 1098 1099 return bp->eth_dev->data->rx_queues[qid]; 1100 } 1101 1102 /* Return rxq corresponding to a given rss table ring/group ID. 
*/ 1103 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) 1104 { 1105 struct bnxt_rx_queue *rxq; 1106 unsigned int i; 1107 1108 if (!BNXT_HAS_RING_GRPS(bp)) { 1109 for (i = 0; i < bp->rx_nr_rings; i++) { 1110 rxq = bp->eth_dev->data->rx_queues[i]; 1111 if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr) 1112 return rxq->index; 1113 } 1114 } else { 1115 for (i = 0; i < bp->rx_nr_rings; i++) { 1116 if (bp->grp_info[i].fw_grp_id == fwr) 1117 return i; 1118 } 1119 } 1120 1121 return INVALID_HW_RING_ID; 1122 } 1123 1124 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, 1125 struct rte_eth_rss_reta_entry64 *reta_conf, 1126 uint16_t reta_size) 1127 { 1128 struct bnxt *bp = eth_dev->data->dev_private; 1129 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 1130 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 1131 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 1132 uint16_t idx, sft; 1133 int i; 1134 1135 if (!vnic->rss_table) 1136 return -EINVAL; 1137 1138 if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) 1139 return -EINVAL; 1140 1141 if (reta_size != tbl_size) { 1142 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 1143 "(%d) must equal the size supported by the hardware " 1144 "(%d)\n", reta_size, tbl_size); 1145 return -EINVAL; 1146 } 1147 1148 for (i = 0; i < reta_size; i++) { 1149 struct bnxt_rx_queue *rxq; 1150 1151 idx = i / RTE_RETA_GROUP_SIZE; 1152 sft = i % RTE_RETA_GROUP_SIZE; 1153 1154 if (!(reta_conf[idx].mask & (1ULL << sft))) 1155 continue; 1156 1157 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); 1158 if (!rxq) { 1159 PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n"); 1160 return -EINVAL; 1161 } 1162 1163 if (BNXT_CHIP_THOR(bp)) { 1164 vnic->rss_table[i * 2] = 1165 rxq->rx_ring->rx_ring_struct->fw_ring_id; 1166 vnic->rss_table[i * 2 + 1] = 1167 rxq->cp_ring->cp_ring_struct->fw_ring_id; 1168 } else { 1169 vnic->rss_table[i] = 1170 vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; 1171 } 1172 1173 vnic->rss_table[i] = 1174 vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; 1175 } 1176 1177 bnxt_hwrm_vnic_rss_cfg(bp, vnic); 1178 return 0; 1179 } 1180 1181 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, 1182 struct rte_eth_rss_reta_entry64 *reta_conf, 1183 uint16_t reta_size) 1184 { 1185 struct bnxt *bp = eth_dev->data->dev_private; 1186 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 1187 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 1188 uint16_t idx, sft, i; 1189 1190 /* Retrieve from the default VNIC */ 1191 if (!vnic) 1192 return -EINVAL; 1193 if (!vnic->rss_table) 1194 return -EINVAL; 1195 1196 if (reta_size != tbl_size) { 1197 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 1198 "(%d) must equal the size supported by the hardware " 1199 "(%d)\n", reta_size, tbl_size); 1200 return -EINVAL; 1201 } 1202 1203 for (idx = 0, i = 0; i < reta_size; i++) { 1204 idx = i / RTE_RETA_GROUP_SIZE; 1205 sft = i % RTE_RETA_GROUP_SIZE; 1206 1207 if (reta_conf[idx].mask & (1ULL << sft)) { 1208 uint16_t qid; 1209 1210 if (BNXT_CHIP_THOR(bp)) 1211 qid = bnxt_rss_to_qid(bp, 1212 vnic->rss_table[i * 2]); 1213 else 1214 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); 1215 1216 if (qid == INVALID_HW_RING_ID) { 1217 PMD_DRV_LOG(ERR, "Inv. 
entry in rss table.\n"); 1218 return -EINVAL; 1219 } 1220 reta_conf[idx].reta[sft] = qid; 1221 } 1222 } 1223 1224 return 0; 1225 } 1226 1227 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, 1228 struct rte_eth_rss_conf *rss_conf) 1229 { 1230 struct bnxt *bp = eth_dev->data->dev_private; 1231 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 1232 struct bnxt_vnic_info *vnic; 1233 uint16_t hash_type = 0; 1234 unsigned int i; 1235 1236 /* 1237 * If RSS enablement were different than dev_configure, 1238 * then return -EINVAL 1239 */ 1240 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { 1241 if (!rss_conf->rss_hf) 1242 PMD_DRV_LOG(ERR, "Hash type NONE\n"); 1243 } else { 1244 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) 1245 return -EINVAL; 1246 } 1247 1248 bp->flags |= BNXT_FLAG_UPDATE_HASH; 1249 memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf)); 1250 1251 if (rss_conf->rss_hf & ETH_RSS_IPV4) 1252 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 1253 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) 1254 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 1255 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) 1256 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 1257 if (rss_conf->rss_hf & ETH_RSS_IPV6) 1258 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 1259 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) 1260 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 1261 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) 1262 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 1263 1264 /* Update the RSS VNIC(s) */ 1265 for (i = 0; i < bp->nr_vnics; i++) { 1266 vnic = &bp->vnic_info[i]; 1267 vnic->hash_type = hash_type; 1268 1269 /* 1270 * Use the supplied key if the key length is 1271 * acceptable and the rss_key is not NULL 1272 */ 1273 if (rss_conf->rss_key && 1274 rss_conf->rss_key_len <= HW_HASH_KEY_SIZE) 1275 memcpy(vnic->rss_hash_key, rss_conf->rss_key, 1276 rss_conf->rss_key_len); 1277 1278 bnxt_hwrm_vnic_rss_cfg(bp, vnic); 1279 } 1280 return 0; 1281 } 1282 1283 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, 1284 struct rte_eth_rss_conf *rss_conf) 1285 { 1286 struct bnxt *bp = eth_dev->data->dev_private; 1287 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 1288 int len; 1289 uint32_t hash_types; 1290 1291 /* RSS configuration is the same for all VNICs */ 1292 if (vnic && vnic->rss_hash_key) { 1293 if (rss_conf->rss_key) { 1294 len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ? 
1295 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 1296 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 1297 } 1298 1299 hash_types = vnic->hash_type; 1300 rss_conf->rss_hf = 0; 1301 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { 1302 rss_conf->rss_hf |= ETH_RSS_IPV4; 1303 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 1304 } 1305 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { 1306 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; 1307 hash_types &= 1308 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 1309 } 1310 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { 1311 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; 1312 hash_types &= 1313 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 1314 } 1315 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { 1316 rss_conf->rss_hf |= ETH_RSS_IPV6; 1317 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 1318 } 1319 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { 1320 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; 1321 hash_types &= 1322 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 1323 } 1324 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { 1325 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; 1326 hash_types &= 1327 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 1328 } 1329 if (hash_types) { 1330 PMD_DRV_LOG(ERR, 1331 "Unknwon RSS config from firmware (%08x), RSS disabled", 1332 vnic->hash_type); 1333 return -ENOTSUP; 1334 } 1335 } else { 1336 rss_conf->rss_hf = 0; 1337 } 1338 return 0; 1339 } 1340 1341 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 1342 struct rte_eth_fc_conf *fc_conf) 1343 { 1344 struct bnxt *bp = dev->data->dev_private; 1345 struct rte_eth_link link_info; 1346 int rc; 1347 1348 rc = bnxt_get_hwrm_link_config(bp, &link_info); 1349 if (rc) 1350 return rc; 1351 1352 memset(fc_conf, 0, sizeof(*fc_conf)); 1353 if (bp->link_info.auto_pause) 1354 fc_conf->autoneg = 1; 1355 switch (bp->link_info.pause) { 1356 case 0: 1357 fc_conf->mode = RTE_FC_NONE; 1358 break; 1359 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 1360 fc_conf->mode = RTE_FC_TX_PAUSE; 1361 break; 1362 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 1363 fc_conf->mode = RTE_FC_RX_PAUSE; 1364 break; 1365 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 1366 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 1367 fc_conf->mode = RTE_FC_FULL; 1368 break; 1369 } 1370 return 0; 1371 } 1372 1373 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 1374 struct rte_eth_fc_conf *fc_conf) 1375 { 1376 struct bnxt *bp = dev->data->dev_private; 1377 1378 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { 1379 PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n"); 1380 return -ENOTSUP; 1381 } 1382 1383 switch (fc_conf->mode) { 1384 case RTE_FC_NONE: 1385 bp->link_info.auto_pause = 0; 1386 bp->link_info.force_pause = 0; 1387 break; 1388 case RTE_FC_RX_PAUSE: 1389 if (fc_conf->autoneg) { 1390 bp->link_info.auto_pause = 1391 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 1392 bp->link_info.force_pause = 0; 1393 } else { 1394 bp->link_info.auto_pause = 0; 1395 bp->link_info.force_pause = 1396 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 1397 } 1398 break; 1399 case RTE_FC_TX_PAUSE: 1400 if (fc_conf->autoneg) { 1401 bp->link_info.auto_pause = 1402 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 1403 bp->link_info.force_pause = 0; 1404 } else { 1405 bp->link_info.auto_pause = 0; 1406 bp->link_info.force_pause = 1407 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 1408 } 1409 break; 1410 case RTE_FC_FULL: 1411 if (fc_conf->autoneg) { 1412 bp->link_info.auto_pause = 1413 
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 1414 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 1415 bp->link_info.force_pause = 0; 1416 } else { 1417 bp->link_info.auto_pause = 0; 1418 bp->link_info.force_pause = 1419 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 1420 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 1421 } 1422 break; 1423 } 1424 return bnxt_set_hwrm_link_config(bp, true); 1425 } 1426 1427 /* Add UDP tunneling port */ 1428 static int 1429 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 1430 struct rte_eth_udp_tunnel *udp_tunnel) 1431 { 1432 struct bnxt *bp = eth_dev->data->dev_private; 1433 uint16_t tunnel_type = 0; 1434 int rc = 0; 1435 1436 switch (udp_tunnel->prot_type) { 1437 case RTE_TUNNEL_TYPE_VXLAN: 1438 if (bp->vxlan_port_cnt) { 1439 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 1440 udp_tunnel->udp_port); 1441 if (bp->vxlan_port != udp_tunnel->udp_port) { 1442 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 1443 return -ENOSPC; 1444 } 1445 bp->vxlan_port_cnt++; 1446 return 0; 1447 } 1448 tunnel_type = 1449 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 1450 bp->vxlan_port_cnt++; 1451 break; 1452 case RTE_TUNNEL_TYPE_GENEVE: 1453 if (bp->geneve_port_cnt) { 1454 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 1455 udp_tunnel->udp_port); 1456 if (bp->geneve_port != udp_tunnel->udp_port) { 1457 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 1458 return -ENOSPC; 1459 } 1460 bp->geneve_port_cnt++; 1461 return 0; 1462 } 1463 tunnel_type = 1464 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 1465 bp->geneve_port_cnt++; 1466 break; 1467 default: 1468 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 1469 return -ENOTSUP; 1470 } 1471 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 1472 tunnel_type); 1473 return rc; 1474 } 1475 1476 static int 1477 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 1478 struct rte_eth_udp_tunnel *udp_tunnel) 1479 { 1480 struct bnxt *bp = eth_dev->data->dev_private; 1481 uint16_t tunnel_type = 0; 1482 uint16_t port = 0; 1483 int rc = 0; 1484 1485 switch (udp_tunnel->prot_type) { 1486 case RTE_TUNNEL_TYPE_VXLAN: 1487 if (!bp->vxlan_port_cnt) { 1488 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 1489 return -EINVAL; 1490 } 1491 if (bp->vxlan_port != udp_tunnel->udp_port) { 1492 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 1493 udp_tunnel->udp_port, bp->vxlan_port); 1494 return -EINVAL; 1495 } 1496 if (--bp->vxlan_port_cnt) 1497 return 0; 1498 1499 tunnel_type = 1500 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 1501 port = bp->vxlan_fw_dst_port_id; 1502 break; 1503 case RTE_TUNNEL_TYPE_GENEVE: 1504 if (!bp->geneve_port_cnt) { 1505 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 1506 return -EINVAL; 1507 } 1508 if (bp->geneve_port != udp_tunnel->udp_port) { 1509 PMD_DRV_LOG(ERR, "Req Port: %d. 
Configured port: %d\n", 1510 udp_tunnel->udp_port, bp->geneve_port); 1511 return -EINVAL; 1512 } 1513 if (--bp->geneve_port_cnt) 1514 return 0; 1515 1516 tunnel_type = 1517 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 1518 port = bp->geneve_fw_dst_port_id; 1519 break; 1520 default: 1521 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 1522 return -ENOTSUP; 1523 } 1524 1525 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 1526 if (!rc) { 1527 if (tunnel_type == 1528 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) 1529 bp->vxlan_port = 0; 1530 if (tunnel_type == 1531 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) 1532 bp->geneve_port = 0; 1533 } 1534 return rc; 1535 } 1536 1537 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 1538 { 1539 struct bnxt_filter_info *filter; 1540 struct bnxt_vnic_info *vnic; 1541 int rc = 0; 1542 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 1543 1544 /* if VLAN exists && VLAN matches vlan_id 1545 * remove the MAC+VLAN filter 1546 * add a new MAC only filter 1547 * else 1548 * VLAN filter doesn't exist, just skip and continue 1549 */ 1550 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1551 filter = STAILQ_FIRST(&vnic->filter); 1552 while (filter) { 1553 /* Search for this matching MAC+VLAN filter */ 1554 if (filter->enables & chk && filter->l2_ivlan == vlan_id && 1555 !memcmp(filter->l2_addr, 1556 bp->mac_addr, 1557 RTE_ETHER_ADDR_LEN)) { 1558 /* Delete the filter */ 1559 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 1560 if (rc) 1561 return rc; 1562 STAILQ_REMOVE(&vnic->filter, filter, 1563 bnxt_filter_info, next); 1564 STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next); 1565 1566 PMD_DRV_LOG(INFO, 1567 "Del Vlan filter for %d\n", 1568 vlan_id); 1569 return rc; 1570 } 1571 filter = STAILQ_NEXT(filter, next); 1572 } 1573 return -ENOENT; 1574 } 1575 1576 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 1577 { 1578 struct bnxt_filter_info *filter; 1579 struct bnxt_vnic_info *vnic; 1580 int rc = 0; 1581 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 1582 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 1583 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 1584 1585 /* Implementation notes on the use of VNIC in this command: 1586 * 1587 * By default, these filters belong to default vnic for the function. 1588 * Once these filters are set up, only destination VNIC can be modified. 1589 * If the destination VNIC is not specified in this command, 1590 * then the HWRM shall only create an l2 context id. 1591 */ 1592 1593 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1594 filter = STAILQ_FIRST(&vnic->filter); 1595 /* Check if the VLAN has already been added */ 1596 while (filter) { 1597 if (filter->enables & chk && filter->l2_ivlan == vlan_id && 1598 !memcmp(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN)) 1599 return -EEXIST; 1600 1601 filter = STAILQ_NEXT(filter, next); 1602 } 1603 1604 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 1605 * command to create MAC+VLAN filter with the right flags, enables set. 
1606 */ 1607 filter = bnxt_alloc_filter(bp); 1608 if (!filter) { 1609 PMD_DRV_LOG(ERR, 1610 "MAC/VLAN filter alloc failed\n"); 1611 return -ENOMEM; 1612 } 1613 /* MAC + VLAN ID filter */ 1614 filter->l2_ivlan = vlan_id; 1615 filter->l2_ivlan_mask = 0x0FFF; 1616 filter->enables |= en; 1617 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 1618 if (rc) { 1619 /* Free the newly allocated filter as we were 1620 * not able to create the filter in hardware. 1621 */ 1622 filter->fw_l2_filter_id = UINT64_MAX; 1623 STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next); 1624 return rc; 1625 } 1626 1627 /* Add this new filter to the list */ 1628 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 1629 PMD_DRV_LOG(INFO, 1630 "Added Vlan filter for %d\n", vlan_id); 1631 return rc; 1632 } 1633 1634 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 1635 uint16_t vlan_id, int on) 1636 { 1637 struct bnxt *bp = eth_dev->data->dev_private; 1638 1639 /* These operations apply to ALL existing MAC/VLAN filters */ 1640 if (on) 1641 return bnxt_add_vlan_filter(bp, vlan_id); 1642 else 1643 return bnxt_del_vlan_filter(bp, vlan_id); 1644 } 1645 1646 static int 1647 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 1648 { 1649 struct bnxt *bp = dev->data->dev_private; 1650 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 1651 unsigned int i; 1652 1653 if (mask & ETH_VLAN_FILTER_MASK) { 1654 if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { 1655 /* Remove any VLAN filters programmed */ 1656 for (i = 0; i < 4095; i++) 1657 bnxt_del_vlan_filter(bp, i); 1658 } 1659 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", 1660 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)); 1661 } 1662 1663 if (mask & ETH_VLAN_STRIP_MASK) { 1664 /* Enable or disable VLAN stripping */ 1665 for (i = 0; i < bp->nr_vnics; i++) { 1666 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 1667 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 1668 vnic->vlan_strip = true; 1669 else 1670 vnic->vlan_strip = false; 1671 bnxt_hwrm_vnic_cfg(bp, vnic); 1672 } 1673 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", 1674 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)); 1675 } 1676 1677 if (mask & ETH_VLAN_EXTEND_MASK) 1678 PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n"); 1679 1680 return 0; 1681 } 1682 1683 static int 1684 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 1685 struct rte_ether_addr *addr) 1686 { 1687 struct bnxt *bp = dev->data->dev_private; 1688 /* Default Filter is tied to VNIC 0 */ 1689 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 1690 struct bnxt_filter_info *filter; 1691 int rc; 1692 1693 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 1694 return -EPERM; 1695 1696 if (rte_is_zero_ether_addr(addr)) 1697 return -EINVAL; 1698 1699 STAILQ_FOREACH(filter, &vnic->filter, next) { 1700 /* Default Filter is at Index 0 */ 1701 if (filter->mac_index != 0) 1702 continue; 1703 1704 memcpy(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN); 1705 memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); 1706 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; 1707 filter->enables |= 1708 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | 1709 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK; 1710 1711 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 1712 if (rc) 1713 return rc; 1714 1715 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 1716 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 1717 return 0; 1718 } 1719 1720 return 0; 1721 } 1722 1723 static int 1724 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 
1725 struct rte_ether_addr *mc_addr_set, 1726 uint32_t nb_mc_addr) 1727 { 1728 struct bnxt *bp = eth_dev->data->dev_private; 1729 char *mc_addr_list = (char *)mc_addr_set; 1730 struct bnxt_vnic_info *vnic; 1731 uint32_t off = 0, i = 0; 1732 1733 vnic = &bp->vnic_info[0]; 1734 1735 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 1736 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 1737 goto allmulti; 1738 } 1739 1740 /* TODO Check for Duplicate mcast addresses */ 1741 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 1742 for (i = 0; i < nb_mc_addr; i++) { 1743 memcpy(vnic->mc_list + off, &mc_addr_list[i], 1744 RTE_ETHER_ADDR_LEN); 1745 off += RTE_ETHER_ADDR_LEN; 1746 } 1747 1748 vnic->mc_addr_cnt = i; 1749 1750 allmulti: 1751 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1752 } 1753 1754 static int 1755 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 1756 { 1757 struct bnxt *bp = dev->data->dev_private; 1758 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 1759 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 1760 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 1761 int ret; 1762 1763 ret = snprintf(fw_version, fw_size, "%d.%d.%d", 1764 fw_major, fw_minor, fw_updt); 1765 1766 ret += 1; /* add the size of '\0' */ 1767 if (fw_size < (uint32_t)ret) 1768 return ret; 1769 else 1770 return 0; 1771 } 1772 1773 static void 1774 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 1775 struct rte_eth_rxq_info *qinfo) 1776 { 1777 struct bnxt_rx_queue *rxq; 1778 1779 rxq = dev->data->rx_queues[queue_id]; 1780 1781 qinfo->mp = rxq->mb_pool; 1782 qinfo->scattered_rx = dev->data->scattered_rx; 1783 qinfo->nb_desc = rxq->nb_rx_desc; 1784 1785 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 1786 qinfo->conf.rx_drop_en = 0; 1787 qinfo->conf.rx_deferred_start = 0; 1788 } 1789 1790 static void 1791 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 1792 struct rte_eth_txq_info *qinfo) 1793 { 1794 struct bnxt_tx_queue *txq; 1795 1796 txq = dev->data->tx_queues[queue_id]; 1797 1798 qinfo->nb_desc = txq->nb_tx_desc; 1799 1800 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 1801 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 1802 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 1803 1804 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 1805 qinfo->conf.tx_rs_thresh = 0; 1806 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 1807 } 1808 1809 static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 1810 { 1811 struct bnxt *bp = eth_dev->data->dev_private; 1812 struct rte_eth_dev_info dev_info; 1813 uint32_t new_pkt_size; 1814 uint32_t rc = 0; 1815 uint32_t i; 1816 1817 new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 1818 VLAN_TAG_SIZE * BNXT_NUM_VLANS; 1819 1820 rc = bnxt_dev_info_get_op(eth_dev, &dev_info); 1821 if (rc != 0) { 1822 PMD_DRV_LOG(ERR, "Error during getting ethernet device info\n"); 1823 return rc; 1824 } 1825 1826 if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) { 1827 PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n", 1828 RTE_ETHER_MIN_MTU, BNXT_MAX_MTU); 1829 return -EINVAL; 1830 } 1831 1832 #ifdef RTE_ARCH_X86 1833 /* 1834 * If vector-mode tx/rx is active, disallow any MTU change that would 1835 * require scattered receive support. 
1836 */ 1837 if (eth_dev->data->dev_started && 1838 (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec || 1839 eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) && 1840 (new_pkt_size > 1841 eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 1842 PMD_DRV_LOG(ERR, 1843 "MTU change would require scattered rx support. "); 1844 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n"); 1845 return -EINVAL; 1846 } 1847 #endif 1848 1849 if (new_mtu > RTE_ETHER_MTU) { 1850 bp->flags |= BNXT_FLAG_JUMBO; 1851 bp->eth_dev->data->dev_conf.rxmode.offloads |= 1852 DEV_RX_OFFLOAD_JUMBO_FRAME; 1853 } else { 1854 bp->eth_dev->data->dev_conf.rxmode.offloads &= 1855 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 1856 bp->flags &= ~BNXT_FLAG_JUMBO; 1857 } 1858 1859 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size; 1860 1861 eth_dev->data->mtu = new_mtu; 1862 PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu); 1863 1864 for (i = 0; i < bp->nr_vnics; i++) { 1865 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 1866 uint16_t size = 0; 1867 1868 vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + 1869 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2; 1870 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 1871 if (rc) 1872 break; 1873 1874 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); 1875 size -= RTE_PKTMBUF_HEADROOM; 1876 1877 if (size < new_mtu) { 1878 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 1879 if (rc) 1880 return rc; 1881 } 1882 } 1883 1884 return rc; 1885 } 1886 1887 static int 1888 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 1889 { 1890 struct bnxt *bp = dev->data->dev_private; 1891 uint16_t vlan = bp->vlan; 1892 int rc; 1893 1894 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { 1895 PMD_DRV_LOG(ERR, 1896 "PVID cannot be modified for this function\n"); 1897 return -ENOTSUP; 1898 } 1899 bp->vlan = on ? pvid : 0; 1900 1901 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 1902 if (rc) 1903 bp->vlan = vlan; 1904 return rc; 1905 } 1906 1907 static int 1908 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 1909 { 1910 struct bnxt *bp = dev->data->dev_private; 1911 1912 return bnxt_hwrm_port_led_cfg(bp, true); 1913 } 1914 1915 static int 1916 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 1917 { 1918 struct bnxt *bp = dev->data->dev_private; 1919 1920 return bnxt_hwrm_port_led_cfg(bp, false); 1921 } 1922 1923 static uint32_t 1924 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1925 { 1926 uint32_t desc = 0, raw_cons = 0, cons; 1927 struct bnxt_cp_ring_info *cpr; 1928 struct bnxt_rx_queue *rxq; 1929 struct rx_pkt_cmpl *rxcmp; 1930 uint16_t cmp_type; 1931 uint8_t cmp = 1; 1932 bool valid; 1933 1934 rxq = dev->data->rx_queues[rx_queue_id]; 1935 cpr = rxq->cp_ring; 1936 valid = cpr->valid; 1937 1938 while (raw_cons < rxq->nb_rx_desc) { 1939 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 1940 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 1941 1942 if (!CMPL_VALID(rxcmp, valid)) 1943 goto nothing_to_do; 1944 valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid); 1945 cmp_type = CMP_TYPE(rxcmp); 1946 if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) { 1947 cmp = (rte_le_to_cpu_32( 1948 ((struct rx_tpa_end_cmpl *) 1949 (rxcmp))->agg_bufs_v1) & 1950 RX_TPA_END_CMPL_AGG_BUFS_MASK) >> 1951 RX_TPA_END_CMPL_AGG_BUFS_SFT; 1952 desc++; 1953 } else if (cmp_type == 0x11) { 1954 desc++; 1955 cmp = (rxcmp->agg_bufs_v1 & 1956 RX_PKT_CMPL_AGG_BUFS_MASK) >> 1957 RX_PKT_CMPL_AGG_BUFS_SFT; 1958 } else { 1959 cmp = 1; 1960 } 1961 nothing_to_do: 1962 raw_cons += cmp ? 
cmp : 2; 1963 } 1964 1965 return desc; 1966 } 1967 1968 static int 1969 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 1970 { 1971 struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue; 1972 struct bnxt_rx_ring_info *rxr; 1973 struct bnxt_cp_ring_info *cpr; 1974 struct bnxt_sw_rx_bd *rx_buf; 1975 struct rx_pkt_cmpl *rxcmp; 1976 uint32_t cons, cp_cons; 1977 1978 if (!rxq) 1979 return -EINVAL; 1980 1981 cpr = rxq->cp_ring; 1982 rxr = rxq->rx_ring; 1983 1984 if (offset >= rxq->nb_rx_desc) 1985 return -EINVAL; 1986 1987 cons = RING_CMP(cpr->cp_ring_struct, offset); 1988 cp_cons = cpr->cp_raw_cons; 1989 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 1990 1991 if (cons > cp_cons) { 1992 if (CMPL_VALID(rxcmp, cpr->valid)) 1993 return RTE_ETH_RX_DESC_DONE; 1994 } else { 1995 if (CMPL_VALID(rxcmp, !cpr->valid)) 1996 return RTE_ETH_RX_DESC_DONE; 1997 } 1998 rx_buf = &rxr->rx_buf_ring[cons]; 1999 if (rx_buf->mbuf == NULL) 2000 return RTE_ETH_RX_DESC_UNAVAIL; 2001 2002 2003 return RTE_ETH_RX_DESC_AVAIL; 2004 } 2005 2006 static int 2007 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 2008 { 2009 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 2010 struct bnxt_tx_ring_info *txr; 2011 struct bnxt_cp_ring_info *cpr; 2012 struct bnxt_sw_tx_bd *tx_buf; 2013 struct tx_pkt_cmpl *txcmp; 2014 uint32_t cons, cp_cons; 2015 2016 if (!txq) 2017 return -EINVAL; 2018 2019 cpr = txq->cp_ring; 2020 txr = txq->tx_ring; 2021 2022 if (offset >= txq->nb_tx_desc) 2023 return -EINVAL; 2024 2025 cons = RING_CMP(cpr->cp_ring_struct, offset); 2026 txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 2027 cp_cons = cpr->cp_raw_cons; 2028 2029 if (cons > cp_cons) { 2030 if (CMPL_VALID(txcmp, cpr->valid)) 2031 return RTE_ETH_TX_DESC_UNAVAIL; 2032 } else { 2033 if (CMPL_VALID(txcmp, !cpr->valid)) 2034 return RTE_ETH_TX_DESC_UNAVAIL; 2035 } 2036 tx_buf = &txr->tx_buf_ring[cons]; 2037 if (tx_buf->mbuf == NULL) 2038 return RTE_ETH_TX_DESC_DONE; 2039 2040 return RTE_ETH_TX_DESC_FULL; 2041 } 2042 2043 static struct bnxt_filter_info * 2044 bnxt_match_and_validate_ether_filter(struct bnxt *bp, 2045 struct rte_eth_ethertype_filter *efilter, 2046 struct bnxt_vnic_info *vnic0, 2047 struct bnxt_vnic_info *vnic, 2048 int *ret) 2049 { 2050 struct bnxt_filter_info *mfilter = NULL; 2051 int match = 0; 2052 *ret = 0; 2053 2054 if (efilter->ether_type == RTE_ETHER_TYPE_IPV4 || 2055 efilter->ether_type == RTE_ETHER_TYPE_IPV6) { 2056 PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in" 2057 " ethertype filter.", efilter->ether_type); 2058 *ret = -EINVAL; 2059 goto exit; 2060 } 2061 if (efilter->queue >= bp->rx_nr_rings) { 2062 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue); 2063 *ret = -EINVAL; 2064 goto exit; 2065 } 2066 2067 vnic0 = &bp->vnic_info[0]; 2068 vnic = &bp->vnic_info[efilter->queue]; 2069 if (vnic == NULL) { 2070 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue); 2071 *ret = -EINVAL; 2072 goto exit; 2073 } 2074 2075 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { 2076 STAILQ_FOREACH(mfilter, &vnic0->filter, next) { 2077 if ((!memcmp(efilter->mac_addr.addr_bytes, 2078 mfilter->l2_addr, RTE_ETHER_ADDR_LEN) && 2079 mfilter->flags == 2080 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP && 2081 mfilter->ethertype == efilter->ether_type)) { 2082 match = 1; 2083 break; 2084 } 2085 } 2086 } else { 2087 STAILQ_FOREACH(mfilter, &vnic->filter, next) 2088 if ((!memcmp(efilter->mac_addr.addr_bytes, 2089 mfilter->l2_addr, RTE_ETHER_ADDR_LEN) && 2090 mfilter->ethertype == 
efilter->ether_type && 2091 mfilter->flags == 2092 HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) { 2093 match = 1; 2094 break; 2095 } 2096 } 2097 2098 if (match) 2099 *ret = -EEXIST; 2100 2101 exit: 2102 return mfilter; 2103 } 2104 2105 static int 2106 bnxt_ethertype_filter(struct rte_eth_dev *dev, 2107 enum rte_filter_op filter_op, 2108 void *arg) 2109 { 2110 struct bnxt *bp = dev->data->dev_private; 2111 struct rte_eth_ethertype_filter *efilter = 2112 (struct rte_eth_ethertype_filter *)arg; 2113 struct bnxt_filter_info *bfilter, *filter1; 2114 struct bnxt_vnic_info *vnic, *vnic0; 2115 int ret; 2116 2117 if (filter_op == RTE_ETH_FILTER_NOP) 2118 return 0; 2119 2120 if (arg == NULL) { 2121 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 2122 filter_op); 2123 return -EINVAL; 2124 } 2125 2126 vnic0 = &bp->vnic_info[0]; 2127 vnic = &bp->vnic_info[efilter->queue]; 2128 2129 switch (filter_op) { 2130 case RTE_ETH_FILTER_ADD: 2131 bnxt_match_and_validate_ether_filter(bp, efilter, 2132 vnic0, vnic, &ret); 2133 if (ret < 0) 2134 return ret; 2135 2136 bfilter = bnxt_get_unused_filter(bp); 2137 if (bfilter == NULL) { 2138 PMD_DRV_LOG(ERR, 2139 "Not enough resources for a new filter.\n"); 2140 return -ENOMEM; 2141 } 2142 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; 2143 memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes, 2144 RTE_ETHER_ADDR_LEN); 2145 memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes, 2146 RTE_ETHER_ADDR_LEN); 2147 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; 2148 bfilter->ethertype = efilter->ether_type; 2149 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2150 2151 filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0); 2152 if (filter1 == NULL) { 2153 ret = -EINVAL; 2154 goto cleanup; 2155 } 2156 bfilter->enables |= 2157 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2158 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; 2159 2160 bfilter->dst_id = vnic->fw_vnic_id; 2161 2162 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { 2163 bfilter->flags = 2164 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 2165 } 2166 2167 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); 2168 if (ret) 2169 goto cleanup; 2170 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); 2171 break; 2172 case RTE_ETH_FILTER_DELETE: 2173 filter1 = bnxt_match_and_validate_ether_filter(bp, efilter, 2174 vnic0, vnic, &ret); 2175 if (ret == -EEXIST) { 2176 ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1); 2177 2178 STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info, 2179 next); 2180 bnxt_free_filter(bp, filter1); 2181 } else if (ret == 0) { 2182 PMD_DRV_LOG(ERR, "No matching filter found\n"); 2183 } 2184 break; 2185 default: 2186 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 2187 ret = -EINVAL; 2188 goto error; 2189 } 2190 return ret; 2191 cleanup: 2192 bnxt_free_filter(bp, bfilter); 2193 error: 2194 return ret; 2195 } 2196 2197 static inline int 2198 parse_ntuple_filter(struct bnxt *bp, 2199 struct rte_eth_ntuple_filter *nfilter, 2200 struct bnxt_filter_info *bfilter) 2201 { 2202 uint32_t en = 0; 2203 2204 if (nfilter->queue >= bp->rx_nr_rings) { 2205 PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue); 2206 return -EINVAL; 2207 } 2208 2209 switch (nfilter->dst_port_mask) { 2210 case UINT16_MAX: 2211 bfilter->dst_port_mask = -1; 2212 bfilter->dst_port = nfilter->dst_port; 2213 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT | 2214 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2215 break; 2216 default: 2217 PMD_DRV_LOG(ERR, "invalid dst_port 
mask."); 2218 return -EINVAL; 2219 } 2220 2221 bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2222 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2223 2224 switch (nfilter->proto_mask) { 2225 case UINT8_MAX: 2226 if (nfilter->proto == 17) /* IPPROTO_UDP */ 2227 bfilter->ip_protocol = 17; 2228 else if (nfilter->proto == 6) /* IPPROTO_TCP */ 2229 bfilter->ip_protocol = 6; 2230 else 2231 return -EINVAL; 2232 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2233 break; 2234 default: 2235 PMD_DRV_LOG(ERR, "invalid protocol mask."); 2236 return -EINVAL; 2237 } 2238 2239 switch (nfilter->dst_ip_mask) { 2240 case UINT32_MAX: 2241 bfilter->dst_ipaddr_mask[0] = -1; 2242 bfilter->dst_ipaddr[0] = nfilter->dst_ip; 2243 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR | 2244 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2245 break; 2246 default: 2247 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 2248 return -EINVAL; 2249 } 2250 2251 switch (nfilter->src_ip_mask) { 2252 case UINT32_MAX: 2253 bfilter->src_ipaddr_mask[0] = -1; 2254 bfilter->src_ipaddr[0] = nfilter->src_ip; 2255 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | 2256 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2257 break; 2258 default: 2259 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 2260 return -EINVAL; 2261 } 2262 2263 switch (nfilter->src_port_mask) { 2264 case UINT16_MAX: 2265 bfilter->src_port_mask = -1; 2266 bfilter->src_port = nfilter->src_port; 2267 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | 2268 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2269 break; 2270 default: 2271 PMD_DRV_LOG(ERR, "invalid src_port mask."); 2272 return -EINVAL; 2273 } 2274 2275 //TODO Priority 2276 //nfilter->priority = (uint8_t)filter->priority; 2277 2278 bfilter->enables = en; 2279 return 0; 2280 } 2281 2282 static struct bnxt_filter_info* 2283 bnxt_match_ntuple_filter(struct bnxt *bp, 2284 struct bnxt_filter_info *bfilter, 2285 struct bnxt_vnic_info **mvnic) 2286 { 2287 struct bnxt_filter_info *mfilter = NULL; 2288 int i; 2289 2290 for (i = bp->nr_vnics - 1; i >= 0; i--) { 2291 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2292 STAILQ_FOREACH(mfilter, &vnic->filter, next) { 2293 if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] && 2294 bfilter->src_ipaddr_mask[0] == 2295 mfilter->src_ipaddr_mask[0] && 2296 bfilter->src_port == mfilter->src_port && 2297 bfilter->src_port_mask == mfilter->src_port_mask && 2298 bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] && 2299 bfilter->dst_ipaddr_mask[0] == 2300 mfilter->dst_ipaddr_mask[0] && 2301 bfilter->dst_port == mfilter->dst_port && 2302 bfilter->dst_port_mask == mfilter->dst_port_mask && 2303 bfilter->flags == mfilter->flags && 2304 bfilter->enables == mfilter->enables) { 2305 if (mvnic) 2306 *mvnic = vnic; 2307 return mfilter; 2308 } 2309 } 2310 } 2311 return NULL; 2312 } 2313 2314 static int 2315 bnxt_cfg_ntuple_filter(struct bnxt *bp, 2316 struct rte_eth_ntuple_filter *nfilter, 2317 enum rte_filter_op filter_op) 2318 { 2319 struct bnxt_filter_info *bfilter, *mfilter, *filter1; 2320 struct bnxt_vnic_info *vnic, *vnic0, *mvnic; 2321 int ret; 2322 2323 if (nfilter->flags != RTE_5TUPLE_FLAGS) { 2324 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 2325 return -EINVAL; 2326 } 2327 2328 if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { 2329 PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n"); 2330 return -EINVAL; 2331 } 2332 2333 bfilter = bnxt_get_unused_filter(bp); 2334 if (bfilter == NULL) { 2335 PMD_DRV_LOG(ERR, 2336 "Not enough resources for a new filter.\n"); 2337 return -ENOMEM; 2338 } 2339 ret = 
parse_ntuple_filter(bp, nfilter, bfilter); 2340 if (ret < 0) 2341 goto free_filter; 2342 2343 vnic = &bp->vnic_info[nfilter->queue]; 2344 vnic0 = &bp->vnic_info[0]; 2345 filter1 = STAILQ_FIRST(&vnic0->filter); 2346 if (filter1 == NULL) { 2347 ret = -EINVAL; 2348 goto free_filter; 2349 } 2350 2351 bfilter->dst_id = vnic->fw_vnic_id; 2352 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; 2353 bfilter->enables |= 2354 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2355 bfilter->ethertype = 0x800; 2356 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2357 2358 mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic); 2359 2360 if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && 2361 bfilter->dst_id == mfilter->dst_id) { 2362 PMD_DRV_LOG(ERR, "filter exists.\n"); 2363 ret = -EEXIST; 2364 goto free_filter; 2365 } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && 2366 bfilter->dst_id != mfilter->dst_id) { 2367 mfilter->dst_id = vnic->fw_vnic_id; 2368 ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter); 2369 STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next); 2370 STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next); 2371 PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n"); 2372 PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n"); 2373 goto free_filter; 2374 } 2375 if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) { 2376 PMD_DRV_LOG(ERR, "filter doesn't exist."); 2377 ret = -ENOENT; 2378 goto free_filter; 2379 } 2380 2381 if (filter_op == RTE_ETH_FILTER_ADD) { 2382 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; 2383 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); 2384 if (ret) 2385 goto free_filter; 2386 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); 2387 } else { 2388 if (mfilter == NULL) { 2389 /* This should not happen. But for Coverity! 
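			 * The DELETE path already returned -ENOENT above when
			 * no matching filter was found, so mfilter cannot be
			 * NULL here; this check only keeps static analysis
			 * quiet.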
*/ 2390 ret = -ENOENT; 2391 goto free_filter; 2392 } 2393 ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter); 2394 2395 STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next); 2396 bnxt_free_filter(bp, mfilter); 2397 mfilter->fw_l2_filter_id = -1; 2398 bnxt_free_filter(bp, bfilter); 2399 bfilter->fw_l2_filter_id = -1; 2400 } 2401 2402 return 0; 2403 free_filter: 2404 bfilter->fw_l2_filter_id = -1; 2405 bnxt_free_filter(bp, bfilter); 2406 return ret; 2407 } 2408 2409 static int 2410 bnxt_ntuple_filter(struct rte_eth_dev *dev, 2411 enum rte_filter_op filter_op, 2412 void *arg) 2413 { 2414 struct bnxt *bp = dev->data->dev_private; 2415 int ret; 2416 2417 if (filter_op == RTE_ETH_FILTER_NOP) 2418 return 0; 2419 2420 if (arg == NULL) { 2421 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 2422 filter_op); 2423 return -EINVAL; 2424 } 2425 2426 switch (filter_op) { 2427 case RTE_ETH_FILTER_ADD: 2428 ret = bnxt_cfg_ntuple_filter(bp, 2429 (struct rte_eth_ntuple_filter *)arg, 2430 filter_op); 2431 break; 2432 case RTE_ETH_FILTER_DELETE: 2433 ret = bnxt_cfg_ntuple_filter(bp, 2434 (struct rte_eth_ntuple_filter *)arg, 2435 filter_op); 2436 break; 2437 default: 2438 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 2439 ret = -EINVAL; 2440 break; 2441 } 2442 return ret; 2443 } 2444 2445 static int 2446 bnxt_parse_fdir_filter(struct bnxt *bp, 2447 struct rte_eth_fdir_filter *fdir, 2448 struct bnxt_filter_info *filter) 2449 { 2450 enum rte_fdir_mode fdir_mode = 2451 bp->eth_dev->data->dev_conf.fdir_conf.mode; 2452 struct bnxt_vnic_info *vnic0, *vnic; 2453 struct bnxt_filter_info *filter1; 2454 uint32_t en = 0; 2455 int i; 2456 2457 if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2458 return -EINVAL; 2459 2460 filter->l2_ovlan = fdir->input.flow_ext.vlan_tci; 2461 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; 2462 2463 switch (fdir->input.flow_type) { 2464 case RTE_ETH_FLOW_IPV4: 2465 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 2466 /* FALLTHROUGH */ 2467 filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip; 2468 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2469 filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip; 2470 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2471 filter->ip_protocol = fdir->input.flow.ip4_flow.proto; 2472 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2473 filter->ip_addr_type = 2474 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2475 filter->src_ipaddr_mask[0] = 0xffffffff; 2476 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2477 filter->dst_ipaddr_mask[0] = 0xffffffff; 2478 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2479 filter->ethertype = 0x800; 2480 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2481 break; 2482 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 2483 filter->src_port = fdir->input.flow.tcp4_flow.src_port; 2484 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2485 filter->dst_port = fdir->input.flow.tcp4_flow.dst_port; 2486 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2487 filter->dst_port_mask = 0xffff; 2488 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2489 filter->src_port_mask = 0xffff; 2490 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2491 filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip; 2492 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2493 filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip; 2494 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2495 filter->ip_protocol = 6; 2496 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2497 filter->ip_addr_type = 2498 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2499 filter->src_ipaddr_mask[0] = 
0xffffffff; 2500 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2501 filter->dst_ipaddr_mask[0] = 0xffffffff; 2502 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2503 filter->ethertype = 0x800; 2504 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2505 break; 2506 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 2507 filter->src_port = fdir->input.flow.udp4_flow.src_port; 2508 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2509 filter->dst_port = fdir->input.flow.udp4_flow.dst_port; 2510 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2511 filter->dst_port_mask = 0xffff; 2512 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2513 filter->src_port_mask = 0xffff; 2514 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2515 filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip; 2516 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2517 filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip; 2518 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2519 filter->ip_protocol = 17; 2520 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2521 filter->ip_addr_type = 2522 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2523 filter->src_ipaddr_mask[0] = 0xffffffff; 2524 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2525 filter->dst_ipaddr_mask[0] = 0xffffffff; 2526 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2527 filter->ethertype = 0x800; 2528 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2529 break; 2530 case RTE_ETH_FLOW_IPV6: 2531 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 2532 /* FALLTHROUGH */ 2533 filter->ip_addr_type = 2534 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 2535 filter->ip_protocol = fdir->input.flow.ipv6_flow.proto; 2536 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2537 rte_memcpy(filter->src_ipaddr, 2538 fdir->input.flow.ipv6_flow.src_ip, 16); 2539 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2540 rte_memcpy(filter->dst_ipaddr, 2541 fdir->input.flow.ipv6_flow.dst_ip, 16); 2542 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2543 memset(filter->dst_ipaddr_mask, 0xff, 16); 2544 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2545 memset(filter->src_ipaddr_mask, 0xff, 16); 2546 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2547 filter->ethertype = 0x86dd; 2548 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2549 break; 2550 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 2551 filter->src_port = fdir->input.flow.tcp6_flow.src_port; 2552 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2553 filter->dst_port = fdir->input.flow.tcp6_flow.dst_port; 2554 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2555 filter->dst_port_mask = 0xffff; 2556 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2557 filter->src_port_mask = 0xffff; 2558 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2559 filter->ip_addr_type = 2560 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 2561 filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto; 2562 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2563 rte_memcpy(filter->src_ipaddr, 2564 fdir->input.flow.tcp6_flow.ip.src_ip, 16); 2565 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2566 rte_memcpy(filter->dst_ipaddr, 2567 fdir->input.flow.tcp6_flow.ip.dst_ip, 16); 2568 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2569 memset(filter->dst_ipaddr_mask, 0xff, 16); 2570 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2571 memset(filter->src_ipaddr_mask, 0xff, 16); 2572 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2573 filter->ethertype = 0x86dd; 2574 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2575 break; 2576 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 2577 filter->src_port = 
fdir->input.flow.udp6_flow.src_port; 2578 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2579 filter->dst_port = fdir->input.flow.udp6_flow.dst_port; 2580 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2581 filter->dst_port_mask = 0xffff; 2582 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2583 filter->src_port_mask = 0xffff; 2584 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2585 filter->ip_addr_type = 2586 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 2587 filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto; 2588 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2589 rte_memcpy(filter->src_ipaddr, 2590 fdir->input.flow.udp6_flow.ip.src_ip, 16); 2591 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2592 rte_memcpy(filter->dst_ipaddr, 2593 fdir->input.flow.udp6_flow.ip.dst_ip, 16); 2594 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2595 memset(filter->dst_ipaddr_mask, 0xff, 16); 2596 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2597 memset(filter->src_ipaddr_mask, 0xff, 16); 2598 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2599 filter->ethertype = 0x86dd; 2600 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2601 break; 2602 case RTE_ETH_FLOW_L2_PAYLOAD: 2603 filter->ethertype = fdir->input.flow.l2_flow.ether_type; 2604 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2605 break; 2606 case RTE_ETH_FLOW_VXLAN: 2607 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 2608 return -EINVAL; 2609 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 2610 filter->tunnel_type = 2611 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 2612 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 2613 break; 2614 case RTE_ETH_FLOW_NVGRE: 2615 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 2616 return -EINVAL; 2617 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 2618 filter->tunnel_type = 2619 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 2620 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 2621 break; 2622 case RTE_ETH_FLOW_UNKNOWN: 2623 case RTE_ETH_FLOW_RAW: 2624 case RTE_ETH_FLOW_FRAG_IPV4: 2625 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: 2626 case RTE_ETH_FLOW_FRAG_IPV6: 2627 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: 2628 case RTE_ETH_FLOW_IPV6_EX: 2629 case RTE_ETH_FLOW_IPV6_TCP_EX: 2630 case RTE_ETH_FLOW_IPV6_UDP_EX: 2631 case RTE_ETH_FLOW_GENEVE: 2632 /* FALLTHROUGH */ 2633 default: 2634 return -EINVAL; 2635 } 2636 2637 vnic0 = &bp->vnic_info[0]; 2638 vnic = &bp->vnic_info[fdir->action.rx_queue]; 2639 if (vnic == NULL) { 2640 PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue); 2641 return -EINVAL; 2642 } 2643 2644 2645 if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 2646 rte_memcpy(filter->dst_macaddr, 2647 fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6); 2648 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; 2649 } 2650 2651 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) { 2652 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 2653 filter1 = STAILQ_FIRST(&vnic0->filter); 2654 //filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 2655 } else { 2656 filter->dst_id = vnic->fw_vnic_id; 2657 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 2658 if (filter->dst_macaddr[i] == 0x00) 2659 filter1 = STAILQ_FIRST(&vnic0->filter); 2660 else 2661 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 2662 } 2663 2664 if (filter1 == NULL) 2665 return -EINVAL; 2666 2667 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2668 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 2669 2670 filter->enables = en; 2671 2672 return 0; 2673 } 2674 2675 static struct 
bnxt_filter_info * 2676 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf, 2677 struct bnxt_vnic_info **mvnic) 2678 { 2679 struct bnxt_filter_info *mf = NULL; 2680 int i; 2681 2682 for (i = bp->nr_vnics - 1; i >= 0; i--) { 2683 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2684 2685 STAILQ_FOREACH(mf, &vnic->filter, next) { 2686 if (mf->filter_type == nf->filter_type && 2687 mf->flags == nf->flags && 2688 mf->src_port == nf->src_port && 2689 mf->src_port_mask == nf->src_port_mask && 2690 mf->dst_port == nf->dst_port && 2691 mf->dst_port_mask == nf->dst_port_mask && 2692 mf->ip_protocol == nf->ip_protocol && 2693 mf->ip_addr_type == nf->ip_addr_type && 2694 mf->ethertype == nf->ethertype && 2695 mf->vni == nf->vni && 2696 mf->tunnel_type == nf->tunnel_type && 2697 mf->l2_ovlan == nf->l2_ovlan && 2698 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 2699 mf->l2_ivlan == nf->l2_ivlan && 2700 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 2701 !memcmp(mf->l2_addr, nf->l2_addr, 2702 RTE_ETHER_ADDR_LEN) && 2703 !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, 2704 RTE_ETHER_ADDR_LEN) && 2705 !memcmp(mf->src_macaddr, nf->src_macaddr, 2706 RTE_ETHER_ADDR_LEN) && 2707 !memcmp(mf->dst_macaddr, nf->dst_macaddr, 2708 RTE_ETHER_ADDR_LEN) && 2709 !memcmp(mf->src_ipaddr, nf->src_ipaddr, 2710 sizeof(nf->src_ipaddr)) && 2711 !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, 2712 sizeof(nf->src_ipaddr_mask)) && 2713 !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, 2714 sizeof(nf->dst_ipaddr)) && 2715 !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, 2716 sizeof(nf->dst_ipaddr_mask))) { 2717 if (mvnic) 2718 *mvnic = vnic; 2719 return mf; 2720 } 2721 } 2722 } 2723 return NULL; 2724 } 2725 2726 static int 2727 bnxt_fdir_filter(struct rte_eth_dev *dev, 2728 enum rte_filter_op filter_op, 2729 void *arg) 2730 { 2731 struct bnxt *bp = dev->data->dev_private; 2732 struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg; 2733 struct bnxt_filter_info *filter, *match; 2734 struct bnxt_vnic_info *vnic, *mvnic; 2735 int ret = 0, i; 2736 2737 if (filter_op == RTE_ETH_FILTER_NOP) 2738 return 0; 2739 2740 if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) 2741 return -EINVAL; 2742 2743 switch (filter_op) { 2744 case RTE_ETH_FILTER_ADD: 2745 case RTE_ETH_FILTER_DELETE: 2746 /* FALLTHROUGH */ 2747 filter = bnxt_get_unused_filter(bp); 2748 if (filter == NULL) { 2749 PMD_DRV_LOG(ERR, 2750 "Not enough resources for a new flow.\n"); 2751 return -ENOMEM; 2752 } 2753 2754 ret = bnxt_parse_fdir_filter(bp, fdir, filter); 2755 if (ret != 0) 2756 goto free_filter; 2757 filter->filter_type = HWRM_CFA_NTUPLE_FILTER; 2758 2759 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 2760 vnic = &bp->vnic_info[0]; 2761 else 2762 vnic = &bp->vnic_info[fdir->action.rx_queue]; 2763 2764 match = bnxt_match_fdir(bp, filter, &mvnic); 2765 if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) { 2766 if (match->dst_id == vnic->fw_vnic_id) { 2767 PMD_DRV_LOG(ERR, "Flow already exists.\n"); 2768 ret = -EEXIST; 2769 goto free_filter; 2770 } else { 2771 match->dst_id = vnic->fw_vnic_id; 2772 ret = bnxt_hwrm_set_ntuple_filter(bp, 2773 match->dst_id, 2774 match); 2775 STAILQ_REMOVE(&mvnic->filter, match, 2776 bnxt_filter_info, next); 2777 STAILQ_INSERT_TAIL(&vnic->filter, match, next); 2778 PMD_DRV_LOG(ERR, 2779 "Filter with matching pattern exist\n"); 2780 PMD_DRV_LOG(ERR, 2781 "Updated it to new destination q\n"); 2782 goto free_filter; 2783 } 2784 } 2785 if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) { 2786 PMD_DRV_LOG(ERR, "Flow does not 
exist.\n"); 2787 ret = -ENOENT; 2788 goto free_filter; 2789 } 2790 2791 if (filter_op == RTE_ETH_FILTER_ADD) { 2792 ret = bnxt_hwrm_set_ntuple_filter(bp, 2793 filter->dst_id, 2794 filter); 2795 if (ret) 2796 goto free_filter; 2797 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2798 } else { 2799 ret = bnxt_hwrm_clear_ntuple_filter(bp, match); 2800 STAILQ_REMOVE(&vnic->filter, match, 2801 bnxt_filter_info, next); 2802 bnxt_free_filter(bp, match); 2803 filter->fw_l2_filter_id = -1; 2804 bnxt_free_filter(bp, filter); 2805 } 2806 break; 2807 case RTE_ETH_FILTER_FLUSH: 2808 for (i = bp->nr_vnics - 1; i >= 0; i--) { 2809 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2810 2811 STAILQ_FOREACH(filter, &vnic->filter, next) { 2812 if (filter->filter_type == 2813 HWRM_CFA_NTUPLE_FILTER) { 2814 ret = 2815 bnxt_hwrm_clear_ntuple_filter(bp, 2816 filter); 2817 STAILQ_REMOVE(&vnic->filter, filter, 2818 bnxt_filter_info, next); 2819 } 2820 } 2821 } 2822 return ret; 2823 case RTE_ETH_FILTER_UPDATE: 2824 case RTE_ETH_FILTER_STATS: 2825 case RTE_ETH_FILTER_INFO: 2826 PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op); 2827 break; 2828 default: 2829 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); 2830 ret = -EINVAL; 2831 break; 2832 } 2833 return ret; 2834 2835 free_filter: 2836 filter->fw_l2_filter_id = -1; 2837 bnxt_free_filter(bp, filter); 2838 return ret; 2839 } 2840 2841 static int 2842 bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused, 2843 enum rte_filter_type filter_type, 2844 enum rte_filter_op filter_op, void *arg) 2845 { 2846 int ret = 0; 2847 2848 switch (filter_type) { 2849 case RTE_ETH_FILTER_TUNNEL: 2850 PMD_DRV_LOG(ERR, 2851 "filter type: %d: To be implemented\n", filter_type); 2852 break; 2853 case RTE_ETH_FILTER_FDIR: 2854 ret = bnxt_fdir_filter(dev, filter_op, arg); 2855 break; 2856 case RTE_ETH_FILTER_NTUPLE: 2857 ret = bnxt_ntuple_filter(dev, filter_op, arg); 2858 break; 2859 case RTE_ETH_FILTER_ETHERTYPE: 2860 ret = bnxt_ethertype_filter(dev, filter_op, arg); 2861 break; 2862 case RTE_ETH_FILTER_GENERIC: 2863 if (filter_op != RTE_ETH_FILTER_GET) 2864 return -EINVAL; 2865 *(const void **)arg = &bnxt_flow_ops; 2866 break; 2867 default: 2868 PMD_DRV_LOG(ERR, 2869 "Filter type (%d) not supported", filter_type); 2870 ret = -EINVAL; 2871 break; 2872 } 2873 return ret; 2874 } 2875 2876 static const uint32_t * 2877 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 2878 { 2879 static const uint32_t ptypes[] = { 2880 RTE_PTYPE_L2_ETHER_VLAN, 2881 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 2882 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 2883 RTE_PTYPE_L4_ICMP, 2884 RTE_PTYPE_L4_TCP, 2885 RTE_PTYPE_L4_UDP, 2886 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 2887 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 2888 RTE_PTYPE_INNER_L4_ICMP, 2889 RTE_PTYPE_INNER_L4_TCP, 2890 RTE_PTYPE_INNER_L4_UDP, 2891 RTE_PTYPE_UNKNOWN 2892 }; 2893 2894 if (!dev->rx_pkt_burst) 2895 return NULL; 2896 2897 return ptypes; 2898 } 2899 2900 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 2901 int reg_win) 2902 { 2903 uint32_t reg_base = *reg_arr & 0xfffff000; 2904 uint32_t win_off; 2905 int i; 2906 2907 for (i = 0; i < count; i++) { 2908 if ((reg_arr[i] & 0xfffff000) != reg_base) 2909 return -ERANGE; 2910 } 2911 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 2912 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 2913 return 0; 2914 } 2915 2916 static int bnxt_map_ptp_regs(struct bnxt *bp) 2917 { 2918 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2919 uint32_t *reg_arr; 2920 int rc, i; 2921 2922 
reg_arr = ptp->rx_regs; 2923 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 2924 if (rc) 2925 return rc; 2926 2927 reg_arr = ptp->tx_regs; 2928 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 2929 if (rc) 2930 return rc; 2931 2932 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 2933 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 2934 2935 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 2936 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 2937 2938 return 0; 2939 } 2940 2941 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 2942 { 2943 rte_write32(0, (uint8_t *)bp->bar0 + 2944 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 2945 rte_write32(0, (uint8_t *)bp->bar0 + 2946 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 2947 } 2948 2949 static uint64_t bnxt_cc_read(struct bnxt *bp) 2950 { 2951 uint64_t ns; 2952 2953 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2954 BNXT_GRCPF_REG_SYNC_TIME)); 2955 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2956 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 2957 return ns; 2958 } 2959 2960 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 2961 { 2962 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2963 uint32_t fifo; 2964 2965 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2966 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 2967 if (fifo & BNXT_PTP_TX_FIFO_EMPTY) 2968 return -EAGAIN; 2969 2970 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2971 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 2972 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2973 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 2974 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2975 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 2976 2977 return 0; 2978 } 2979 2980 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 2981 { 2982 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2983 struct bnxt_pf_info *pf = &bp->pf; 2984 uint16_t port_id; 2985 uint32_t fifo; 2986 2987 if (!ptp) 2988 return -ENODEV; 2989 2990 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2991 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 2992 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 2993 return -EAGAIN; 2994 2995 port_id = pf->port_id; 2996 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 2997 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 2998 2999 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3000 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3001 if (fifo & BNXT_PTP_RX_FIFO_PENDING) { 3002 /* bnxt_clr_rx_ts(bp); TBD */ 3003 return -EBUSY; 3004 } 3005 3006 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3007 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3008 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3009 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3010 3011 return 0; 3012 } 3013 3014 static int 3015 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 3016 { 3017 uint64_t ns; 3018 struct bnxt *bp = dev->data->dev_private; 3019 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3020 3021 if (!ptp) 3022 return 0; 3023 3024 ns = rte_timespec_to_ns(ts); 3025 /* Set the timecounters to a new value. 
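	 * The PMD tracks PTP time in a software rte_timecounter rather than
	 * writing the adapter clock directly, so only the counter's
	 * nanosecond base is updated here.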
*/ 3026 ptp->tc.nsec = ns; 3027 3028 return 0; 3029 } 3030 3031 static int 3032 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 3033 { 3034 uint64_t ns, systime_cycles; 3035 struct bnxt *bp = dev->data->dev_private; 3036 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3037 3038 if (!ptp) 3039 return 0; 3040 3041 systime_cycles = bnxt_cc_read(bp); 3042 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3043 *ts = rte_ns_to_timespec(ns); 3044 3045 return 0; 3046 } 3047 static int 3048 bnxt_timesync_enable(struct rte_eth_dev *dev) 3049 { 3050 struct bnxt *bp = dev->data->dev_private; 3051 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3052 uint32_t shift = 0; 3053 3054 if (!ptp) 3055 return 0; 3056 3057 ptp->rx_filter = 1; 3058 ptp->tx_tstamp_en = 1; 3059 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3060 3061 if (!bnxt_hwrm_ptp_cfg(bp)) 3062 bnxt_map_ptp_regs(bp); 3063 3064 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 3065 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3066 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3067 3068 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3069 ptp->tc.cc_shift = shift; 3070 ptp->tc.nsec_mask = (1ULL << shift) - 1; 3071 3072 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3073 ptp->rx_tstamp_tc.cc_shift = shift; 3074 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3075 3076 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3077 ptp->tx_tstamp_tc.cc_shift = shift; 3078 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3079 3080 return 0; 3081 } 3082 3083 static int 3084 bnxt_timesync_disable(struct rte_eth_dev *dev) 3085 { 3086 struct bnxt *bp = dev->data->dev_private; 3087 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3088 3089 if (!ptp) 3090 return 0; 3091 3092 ptp->rx_filter = 0; 3093 ptp->tx_tstamp_en = 0; 3094 ptp->rxctl = 0; 3095 3096 bnxt_hwrm_ptp_cfg(bp); 3097 3098 bnxt_unmap_ptp_regs(bp); 3099 3100 return 0; 3101 } 3102 3103 static int 3104 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3105 struct timespec *timestamp, 3106 uint32_t flags __rte_unused) 3107 { 3108 struct bnxt *bp = dev->data->dev_private; 3109 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3110 uint64_t rx_tstamp_cycles = 0; 3111 uint64_t ns; 3112 3113 if (!ptp) 3114 return 0; 3115 3116 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 3117 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 3118 *timestamp = rte_ns_to_timespec(ns); 3119 return 0; 3120 } 3121 3122 static int 3123 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3124 struct timespec *timestamp) 3125 { 3126 struct bnxt *bp = dev->data->dev_private; 3127 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3128 uint64_t tx_tstamp_cycles = 0; 3129 uint64_t ns; 3130 3131 if (!ptp) 3132 return 0; 3133 3134 bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 3135 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 3136 *timestamp = rte_ns_to_timespec(ns); 3137 3138 return 0; 3139 } 3140 3141 static int 3142 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 3143 { 3144 struct bnxt *bp = dev->data->dev_private; 3145 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3146 3147 if (!ptp) 3148 return 0; 3149 3150 ptp->tc.nsec += delta; 3151 3152 return 0; 3153 } 3154 3155 static int 3156 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 3157 { 3158 struct bnxt *bp = dev->data->dev_private; 3159 int rc; 3160 uint32_t dir_entries; 3161 uint32_t entry_length; 3162 3163 PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n", 3164 bp->pdev->addr.domain, bp->pdev->addr.bus, 3165 
bp->pdev->addr.devid, bp->pdev->addr.function); 3166 3167 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 3168 if (rc != 0) 3169 return rc; 3170 3171 return dir_entries * entry_length; 3172 } 3173 3174 static int 3175 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 3176 struct rte_dev_eeprom_info *in_eeprom) 3177 { 3178 struct bnxt *bp = dev->data->dev_private; 3179 uint32_t index; 3180 uint32_t offset; 3181 3182 PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d " 3183 "len = %d\n", bp->pdev->addr.domain, 3184 bp->pdev->addr.bus, bp->pdev->addr.devid, 3185 bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); 3186 3187 if (in_eeprom->offset == 0) /* special offset value to get directory */ 3188 return bnxt_get_nvram_directory(bp, in_eeprom->length, 3189 in_eeprom->data); 3190 3191 index = in_eeprom->offset >> 24; 3192 offset = in_eeprom->offset & 0xffffff; 3193 3194 if (index != 0) 3195 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 3196 in_eeprom->length, in_eeprom->data); 3197 3198 return 0; 3199 } 3200 3201 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 3202 { 3203 switch (dir_type) { 3204 case BNX_DIR_TYPE_CHIMP_PATCH: 3205 case BNX_DIR_TYPE_BOOTCODE: 3206 case BNX_DIR_TYPE_BOOTCODE_2: 3207 case BNX_DIR_TYPE_APE_FW: 3208 case BNX_DIR_TYPE_APE_PATCH: 3209 case BNX_DIR_TYPE_KONG_FW: 3210 case BNX_DIR_TYPE_KONG_PATCH: 3211 case BNX_DIR_TYPE_BONO_FW: 3212 case BNX_DIR_TYPE_BONO_PATCH: 3213 /* FALLTHROUGH */ 3214 return true; 3215 } 3216 3217 return false; 3218 } 3219 3220 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 3221 { 3222 switch (dir_type) { 3223 case BNX_DIR_TYPE_AVS: 3224 case BNX_DIR_TYPE_EXP_ROM_MBA: 3225 case BNX_DIR_TYPE_PCIE: 3226 case BNX_DIR_TYPE_TSCF_UCODE: 3227 case BNX_DIR_TYPE_EXT_PHY: 3228 case BNX_DIR_TYPE_CCM: 3229 case BNX_DIR_TYPE_ISCSI_BOOT: 3230 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3231 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3232 /* FALLTHROUGH */ 3233 return true; 3234 } 3235 3236 return false; 3237 } 3238 3239 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 3240 { 3241 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3242 bnxt_dir_type_is_other_exec_format(dir_type); 3243 } 3244 3245 static int 3246 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 3247 struct rte_dev_eeprom_info *in_eeprom) 3248 { 3249 struct bnxt *bp = dev->data->dev_private; 3250 uint8_t index, dir_op; 3251 uint16_t type, ext, ordinal, attr; 3252 3253 PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d " 3254 "len = %d\n", bp->pdev->addr.domain, 3255 bp->pdev->addr.bus, bp->pdev->addr.devid, 3256 bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); 3257 3258 if (!BNXT_PF(bp)) { 3259 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 3260 return -EINVAL; 3261 } 3262 3263 type = in_eeprom->magic >> 16; 3264 3265 if (type == 0xffff) { /* special value for directory operations */ 3266 index = in_eeprom->magic & 0xff; 3267 dir_op = in_eeprom->magic >> 8; 3268 if (index == 0) 3269 return -EINVAL; 3270 switch (dir_op) { 3271 case 0x0e: /* erase */ 3272 if (in_eeprom->offset != ~in_eeprom->magic) 3273 return -EINVAL; 3274 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 3275 default: 3276 return -EINVAL; 3277 } 3278 } 3279 3280 /* Create or re-write an NVM item: */ 3281 if (bnxt_dir_type_is_executable(type) == true) 3282 return -EOPNOTSUPP; 3283 ext = in_eeprom->magic & 0xffff; 3284 ordinal = in_eeprom->offset >> 16; 3285 attr = in_eeprom->offset & 0xffff; 3286 3287 return 
bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 3288 in_eeprom->data, in_eeprom->length); 3289 } 3290 3291 /* 3292 * Initialization 3293 */ 3294 3295 static const struct eth_dev_ops bnxt_dev_ops = { 3296 .dev_infos_get = bnxt_dev_info_get_op, 3297 .dev_close = bnxt_dev_close_op, 3298 .dev_configure = bnxt_dev_configure_op, 3299 .dev_start = bnxt_dev_start_op, 3300 .dev_stop = bnxt_dev_stop_op, 3301 .dev_set_link_up = bnxt_dev_set_link_up_op, 3302 .dev_set_link_down = bnxt_dev_set_link_down_op, 3303 .stats_get = bnxt_stats_get_op, 3304 .stats_reset = bnxt_stats_reset_op, 3305 .rx_queue_setup = bnxt_rx_queue_setup_op, 3306 .rx_queue_release = bnxt_rx_queue_release_op, 3307 .tx_queue_setup = bnxt_tx_queue_setup_op, 3308 .tx_queue_release = bnxt_tx_queue_release_op, 3309 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 3310 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 3311 .reta_update = bnxt_reta_update_op, 3312 .reta_query = bnxt_reta_query_op, 3313 .rss_hash_update = bnxt_rss_hash_update_op, 3314 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 3315 .link_update = bnxt_link_update_op, 3316 .promiscuous_enable = bnxt_promiscuous_enable_op, 3317 .promiscuous_disable = bnxt_promiscuous_disable_op, 3318 .allmulticast_enable = bnxt_allmulticast_enable_op, 3319 .allmulticast_disable = bnxt_allmulticast_disable_op, 3320 .mac_addr_add = bnxt_mac_addr_add_op, 3321 .mac_addr_remove = bnxt_mac_addr_remove_op, 3322 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 3323 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 3324 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 3325 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 3326 .vlan_filter_set = bnxt_vlan_filter_set_op, 3327 .vlan_offload_set = bnxt_vlan_offload_set_op, 3328 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 3329 .mtu_set = bnxt_mtu_set_op, 3330 .mac_addr_set = bnxt_set_default_mac_addr_op, 3331 .xstats_get = bnxt_dev_xstats_get_op, 3332 .xstats_get_names = bnxt_dev_xstats_get_names_op, 3333 .xstats_reset = bnxt_dev_xstats_reset_op, 3334 .fw_version_get = bnxt_fw_version_get, 3335 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 3336 .rxq_info_get = bnxt_rxq_info_get_op, 3337 .txq_info_get = bnxt_txq_info_get_op, 3338 .dev_led_on = bnxt_dev_led_on_op, 3339 .dev_led_off = bnxt_dev_led_off_op, 3340 .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op, 3341 .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op, 3342 .rx_queue_count = bnxt_rx_queue_count_op, 3343 .rx_descriptor_status = bnxt_rx_descriptor_status_op, 3344 .tx_descriptor_status = bnxt_tx_descriptor_status_op, 3345 .rx_queue_start = bnxt_rx_queue_start, 3346 .rx_queue_stop = bnxt_rx_queue_stop, 3347 .tx_queue_start = bnxt_tx_queue_start, 3348 .tx_queue_stop = bnxt_tx_queue_stop, 3349 .filter_ctrl = bnxt_filter_ctrl_op, 3350 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 3351 .get_eeprom_length = bnxt_get_eeprom_length_op, 3352 .get_eeprom = bnxt_get_eeprom_op, 3353 .set_eeprom = bnxt_set_eeprom_op, 3354 .timesync_enable = bnxt_timesync_enable, 3355 .timesync_disable = bnxt_timesync_disable, 3356 .timesync_read_time = bnxt_timesync_read_time, 3357 .timesync_write_time = bnxt_timesync_write_time, 3358 .timesync_adjust_time = bnxt_timesync_adjust_time, 3359 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 3360 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 3361 }; 3362 3363 static bool bnxt_vf_pciid(uint16_t id) 3364 { 3365 if (id == BROADCOM_DEV_ID_57304_VF || 3366 id == BROADCOM_DEV_ID_57406_VF || 3367 id == BROADCOM_DEV_ID_5731X_VF 
|| 3368 id == BROADCOM_DEV_ID_5741X_VF || 3369 id == BROADCOM_DEV_ID_57414_VF || 3370 id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 || 3371 id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 || 3372 id == BROADCOM_DEV_ID_58802_VF || 3373 id == BROADCOM_DEV_ID_57500_VF1 || 3374 id == BROADCOM_DEV_ID_57500_VF2) 3375 return true; 3376 return false; 3377 } 3378 3379 bool bnxt_stratus_device(struct bnxt *bp) 3380 { 3381 uint16_t id = bp->pdev->id.device_id; 3382 3383 if (id == BROADCOM_DEV_ID_STRATUS_NIC || 3384 id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 || 3385 id == BROADCOM_DEV_ID_STRATUS_NIC_VF2) 3386 return true; 3387 return false; 3388 } 3389 3390 static int bnxt_init_board(struct rte_eth_dev *eth_dev) 3391 { 3392 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3393 struct bnxt *bp = eth_dev->data->dev_private; 3394 3395 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 3396 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 3397 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 3398 if (!bp->bar0 || !bp->doorbell_base) { 3399 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 3400 return -ENODEV; 3401 } 3402 3403 bp->eth_dev = eth_dev; 3404 bp->pdev = pci_dev; 3405 3406 return 0; 3407 } 3408 3409 static int bnxt_alloc_ctx_mem_blk(__rte_unused struct bnxt *bp, 3410 struct bnxt_ctx_pg_info *ctx_pg, 3411 uint32_t mem_size, 3412 const char *suffix, 3413 uint16_t idx) 3414 { 3415 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 3416 const struct rte_memzone *mz = NULL; 3417 char mz_name[RTE_MEMZONE_NAMESIZE]; 3418 rte_iova_t mz_phys_addr; 3419 uint64_t valid_bits = 0; 3420 uint32_t sz; 3421 int i; 3422 3423 if (!mem_size) 3424 return 0; 3425 3426 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 3427 BNXT_PAGE_SIZE; 3428 rmem->page_size = BNXT_PAGE_SIZE; 3429 rmem->pg_arr = ctx_pg->ctx_pg_arr; 3430 rmem->dma_arr = ctx_pg->ctx_dma_arr; 3431 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 3432 3433 valid_bits = PTU_PTE_VALID; 3434 3435 if (rmem->nr_pages > 1) { 3436 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 3437 "bnxt_ctx_pg_tbl%s_%x_%d", 3438 suffix, idx, bp->eth_dev->data->port_id); 3439 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 3440 mz = rte_memzone_lookup(mz_name); 3441 if (!mz) { 3442 mz = rte_memzone_reserve_aligned(mz_name, 3443 rmem->nr_pages * 8, 3444 SOCKET_ID_ANY, 3445 RTE_MEMZONE_2MB | 3446 RTE_MEMZONE_SIZE_HINT_ONLY | 3447 RTE_MEMZONE_IOVA_CONTIG, 3448 BNXT_PAGE_SIZE); 3449 if (mz == NULL) 3450 return -ENOMEM; 3451 } 3452 3453 memset(mz->addr, 0, mz->len); 3454 mz_phys_addr = mz->iova; 3455 if ((unsigned long)mz->addr == mz_phys_addr) { 3456 PMD_DRV_LOG(WARNING, 3457 "Memzone physical address same as virtual.\n"); 3458 PMD_DRV_LOG(WARNING, 3459 "Using rte_mem_virt2iova()\n"); 3460 mz_phys_addr = rte_mem_virt2iova(mz->addr); 3461 if (mz_phys_addr == RTE_BAD_IOVA) { 3462 PMD_DRV_LOG(ERR, 3463 "unable to map addr to phys memory\n"); 3464 return -ENOMEM; 3465 } 3466 } 3467 rte_mem_lock_page(((char *)mz->addr)); 3468 3469 rmem->pg_tbl = mz->addr; 3470 rmem->pg_tbl_map = mz_phys_addr; 3471 rmem->pg_tbl_mz = mz; 3472 } 3473 3474 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", 3475 suffix, idx, bp->eth_dev->data->port_id); 3476 mz = rte_memzone_lookup(mz_name); 3477 if (!mz) { 3478 mz = rte_memzone_reserve_aligned(mz_name, 3479 mem_size, 3480 SOCKET_ID_ANY, 3481 RTE_MEMZONE_1GB | 3482 RTE_MEMZONE_SIZE_HINT_ONLY | 3483 RTE_MEMZONE_IOVA_CONTIG, 3484 BNXT_PAGE_SIZE); 3485 if (mz == NULL) 3486 return -ENOMEM; 3487 } 3488 3489 memset(mz->addr, 0, mz->len); 3490 
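	/*
	 * As with the page-table memzone above, fall back to
	 * rte_mem_virt2iova() when the memzone reports an IOVA equal to the
	 * virtual address, so the firmware is always handed a usable bus
	 * address for the backing-store pages.
	 */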
mz_phys_addr = mz->iova; 3491 if ((unsigned long)mz->addr == mz_phys_addr) { 3492 PMD_DRV_LOG(WARNING, 3493 "Memzone physical address same as virtual.\n"); 3494 PMD_DRV_LOG(WARNING, 3495 "Using rte_mem_virt2iova()\n"); 3496 for (sz = 0; sz < mem_size; sz += BNXT_PAGE_SIZE) 3497 rte_mem_lock_page(((char *)mz->addr) + sz); 3498 mz_phys_addr = rte_mem_virt2iova(mz->addr); 3499 if (mz_phys_addr == RTE_BAD_IOVA) { 3500 PMD_DRV_LOG(ERR, 3501 "unable to map addr to phys memory\n"); 3502 return -ENOMEM; 3503 } 3504 } 3505 3506 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 3507 rte_mem_lock_page(((char *)mz->addr) + sz); 3508 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 3509 rmem->dma_arr[i] = mz_phys_addr + sz; 3510 3511 if (rmem->nr_pages > 1) { 3512 if (i == rmem->nr_pages - 2 && 3513 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3514 valid_bits |= PTU_PTE_NEXT_TO_LAST; 3515 else if (i == rmem->nr_pages - 1 && 3516 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3517 valid_bits |= PTU_PTE_LAST; 3518 3519 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 3520 valid_bits); 3521 } 3522 } 3523 3524 rmem->mz = mz; 3525 if (rmem->vmem_size) 3526 rmem->vmem = (void **)mz->addr; 3527 rmem->dma_arr[0] = mz_phys_addr; 3528 return 0; 3529 } 3530 3531 static void bnxt_free_ctx_mem(struct bnxt *bp) 3532 { 3533 int i; 3534 3535 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 3536 return; 3537 3538 bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; 3539 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 3540 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 3541 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 3542 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 3543 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 3544 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 3545 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 3546 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 3547 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 3548 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 3549 3550 for (i = 0; i < BNXT_MAX_Q; i++) { 3551 if (bp->ctx->tqm_mem[i]) 3552 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 3553 } 3554 3555 rte_free(bp->ctx); 3556 bp->ctx = NULL; 3557 } 3558 3559 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 3560 3561 #define min_t(type, x, y) ({ \ 3562 type __min1 = (x); \ 3563 type __min2 = (y); \ 3564 __min1 < __min2 ? __min1 : __min2; }) 3565 3566 #define max_t(type, x, y) ({ \ 3567 type __max1 = (x); \ 3568 type __max2 = (y); \ 3569 __max1 > __max2 ? 
__max1 : __max2; }) 3570 3571 #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) 3572 3573 int bnxt_alloc_ctx_mem(struct bnxt *bp) 3574 { 3575 struct bnxt_ctx_pg_info *ctx_pg; 3576 struct bnxt_ctx_mem_info *ctx; 3577 uint32_t mem_size, ena, entries; 3578 int i, rc; 3579 3580 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 3581 if (rc) { 3582 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 3583 return rc; 3584 } 3585 ctx = bp->ctx; 3586 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 3587 return 0; 3588 3589 ctx_pg = &ctx->qp_mem; 3590 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 3591 mem_size = ctx->qp_entry_size * ctx_pg->entries; 3592 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 3593 if (rc) 3594 return rc; 3595 3596 ctx_pg = &ctx->srq_mem; 3597 ctx_pg->entries = ctx->srq_max_l2_entries; 3598 mem_size = ctx->srq_entry_size * ctx_pg->entries; 3599 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 3600 if (rc) 3601 return rc; 3602 3603 ctx_pg = &ctx->cq_mem; 3604 ctx_pg->entries = ctx->cq_max_l2_entries; 3605 mem_size = ctx->cq_entry_size * ctx_pg->entries; 3606 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 3607 if (rc) 3608 return rc; 3609 3610 ctx_pg = &ctx->vnic_mem; 3611 ctx_pg->entries = ctx->vnic_max_vnic_entries + 3612 ctx->vnic_max_ring_table_entries; 3613 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 3614 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 3615 if (rc) 3616 return rc; 3617 3618 ctx_pg = &ctx->stat_mem; 3619 ctx_pg->entries = ctx->stat_max_entries; 3620 mem_size = ctx->stat_entry_size * ctx_pg->entries; 3621 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 3622 if (rc) 3623 return rc; 3624 3625 entries = ctx->qp_max_l2_entries; 3626 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 3627 entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring, 3628 ctx->tqm_max_entries_per_ring); 3629 for (i = 0, ena = 0; i < BNXT_MAX_Q; i++) { 3630 ctx_pg = ctx->tqm_mem[i]; 3631 /* use min tqm entries for now. 
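	 * Every TQM ring is sized identically: the L2 QP entry count rounded
	 * up to tqm_entries_multiple and clamped to the per-ring minimum and
	 * maximum reported by the backing-store qcaps.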
*/ 3632 ctx_pg->entries = entries; 3633 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 3634 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i); 3635 if (rc) 3636 return rc; 3637 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 3638 } 3639 3640 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 3641 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 3642 if (rc) 3643 PMD_DRV_LOG(ERR, 3644 "Failed to configure context mem: rc = %d\n", rc); 3645 else 3646 ctx->flags |= BNXT_CTX_FLAG_INITED; 3647 3648 return rc; 3649 } 3650 3651 static int bnxt_alloc_stats_mem(struct bnxt *bp) 3652 { 3653 struct rte_pci_device *pci_dev = bp->pdev; 3654 char mz_name[RTE_MEMZONE_NAMESIZE]; 3655 const struct rte_memzone *mz = NULL; 3656 uint32_t total_alloc_len; 3657 rte_iova_t mz_phys_addr; 3658 3659 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 3660 return 0; 3661 3662 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 3663 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 3664 pci_dev->addr.bus, pci_dev->addr.devid, 3665 pci_dev->addr.function, "rx_port_stats"); 3666 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 3667 mz = rte_memzone_lookup(mz_name); 3668 total_alloc_len = 3669 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 3670 sizeof(struct rx_port_stats_ext) + 512); 3671 if (!mz) { 3672 mz = rte_memzone_reserve(mz_name, total_alloc_len, 3673 SOCKET_ID_ANY, 3674 RTE_MEMZONE_2MB | 3675 RTE_MEMZONE_SIZE_HINT_ONLY | 3676 RTE_MEMZONE_IOVA_CONTIG); 3677 if (mz == NULL) 3678 return -ENOMEM; 3679 } 3680 memset(mz->addr, 0, mz->len); 3681 mz_phys_addr = mz->iova; 3682 if ((unsigned long)mz->addr == mz_phys_addr) { 3683 PMD_DRV_LOG(WARNING, 3684 "Memzone physical address same as virtual.\n"); 3685 PMD_DRV_LOG(WARNING, 3686 "Using rte_mem_virt2iova()\n"); 3687 mz_phys_addr = rte_mem_virt2iova(mz->addr); 3688 if (mz_phys_addr == RTE_BAD_IOVA) { 3689 PMD_DRV_LOG(ERR, 3690 "Can't map address to physical memory\n"); 3691 return -ENOMEM; 3692 } 3693 } 3694 3695 bp->rx_mem_zone = (const void *)mz; 3696 bp->hw_rx_port_stats = mz->addr; 3697 bp->hw_rx_port_stats_map = mz_phys_addr; 3698 3699 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 3700 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 3701 pci_dev->addr.bus, pci_dev->addr.devid, 3702 pci_dev->addr.function, "tx_port_stats"); 3703 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 3704 mz = rte_memzone_lookup(mz_name); 3705 total_alloc_len = 3706 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 3707 sizeof(struct tx_port_stats_ext) + 512); 3708 if (!mz) { 3709 mz = rte_memzone_reserve(mz_name, 3710 total_alloc_len, 3711 SOCKET_ID_ANY, 3712 RTE_MEMZONE_2MB | 3713 RTE_MEMZONE_SIZE_HINT_ONLY | 3714 RTE_MEMZONE_IOVA_CONTIG); 3715 if (mz == NULL) 3716 return -ENOMEM; 3717 } 3718 memset(mz->addr, 0, mz->len); 3719 mz_phys_addr = mz->iova; 3720 if ((unsigned long)mz->addr == mz_phys_addr) { 3721 PMD_DRV_LOG(WARNING, 3722 "Memzone physical address same as virtual\n"); 3723 PMD_DRV_LOG(WARNING, 3724 "Using rte_mem_virt2iova()\n"); 3725 mz_phys_addr = rte_mem_virt2iova(mz->addr); 3726 if (mz_phys_addr == RTE_BAD_IOVA) { 3727 PMD_DRV_LOG(ERR, 3728 "Can't map address to physical memory\n"); 3729 return -ENOMEM; 3730 } 3731 } 3732 3733 bp->tx_mem_zone = (const void *)mz; 3734 bp->hw_tx_port_stats = mz->addr; 3735 bp->hw_tx_port_stats_map = mz_phys_addr; 3736 bp->flags |= BNXT_FLAG_PORT_STATS; 3737 3738 /* Display extended statistics if FW supports it */ 3739 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 3740 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 3741 !(bp->flags & 
	    BNXT_FLAG_EXT_STATS_SUPPORTED))
		return 0;

	bp->hw_rx_port_stats_ext = (void *)
		((uint8_t *)bp->hw_rx_port_stats +
		 sizeof(struct rx_port_stats));
	bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
		sizeof(struct rx_port_stats);
	bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;

	if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
	    bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
		bp->hw_tx_port_stats_ext = (void *)
			((uint8_t *)bp->hw_tx_port_stats +
			 sizeof(struct tx_port_stats));
		bp->hw_tx_port_stats_ext_map =
			bp->hw_tx_port_stats_map +
			sizeof(struct tx_port_stats);
		bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
	}

	return 0;
}

static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc = 0;

	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
					       RTE_ETHER_ADDR_LEN *
					       bp->max_l2_ctx,
					       0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
		return -ENOMEM;
	}

	if (bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) {
		if (BNXT_PF(bp))
			return -EINVAL;

		/* Generate a random MAC address, if none was assigned by PF */
		PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
		bnxt_eth_hw_addr_random(bp->mac_addr);
		PMD_DRV_LOG(INFO,
			    "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
			    bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
			    bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);

		rc = bnxt_hwrm_set_mac(bp);
		if (!rc)
			memcpy(&bp->eth_dev->data->mac_addrs[0], bp->mac_addr,
			       RTE_ETHER_ADDR_LEN);
		return rc;
	}

	/* Copy the permanent MAC from the FUNC_QCAPS response */
	memcpy(bp->mac_addr, bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN);
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);

	return rc;
}

#define ALLOW_FUNC(x)	\
	{ \
		uint32_t arg = (x); \
		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
	}
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	static int version_printed;
	struct bnxt *bp;
	uint16_t mtu;
	int rc;

	if (version_printed++ == 0)
		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	bp = eth_dev->data->dev_private;

	bp->dev_stopped = 1;

	eth_dev->dev_ops = &bnxt_dev_ops;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;

	/*
	 * For secondary processes, we don't initialise any further
	 * as primary has already done this work.
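	 * The dev_ops and burst handlers assigned above remain set so a
	 * secondary process can transmit and receive on the rings the
	 * primary process configured.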
#define ALLOW_FUNC(x)	\
	{ \
		uint32_t arg = (x); \
		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
	}
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	static int version_printed;
	struct bnxt *bp;
	uint16_t mtu;
	int rc;

	if (version_printed++ == 0)
		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	bp = eth_dev->data->dev_private;

	bp->dev_stopped = 1;

	eth_dev->dev_ops = &bnxt_dev_ops;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;

	/*
	 * For secondary processes, we don't initialise any further
	 * as primary has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (bnxt_vf_pciid(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_VF;

	if (pci_dev->id.device_id == BROADCOM_DEV_ID_57508 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_57504 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_57502 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF1 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF2)
		bp->flags |= BNXT_FLAG_THOR_CHIP;

	if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
		bp->flags |= BNXT_FLAG_STINGRAY;

	rc = bnxt_init_board(eth_dev);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Board initialization failed rc: %x\n", rc);
		goto error;
	}

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "hwrm resource allocation failure rc: %x\n", rc);
		goto error_free;
	}
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto error_free;

	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
		rc = -EIO;
		goto error_free;
	}

	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
		goto error_free;
	}
	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
		goto error_free;
	}

	rc = bnxt_alloc_stats_mem(bp);
	if (rc)
		goto error_free;

	if (bp->max_tx_rings == 0) {
		PMD_DRV_LOG(ERR, "No TX rings available!\n");
		rc = -EBUSY;
		goto error_free;
	}

	rc = bnxt_setup_mac_addr(eth_dev);
	if (rc)
		goto error_free;

	/* THOR does not support ring groups.
	 * But we will use the array to save RSS context IDs.
	 */
	if (BNXT_CHIP_THOR(bp)) {
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
	} else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
		/* 1 ring is for default completion ring */
		PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
		rc = -ENOSPC;
		goto error_free;
	}

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info = rte_zmalloc("bnxt_grp_info",
					   sizeof(*bp->grp_info) *
					   bp->max_ring_grps, 0);
		if (!bp->grp_info) {
			PMD_DRV_LOG(ERR,
				    "Failed to alloc %zu bytes for grp info tbl.\n",
				    sizeof(*bp->grp_info) * bp->max_ring_grps);
			rc = -ENOMEM;
			goto error_free;
		}
	}

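	/*
	 * bp->fw_ver packs the firmware version as
	 * (major << 24) | (minor << 16) | (build << 8), so the check below
	 * effectively requires 20.6.100 up to (but excluding) 20.7, or
	 * 20.8 and newer, before enabling full VF request forwarding.
	 */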
3945 */ 3946 ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR); 3947 ALLOW_FUNC(HWRM_VNIC_FREE); 3948 ALLOW_FUNC(HWRM_RING_FREE); 3949 ALLOW_FUNC(HWRM_RING_GRP_FREE); 3950 ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE); 3951 ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE); 3952 ALLOW_FUNC(HWRM_STAT_CTX_FREE); 3953 ALLOW_FUNC(HWRM_PORT_PHY_QCFG); 3954 ALLOW_FUNC(HWRM_VNIC_TPA_CFG); 3955 rc = bnxt_hwrm_func_driver_register(bp); 3956 if (rc) { 3957 PMD_DRV_LOG(ERR, 3958 "Failed to register driver"); 3959 rc = -EBUSY; 3960 goto error_free; 3961 } 3962 3963 PMD_DRV_LOG(INFO, 3964 DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n", 3965 pci_dev->mem_resource[0].phys_addr, 3966 pci_dev->mem_resource[0].addr); 3967 3968 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 3969 if (rc) { 3970 PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n"); 3971 goto error_free; 3972 } 3973 3974 if (mtu >= RTE_ETHER_MIN_MTU && mtu <= BNXT_MAX_MTU && 3975 mtu != eth_dev->data->mtu) 3976 eth_dev->data->mtu = mtu; 3977 3978 if (BNXT_PF(bp)) { 3979 //if (bp->pf.active_vfs) { 3980 // TODO: Deallocate VF resources? 3981 //} 3982 if (bp->pdev->max_vfs) { 3983 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 3984 if (rc) { 3985 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 3986 goto error_free; 3987 } 3988 } else { 3989 rc = bnxt_hwrm_allocate_pf_only(bp); 3990 if (rc) { 3991 PMD_DRV_LOG(ERR, 3992 "Failed to allocate PF resources\n"); 3993 goto error_free; 3994 } 3995 } 3996 } 3997 3998 bnxt_hwrm_port_led_qcaps(bp); 3999 4000 rc = bnxt_setup_int(bp); 4001 if (rc) 4002 goto error_free; 4003 4004 rc = bnxt_alloc_mem(bp); 4005 if (rc) 4006 goto error_free; 4007 4008 bnxt_init_nic(bp); 4009 4010 rc = bnxt_request_int(bp); 4011 if (rc) 4012 goto error_free; 4013 4014 return 0; 4015 4016 error_free: 4017 bnxt_dev_uninit(eth_dev); 4018 error: 4019 return rc; 4020 } 4021 4022 static int 4023 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) 4024 { 4025 struct bnxt *bp = eth_dev->data->dev_private; 4026 int rc; 4027 4028 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 4029 return -EPERM; 4030 4031 PMD_DRV_LOG(DEBUG, "Calling Device uninit\n"); 4032 bnxt_disable_int(bp); 4033 bnxt_free_int(bp); 4034 bnxt_free_mem(bp); 4035 4036 bnxt_hwrm_func_buf_unrgtr(bp); 4037 4038 if (bp->grp_info != NULL) { 4039 rte_free(bp->grp_info); 4040 bp->grp_info = NULL; 4041 } 4042 rc = bnxt_hwrm_func_driver_unregister(bp, 0); 4043 bnxt_free_hwrm_resources(bp); 4044 4045 if (bp->tx_mem_zone) { 4046 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); 4047 bp->tx_mem_zone = NULL; 4048 } 4049 4050 if (bp->rx_mem_zone) { 4051 rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); 4052 bp->rx_mem_zone = NULL; 4053 } 4054 4055 if (bp->dev_stopped == 0) 4056 bnxt_dev_close_op(eth_dev); 4057 if (bp->pf.vf_info) 4058 rte_free(bp->pf.vf_info); 4059 bnxt_free_ctx_mem(bp); 4060 eth_dev->dev_ops = NULL; 4061 eth_dev->rx_pkt_burst = NULL; 4062 eth_dev->tx_pkt_burst = NULL; 4063 4064 return rc; 4065 } 4066 4067 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 4068 struct rte_pci_device *pci_dev) 4069 { 4070 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt), 4071 bnxt_dev_init); 4072 } 4073 4074 static int bnxt_pci_remove(struct rte_pci_device *pci_dev) 4075 { 4076 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 4077 return rte_eth_dev_pci_generic_remove(pci_dev, 4078 bnxt_dev_uninit); 4079 else 4080 return rte_eth_dev_pci_generic_remove(pci_dev, NULL); 4081 } 4082 4083 static struct rte_pci_driver bnxt_rte_pmd = { 4084 .id_table = bnxt_pci_id_map, 4085 
static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
					     bnxt_dev_init);
}

static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return rte_eth_dev_pci_generic_remove(pci_dev,
						      bnxt_dev_uninit);
	else
		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

RTE_INIT(bnxt_init_log)
{
	bnxt_logtype_driver = rte_log_register("pmd.net.bnxt.driver");
	if (bnxt_logtype_driver >= 0)
		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
}

RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");