1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2014-2018 Broadcom 3 * All rights reserved. 4 */ 5 6 #include <inttypes.h> 7 #include <stdbool.h> 8 9 #include <rte_dev.h> 10 #include <rte_ethdev_driver.h> 11 #include <rte_ethdev_pci.h> 12 #include <rte_malloc.h> 13 #include <rte_cycles.h> 14 15 #include "bnxt.h" 16 #include "bnxt_cpr.h" 17 #include "bnxt_filter.h" 18 #include "bnxt_hwrm.h" 19 #include "bnxt_irq.h" 20 #include "bnxt_ring.h" 21 #include "bnxt_rxq.h" 22 #include "bnxt_rxr.h" 23 #include "bnxt_stats.h" 24 #include "bnxt_txq.h" 25 #include "bnxt_txr.h" 26 #include "bnxt_vnic.h" 27 #include "hsi_struct_def_dpdk.h" 28 #include "bnxt_nvm_defs.h" 29 #include "bnxt_util.h" 30 31 #define DRV_MODULE_NAME "bnxt" 32 static const char bnxt_version[] = 33 "Broadcom NetXtreme driver " DRV_MODULE_NAME; 34 int bnxt_logtype_driver; 35 36 #define PCI_VENDOR_ID_BROADCOM 0x14E4 37 38 #define BROADCOM_DEV_ID_STRATUS_NIC_VF1 0x1606 39 #define BROADCOM_DEV_ID_STRATUS_NIC_VF2 0x1609 40 #define BROADCOM_DEV_ID_STRATUS_NIC 0x1614 41 #define BROADCOM_DEV_ID_57414_VF 0x16c1 42 #define BROADCOM_DEV_ID_57301 0x16c8 43 #define BROADCOM_DEV_ID_57302 0x16c9 44 #define BROADCOM_DEV_ID_57304_PF 0x16ca 45 #define BROADCOM_DEV_ID_57304_VF 0x16cb 46 #define BROADCOM_DEV_ID_57417_MF 0x16cc 47 #define BROADCOM_DEV_ID_NS2 0x16cd 48 #define BROADCOM_DEV_ID_57311 0x16ce 49 #define BROADCOM_DEV_ID_57312 0x16cf 50 #define BROADCOM_DEV_ID_57402 0x16d0 51 #define BROADCOM_DEV_ID_57404 0x16d1 52 #define BROADCOM_DEV_ID_57406_PF 0x16d2 53 #define BROADCOM_DEV_ID_57406_VF 0x16d3 54 #define BROADCOM_DEV_ID_57402_MF 0x16d4 55 #define BROADCOM_DEV_ID_57407_RJ45 0x16d5 56 #define BROADCOM_DEV_ID_57412 0x16d6 57 #define BROADCOM_DEV_ID_57414 0x16d7 58 #define BROADCOM_DEV_ID_57416_RJ45 0x16d8 59 #define BROADCOM_DEV_ID_57417_RJ45 0x16d9 60 #define BROADCOM_DEV_ID_5741X_VF 0x16dc 61 #define BROADCOM_DEV_ID_57412_MF 0x16de 62 #define BROADCOM_DEV_ID_57314 0x16df 63 #define BROADCOM_DEV_ID_57317_RJ45 0x16e0 64 #define BROADCOM_DEV_ID_5731X_VF 0x16e1 65 #define BROADCOM_DEV_ID_57417_SFP 0x16e2 66 #define BROADCOM_DEV_ID_57416_SFP 0x16e3 67 #define BROADCOM_DEV_ID_57317_SFP 0x16e4 68 #define BROADCOM_DEV_ID_57404_MF 0x16e7 69 #define BROADCOM_DEV_ID_57406_MF 0x16e8 70 #define BROADCOM_DEV_ID_57407_SFP 0x16e9 71 #define BROADCOM_DEV_ID_57407_MF 0x16ea 72 #define BROADCOM_DEV_ID_57414_MF 0x16ec 73 #define BROADCOM_DEV_ID_57416_MF 0x16ee 74 #define BROADCOM_DEV_ID_57508 0x1750 75 #define BROADCOM_DEV_ID_57504 0x1751 76 #define BROADCOM_DEV_ID_57502 0x1752 77 #define BROADCOM_DEV_ID_57500_VF1 0x1806 78 #define BROADCOM_DEV_ID_57500_VF2 0x1807 79 #define BROADCOM_DEV_ID_58802 0xd802 80 #define BROADCOM_DEV_ID_58804 0xd804 81 #define BROADCOM_DEV_ID_58808 0x16f0 82 #define BROADCOM_DEV_ID_58802_VF 0xd800 83 84 static const struct rte_pci_id bnxt_pci_id_map[] = { 85 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 86 BROADCOM_DEV_ID_STRATUS_NIC_VF1) }, 87 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 88 BROADCOM_DEV_ID_STRATUS_NIC_VF2) }, 89 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) }, 90 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) }, 91 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) }, 92 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) }, 93 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) }, 94 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) }, 95 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) }, 96 { 
RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) }, 97 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) }, 98 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) }, 99 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) }, 100 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) }, 101 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) }, 102 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) }, 103 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) }, 104 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) }, 105 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) }, 106 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) }, 107 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) }, 108 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) }, 109 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) }, 110 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) }, 111 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) }, 112 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) }, 113 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) }, 114 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) }, 115 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) }, 116 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) }, 117 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) }, 118 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) }, 119 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) }, 120 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) }, 121 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) }, 122 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) }, 123 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) }, 124 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) }, 125 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) }, 126 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) }, 127 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) }, 128 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) }, 129 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) }, 130 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) }, 131 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) }, 132 { .vendor_id = 0, /* sentinel */ }, 133 }; 134 135 #define BNXT_ETH_RSS_SUPPORT ( \ 136 ETH_RSS_IPV4 | \ 137 ETH_RSS_NONFRAG_IPV4_TCP | \ 138 ETH_RSS_NONFRAG_IPV4_UDP | \ 139 ETH_RSS_IPV6 | \ 140 ETH_RSS_NONFRAG_IPV6_TCP | \ 141 ETH_RSS_NONFRAG_IPV6_UDP) 142 143 #define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \ 144 DEV_TX_OFFLOAD_IPV4_CKSUM | \ 145 DEV_TX_OFFLOAD_TCP_CKSUM | \ 146 DEV_TX_OFFLOAD_UDP_CKSUM | \ 147 DEV_TX_OFFLOAD_TCP_TSO | \ 148 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ 149 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \ 150 DEV_TX_OFFLOAD_GRE_TNL_TSO | \ 151 DEV_TX_OFFLOAD_IPIP_TNL_TSO | \ 152 DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \ 153 DEV_TX_OFFLOAD_MULTI_SEGS) 154 155 #define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \ 156 DEV_RX_OFFLOAD_VLAN_STRIP | \ 157 DEV_RX_OFFLOAD_IPV4_CKSUM | \ 158 DEV_RX_OFFLOAD_UDP_CKSUM | \ 159 DEV_RX_OFFLOAD_TCP_CKSUM | \ 160 
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \ 161 DEV_RX_OFFLOAD_JUMBO_FRAME | \ 162 DEV_RX_OFFLOAD_KEEP_CRC | \ 163 DEV_RX_OFFLOAD_TCP_LRO) 164 165 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); 166 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev); 167 static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu); 168 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); 169 170 /***********************/ 171 172 /* 173 * High level utility functions 174 */ 175 176 static uint16_t bnxt_rss_ctxts(const struct bnxt *bp) 177 { 178 if (!BNXT_CHIP_THOR(bp)) 179 return 1; 180 181 return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings, 182 BNXT_RSS_ENTRIES_PER_CTX_THOR) / 183 BNXT_RSS_ENTRIES_PER_CTX_THOR; 184 } 185 186 static uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp) 187 { 188 if (!BNXT_CHIP_THOR(bp)) 189 return HW_HASH_INDEX_SIZE; 190 191 return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR; 192 } 193 194 static void bnxt_free_mem(struct bnxt *bp) 195 { 196 bnxt_free_filter_mem(bp); 197 bnxt_free_vnic_attributes(bp); 198 bnxt_free_vnic_mem(bp); 199 200 bnxt_free_stats(bp); 201 bnxt_free_tx_rings(bp); 202 bnxt_free_rx_rings(bp); 203 } 204 205 static int bnxt_alloc_mem(struct bnxt *bp) 206 { 207 int rc; 208 209 rc = bnxt_alloc_vnic_mem(bp); 210 if (rc) 211 goto alloc_mem_err; 212 213 rc = bnxt_alloc_vnic_attributes(bp); 214 if (rc) 215 goto alloc_mem_err; 216 217 rc = bnxt_alloc_filter_mem(bp); 218 if (rc) 219 goto alloc_mem_err; 220 221 return 0; 222 223 alloc_mem_err: 224 bnxt_free_mem(bp); 225 return rc; 226 } 227 228 static int bnxt_init_chip(struct bnxt *bp) 229 { 230 struct bnxt_rx_queue *rxq; 231 struct rte_eth_link new; 232 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); 233 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 234 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 235 uint64_t rx_offloads = dev_conf->rxmode.offloads; 236 uint32_t intr_vector = 0; 237 uint32_t queue_id, base = BNXT_MISC_VEC_ID; 238 uint32_t vec = BNXT_MISC_VEC_ID; 239 unsigned int i, j; 240 int rc; 241 242 /* disable uio/vfio intr/eventfd mapping */ 243 rte_intr_disable(intr_handle); 244 245 if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) { 246 bp->eth_dev->data->dev_conf.rxmode.offloads |= 247 DEV_RX_OFFLOAD_JUMBO_FRAME; 248 bp->flags |= BNXT_FLAG_JUMBO; 249 } else { 250 bp->eth_dev->data->dev_conf.rxmode.offloads &= 251 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 252 bp->flags &= ~BNXT_FLAG_JUMBO; 253 } 254 255 /* THOR does not support ring groups. 256 * But we will use the array to save RSS context IDs. 
257 */ 258 if (BNXT_CHIP_THOR(bp)) 259 bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR; 260 261 rc = bnxt_alloc_all_hwrm_stat_ctxs(bp); 262 if (rc) { 263 PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc); 264 goto err_out; 265 } 266 267 rc = bnxt_alloc_hwrm_rings(bp); 268 if (rc) { 269 PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc); 270 goto err_out; 271 } 272 273 rc = bnxt_alloc_all_hwrm_ring_grps(bp); 274 if (rc) { 275 PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc); 276 goto err_out; 277 } 278 279 rc = bnxt_mq_rx_configure(bp); 280 if (rc) { 281 PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc); 282 goto err_out; 283 } 284 285 /* VNIC configuration */ 286 for (i = 0; i < bp->nr_vnics; i++) { 287 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 288 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 289 uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps; 290 291 vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0); 292 if (!vnic->fw_grp_ids) { 293 PMD_DRV_LOG(ERR, 294 "Failed to alloc %d bytes for group ids\n", 295 size); 296 rc = -ENOMEM; 297 goto err_out; 298 } 299 memset(vnic->fw_grp_ids, -1, size); 300 301 PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 302 i, vnic, vnic->fw_grp_ids); 303 304 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 305 if (rc) { 306 PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n", 307 i, rc); 308 goto err_out; 309 } 310 311 /* Alloc RSS context only if RSS mode is enabled */ 312 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) { 313 int j, nr_ctxs = bnxt_rss_ctxts(bp); 314 315 rc = 0; 316 for (j = 0; j < nr_ctxs; j++) { 317 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j); 318 if (rc) 319 break; 320 } 321 if (rc) { 322 PMD_DRV_LOG(ERR, 323 "HWRM vnic %d ctx %d alloc failure rc: %x\n", 324 i, j, rc); 325 goto err_out; 326 } 327 vnic->num_lb_ctxts = nr_ctxs; 328 } 329 330 /* 331 * Firmware sets pf pair in default vnic cfg. If the VLAN strip 332 * setting is not available at this time, it will not be 333 * configured correctly in the CFA. 
334 */ 335 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 336 vnic->vlan_strip = true; 337 else 338 vnic->vlan_strip = false; 339 340 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 341 if (rc) { 342 PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n", 343 i, rc); 344 goto err_out; 345 } 346 347 rc = bnxt_set_hwrm_vnic_filters(bp, vnic); 348 if (rc) { 349 PMD_DRV_LOG(ERR, 350 "HWRM vnic %d filter failure rc: %x\n", 351 i, rc); 352 goto err_out; 353 } 354 355 for (j = 0; j < bp->rx_nr_rings; j++) { 356 rxq = bp->eth_dev->data->rx_queues[j]; 357 358 PMD_DRV_LOG(DEBUG, 359 "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n", 360 j, rxq->vnic, rxq->vnic->fw_grp_ids); 361 362 if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start) 363 rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; 364 } 365 366 rc = bnxt_vnic_rss_configure(bp, vnic); 367 if (rc) { 368 PMD_DRV_LOG(ERR, 369 "HWRM vnic set RSS failure rc: %x\n", rc); 370 goto err_out; 371 } 372 373 bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 374 375 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 376 DEV_RX_OFFLOAD_TCP_LRO) 377 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1); 378 else 379 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0); 380 } 381 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); 382 if (rc) { 383 PMD_DRV_LOG(ERR, 384 "HWRM cfa l2 rx mask failure rc: %x\n", rc); 385 goto err_out; 386 } 387 388 /* check and configure queue intr-vector mapping */ 389 if ((rte_intr_cap_multiple(intr_handle) || 390 !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && 391 bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { 392 intr_vector = bp->eth_dev->data->nb_rx_queues; 393 PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector); 394 if (intr_vector > bp->rx_cp_nr_rings) { 395 PMD_DRV_LOG(ERR, "At most %d intr queues supported", 396 bp->rx_cp_nr_rings); 397 return -ENOTSUP; 398 } 399 rc = rte_intr_efd_enable(intr_handle, intr_vector); 400 if (rc) 401 return rc; 402 } 403 404 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 405 intr_handle->intr_vec = 406 rte_zmalloc("intr_vec", 407 bp->eth_dev->data->nb_rx_queues * 408 sizeof(int), 0); 409 if (intr_handle->intr_vec == NULL) { 410 PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" 411 " intr_vec", bp->eth_dev->data->nb_rx_queues); 412 rc = -ENOMEM; 413 goto err_disable; 414 } 415 PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p " 416 "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n", 417 intr_handle->intr_vec, intr_handle->nb_efd, 418 intr_handle->max_intr); 419 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; 420 queue_id++) { 421 intr_handle->intr_vec[queue_id] = 422 vec + BNXT_RX_VEC_START; 423 if (vec < base + intr_handle->nb_efd - 1) 424 vec++; 425 } 426 } 427 428 /* enable uio/vfio intr/eventfd mapping */ 429 rc = rte_intr_enable(intr_handle); 430 if (rc) 431 goto err_free; 432 433 rc = bnxt_get_hwrm_link_config(bp, &new); 434 if (rc) { 435 PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc); 436 goto err_free; 437 } 438 439 if (!bp->link_info.link_up) { 440 rc = bnxt_set_hwrm_link_config(bp, true); 441 if (rc) { 442 PMD_DRV_LOG(ERR, 443 "HWRM link config failure rc: %x\n", rc); 444 goto err_free; 445 } 446 } 447 bnxt_print_link_info(bp->eth_dev); 448 449 return 0; 450 451 err_free: 452 rte_free(intr_handle->intr_vec); 453 err_disable: 454 rte_intr_efd_disable(intr_handle); 455 err_out: 456 /* Some of the error status returned by FW may not be from errno.h */ 457 if (rc > 0) 458 rc = -EIO; 459 460 return rc; 461 } 462 463 static int bnxt_shutdown_nic(struct bnxt *bp) 464 { 465 
bnxt_free_all_hwrm_resources(bp); 466 bnxt_free_all_filters(bp); 467 bnxt_free_all_vnics(bp); 468 return 0; 469 } 470 471 static int bnxt_init_nic(struct bnxt *bp) 472 { 473 int rc; 474 475 if (BNXT_HAS_RING_GRPS(bp)) { 476 rc = bnxt_init_ring_grps(bp); 477 if (rc) 478 return rc; 479 } 480 481 bnxt_init_vnics(bp); 482 bnxt_init_filters(bp); 483 484 return 0; 485 } 486 487 /* 488 * Device configuration and status function 489 */ 490 491 static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, 492 struct rte_eth_dev_info *dev_info) 493 { 494 struct bnxt *bp = eth_dev->data->dev_private; 495 uint16_t max_vnics, i, j, vpool, vrxq; 496 unsigned int max_rx_rings; 497 498 /* MAC Specifics */ 499 dev_info->max_mac_addrs = bp->max_l2_ctx; 500 dev_info->max_hash_mac_addrs = 0; 501 502 /* PF/VF specifics */ 503 if (BNXT_PF(bp)) 504 dev_info->max_vfs = bp->pdev->max_vfs; 505 max_rx_rings = RTE_MIN(bp->max_rx_rings, bp->max_stat_ctx); 506 /* For the sake of symmetry, max_rx_queues = max_tx_queues */ 507 dev_info->max_rx_queues = max_rx_rings; 508 dev_info->max_tx_queues = max_rx_rings; 509 dev_info->reta_size = bnxt_rss_hash_tbl_size(bp); 510 dev_info->hash_key_size = 40; 511 max_vnics = bp->max_vnics; 512 513 /* Fast path specifics */ 514 dev_info->min_rx_bufsize = 1; 515 dev_info->max_rx_pktlen = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN + 516 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2; 517 518 dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT; 519 if (bp->flags & BNXT_FLAG_PTP_SUPPORTED) 520 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; 521 dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT; 522 dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT; 523 524 /* *INDENT-OFF* */ 525 dev_info->default_rxconf = (struct rte_eth_rxconf) { 526 .rx_thresh = { 527 .pthresh = 8, 528 .hthresh = 8, 529 .wthresh = 0, 530 }, 531 .rx_free_thresh = 32, 532 /* If no descriptors available, pkts are dropped by default */ 533 .rx_drop_en = 1, 534 }; 535 536 dev_info->default_txconf = (struct rte_eth_txconf) { 537 .tx_thresh = { 538 .pthresh = 32, 539 .hthresh = 0, 540 .wthresh = 0, 541 }, 542 .tx_free_thresh = 32, 543 .tx_rs_thresh = 32, 544 }; 545 eth_dev->data->dev_conf.intr_conf.lsc = 1; 546 547 eth_dev->data->dev_conf.intr_conf.rxq = 1; 548 dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 549 dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; 550 dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 551 dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC; 552 553 /* *INDENT-ON* */ 554 555 /* 556 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim 557 * need further investigation. 
 */

	/* VMDq resources */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int rc;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
		rc = bnxt_hwrm_check_vf_rings(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
			return -ENOSPC;
		}

		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
			return -ENOSPC;
		}
	} else {
		/* legacy driver needs to get updated values */
		rc = bnxt_hwrm_func_qcaps(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
			return rc;
		}
	}

	/* Inherit new configurations */
	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
	    bp->max_cp_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
	    bp->max_stat_ctx)
		goto resource_error;

	if (BNXT_HAS_RING_GRPS(bp) &&
	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
		goto resource_error;

	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
	    bp->max_vnics < eth_dev->data->nb_rx_queues)
		goto resource_error;

	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
			BNXT_NUM_VLANS;
		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
	}
	return 0;

resource_error:
	PMD_DRV_LOG(ERR,
		    "Insufficient resources to support requested config\n");
	PMD_DRV_LOG(ERR,
		    "Num Queues Requested: Tx %d, Rx %d\n",
		    eth_dev->data->nb_tx_queues,
		    eth_dev->data->nb_rx_queues);
	PMD_DRV_LOG(ERR,
		    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
		    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
		    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
	return -ENOSPC;
}

static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
			    eth_dev->data->port_id,
			    (uint32_t)link->link_speed,
			    (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
672 ("full-duplex") : ("half-duplex\n")); 673 else 674 PMD_DRV_LOG(INFO, "Port %d Link Down\n", 675 eth_dev->data->port_id); 676 } 677 678 /* 679 * Determine whether the current configuration requires support for scattered 680 * receive; return 1 if scattered receive is required and 0 if not. 681 */ 682 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev) 683 { 684 uint16_t buf_size; 685 int i; 686 687 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 688 struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i]; 689 690 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - 691 RTE_PKTMBUF_HEADROOM); 692 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) 693 return 1; 694 } 695 return 0; 696 } 697 698 static eth_rx_burst_t 699 bnxt_receive_function(__rte_unused struct rte_eth_dev *eth_dev) 700 { 701 #ifdef RTE_ARCH_X86 702 /* 703 * Vector mode receive can be enabled only if scatter rx is not 704 * in use and rx offloads are limited to VLAN stripping and 705 * CRC stripping. 706 */ 707 if (!eth_dev->data->scattered_rx && 708 !(eth_dev->data->dev_conf.rxmode.offloads & 709 ~(DEV_RX_OFFLOAD_VLAN_STRIP | 710 DEV_RX_OFFLOAD_KEEP_CRC | 711 DEV_RX_OFFLOAD_JUMBO_FRAME | 712 DEV_RX_OFFLOAD_IPV4_CKSUM | 713 DEV_RX_OFFLOAD_UDP_CKSUM | 714 DEV_RX_OFFLOAD_TCP_CKSUM | 715 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 716 DEV_RX_OFFLOAD_VLAN_FILTER))) { 717 PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n", 718 eth_dev->data->port_id); 719 return bnxt_recv_pkts_vec; 720 } 721 PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n", 722 eth_dev->data->port_id); 723 PMD_DRV_LOG(INFO, 724 "Port %d scatter: %d rx offload: %" PRIX64 "\n", 725 eth_dev->data->port_id, 726 eth_dev->data->scattered_rx, 727 eth_dev->data->dev_conf.rxmode.offloads); 728 #endif 729 return bnxt_recv_pkts; 730 } 731 732 static eth_tx_burst_t 733 bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev) 734 { 735 #ifdef RTE_ARCH_X86 736 /* 737 * Vector mode receive can be enabled only if scatter tx is not 738 * in use and tx offloads other than VLAN insertion are not 739 * in use. 
	 */
	if (!eth_dev->data->scattered_rx &&
	    !(eth_dev->data->dev_conf.txmode.offloads &
	      ~DEV_TX_OFFLOAD_VLAN_INSERT)) {
		PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
			    eth_dev->data->port_id);
		return bnxt_xmit_pkts_vec;
	}
	PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
		    eth_dev->data->port_id);
	PMD_DRV_LOG(INFO,
		    "Port %d scatter: %d tx offload: %" PRIX64 "\n",
		    eth_dev->data->port_id,
		    eth_dev->data->scattered_rx,
		    eth_dev->data->dev_conf.txmode.offloads);
#endif
	return bnxt_xmit_pkts;
}

static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int vlan_mask = 0;
	int rc;

	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		PMD_DRV_LOG(ERR,
			"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	}

	rc = bnxt_init_chip(bp);
	if (rc)
		goto error;

	eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);

	bnxt_link_update_op(eth_dev, 1);

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		vlan_mask |= ETH_VLAN_FILTER_MASK;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vlan_mask |= ETH_VLAN_STRIP_MASK;
	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
	if (rc)
		goto error;

	eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
	eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
	bnxt_enable_int(bp);
	bp->flags |= BNXT_FLAG_INIT_DONE;
	bp->dev_stopped = 0;
	return 0;

error:
	bnxt_shutdown_nic(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc = 0;

	if (!bp->link_info.link_up)
		rc = bnxt_set_hwrm_link_config(bp, true);
	if (!rc)
		eth_dev->data->dev_link.link_status = 1;

	bnxt_print_link_info(eth_dev);
	return rc;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	bp->link_info.link_up = 0;

	return 0;
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	bnxt_disable_int(bp);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	bp->flags &= ~BNXT_FLAG_INIT_DONE;
	if (bp->eth_dev->data->dev_started) {
		/* TBD: STOP HW queues DMA */
		eth_dev->data->dev_link.link_status = 0;
	}
	bnxt_set_hwrm_link_config(bp, false);

	/* Clean queue intr-vector mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	bnxt_hwrm_port_clr_stats(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_shutdown_nic(bp);
	bp->dev_stopped = 1;
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	if (bp->dev_stopped == 0)
		bnxt_dev_stop_op(eth_dev);

	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}

	bnxt_dev_uninit(eth_dev);
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	uint32_t i;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < bp->nr_vnics; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		vnic = &bp->vnic_info[i];
		filter = STAILQ_FIRST(&vnic->filter);
		while (filter) {
			temp_filter = STAILQ_NEXT(filter, next);
			if (filter->mac_index == index) {
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				bnxt_hwrm_clear_l2_filter(bp, filter);
				filter->mac_index = INVALID_MAC_INDEX;
				memset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN);
				STAILQ_INSERT_TAIL(&bp->free_filter_list,
						   filter, next);
			}
			filter = temp_filter;
		}
	}
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
	struct bnxt_filter_info *filter;
	int rc = 0;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}
	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			PMD_DRV_LOG(ERR,
				"MAC addr already exists for pool %d\n", pool);
			return 0;
		}
	}
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
		return -ENODEV;
	}

	filter->mac_index = index;
	memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);

	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
	if (!rc) {
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	} else {
		filter->mac_index = INVALID_MAC_INDEX;
		memset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN);
		bnxt_free_filter(bp, filter);
	}

	return rc;
}

int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_link new;
	unsigned int cnt = BNXT_LINK_WAIT_CNT;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR,
				"Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}

		if (!wait_to_complete || new.link_status)
			break;

		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
	} while (cnt--);

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		memcpy(&eth_dev->data->dev_link, &new,
		       sizeof(struct rte_eth_link));

		_rte_eth_dev_callback_process(eth_dev,
					      RTE_ETH_EVENT_INTR_LSC,
					      NULL);

		bnxt_print_link_info(eth_dev);
	}

	return rc;
}

static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

/* Return bnxt_rx_queue pointer corresponding to a given rxq. */
static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
{
	if (qid >= bp->rx_nr_rings)
		return NULL;

	return bp->eth_dev->data->rx_queues[qid];
}

/* Return rxq corresponding to a given rss table ring/group ID.
 */
static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
{
	struct bnxt_rx_queue *rxq;
	unsigned int i;

	if (!BNXT_HAS_RING_GRPS(bp)) {
		for (i = 0; i < bp->rx_nr_rings; i++) {
			rxq = bp->eth_dev->data->rx_queues[i];
			if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
				return rxq->index;
		}
	} else {
		for (i = 0; i < bp->rx_nr_rings; i++) {
			if (bp->grp_info[i].fw_grp_id == fwr)
				return i;
		}
	}

	return INVALID_HW_RING_ID;
}

static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
	uint16_t idx, sft;
	int i;

	if (!vnic->rss_table)
		return -EINVAL;

	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
		return -EINVAL;

	if (reta_size != tbl_size) {
		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
			    "(%d) must equal the size supported by the hardware "
			    "(%d)\n", reta_size, tbl_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		struct bnxt_rx_queue *rxq;

		idx = i / RTE_RETA_GROUP_SIZE;
		sft = i % RTE_RETA_GROUP_SIZE;

		if (!(reta_conf[idx].mask & (1ULL << sft)))
			continue;

		rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
		if (!rxq) {
			PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
			return -EINVAL;
		}

		if (BNXT_CHIP_THOR(bp)) {
			vnic->rss_table[i * 2] =
				rxq->rx_ring->rx_ring_struct->fw_ring_id;
			vnic->rss_table[i * 2 + 1] =
				rxq->cp_ring->cp_ring_struct->fw_ring_id;
		} else {
			vnic->rss_table[i] =
				vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
		}
	}

	bnxt_hwrm_vnic_rss_cfg(bp, vnic);
	return 0;
}

static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
	uint16_t idx, sft, i;

	/* Retrieve from the default VNIC */
	if (!vnic)
		return -EINVAL;
	if (!vnic->rss_table)
		return -EINVAL;

	if (reta_size != tbl_size) {
		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
			    "(%d) must equal the size supported by the hardware "
			    "(%d)\n", reta_size, tbl_size);
		return -EINVAL;
	}

	for (idx = 0, i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		sft = i % RTE_RETA_GROUP_SIZE;

		if (reta_conf[idx].mask & (1ULL << sft)) {
			uint16_t qid;

			if (BNXT_CHIP_THOR(bp))
				qid = bnxt_rss_to_qid(bp,
						      vnic->rss_table[i * 2]);
			else
				qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);

			if (qid == INVALID_HW_RING_ID) {
				PMD_DRV_LOG(ERR, "Invalid entry in RSS table.\n");
				return -EINVAL;
			}
			reta_conf[idx].reta[sft] = qid;
		}
	}

	return 0;
}

static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
				   struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	uint16_t hash_type = 0;
	unsigned int i;

	/*
	 * If RSS enablement differs from what was requested in dev_configure,
	 * return -EINVAL.
	 */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (!rss_conf->rss_hf)
			PMD_DRV_LOG(ERR, "Hash type NONE\n");
	} else {
		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
			return -EINVAL;
	}

	bp->flags |= BNXT_FLAG_UPDATE_HASH;
	memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));

	if (rss_conf->rss_hf & ETH_RSS_IPV4)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_IPV6)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

	/* Update the RSS VNIC(s) */
	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		vnic->hash_type = hash_type;

		/*
		 * Use the supplied key if the key length is
		 * acceptable and the rss_key is not NULL
		 */
		if (rss_conf->rss_key &&
		    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
			memcpy(vnic->rss_hash_key, rss_conf->rss_key,
			       rss_conf->rss_key_len);

		bnxt_hwrm_vnic_rss_cfg(bp, vnic);
	}
	return 0;
}

static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int len;
	uint32_t hash_types;

	/* RSS configuration is the same for all VNICs */
	if (vnic && vnic->rss_hash_key) {
		if (rss_conf->rss_key) {
			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
1261 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 1262 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 1263 } 1264 1265 hash_types = vnic->hash_type; 1266 rss_conf->rss_hf = 0; 1267 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { 1268 rss_conf->rss_hf |= ETH_RSS_IPV4; 1269 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 1270 } 1271 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { 1272 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; 1273 hash_types &= 1274 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 1275 } 1276 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { 1277 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; 1278 hash_types &= 1279 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 1280 } 1281 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { 1282 rss_conf->rss_hf |= ETH_RSS_IPV6; 1283 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 1284 } 1285 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { 1286 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; 1287 hash_types &= 1288 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 1289 } 1290 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { 1291 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; 1292 hash_types &= 1293 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 1294 } 1295 if (hash_types) { 1296 PMD_DRV_LOG(ERR, 1297 "Unknwon RSS config from firmware (%08x), RSS disabled", 1298 vnic->hash_type); 1299 return -ENOTSUP; 1300 } 1301 } else { 1302 rss_conf->rss_hf = 0; 1303 } 1304 return 0; 1305 } 1306 1307 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 1308 struct rte_eth_fc_conf *fc_conf) 1309 { 1310 struct bnxt *bp = dev->data->dev_private; 1311 struct rte_eth_link link_info; 1312 int rc; 1313 1314 rc = bnxt_get_hwrm_link_config(bp, &link_info); 1315 if (rc) 1316 return rc; 1317 1318 memset(fc_conf, 0, sizeof(*fc_conf)); 1319 if (bp->link_info.auto_pause) 1320 fc_conf->autoneg = 1; 1321 switch (bp->link_info.pause) { 1322 case 0: 1323 fc_conf->mode = RTE_FC_NONE; 1324 break; 1325 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 1326 fc_conf->mode = RTE_FC_TX_PAUSE; 1327 break; 1328 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 1329 fc_conf->mode = RTE_FC_RX_PAUSE; 1330 break; 1331 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 1332 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 1333 fc_conf->mode = RTE_FC_FULL; 1334 break; 1335 } 1336 return 0; 1337 } 1338 1339 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 1340 struct rte_eth_fc_conf *fc_conf) 1341 { 1342 struct bnxt *bp = dev->data->dev_private; 1343 1344 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { 1345 PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n"); 1346 return -ENOTSUP; 1347 } 1348 1349 switch (fc_conf->mode) { 1350 case RTE_FC_NONE: 1351 bp->link_info.auto_pause = 0; 1352 bp->link_info.force_pause = 0; 1353 break; 1354 case RTE_FC_RX_PAUSE: 1355 if (fc_conf->autoneg) { 1356 bp->link_info.auto_pause = 1357 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 1358 bp->link_info.force_pause = 0; 1359 } else { 1360 bp->link_info.auto_pause = 0; 1361 bp->link_info.force_pause = 1362 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 1363 } 1364 break; 1365 case RTE_FC_TX_PAUSE: 1366 if (fc_conf->autoneg) { 1367 bp->link_info.auto_pause = 1368 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 1369 bp->link_info.force_pause = 0; 1370 } else { 1371 bp->link_info.auto_pause = 0; 1372 bp->link_info.force_pause = 1373 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 1374 } 1375 break; 1376 case RTE_FC_FULL: 1377 if (fc_conf->autoneg) { 1378 bp->link_info.auto_pause = 1379 
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 1380 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 1381 bp->link_info.force_pause = 0; 1382 } else { 1383 bp->link_info.auto_pause = 0; 1384 bp->link_info.force_pause = 1385 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 1386 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 1387 } 1388 break; 1389 } 1390 return bnxt_set_hwrm_link_config(bp, true); 1391 } 1392 1393 /* Add UDP tunneling port */ 1394 static int 1395 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 1396 struct rte_eth_udp_tunnel *udp_tunnel) 1397 { 1398 struct bnxt *bp = eth_dev->data->dev_private; 1399 uint16_t tunnel_type = 0; 1400 int rc = 0; 1401 1402 switch (udp_tunnel->prot_type) { 1403 case RTE_TUNNEL_TYPE_VXLAN: 1404 if (bp->vxlan_port_cnt) { 1405 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 1406 udp_tunnel->udp_port); 1407 if (bp->vxlan_port != udp_tunnel->udp_port) { 1408 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 1409 return -ENOSPC; 1410 } 1411 bp->vxlan_port_cnt++; 1412 return 0; 1413 } 1414 tunnel_type = 1415 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 1416 bp->vxlan_port_cnt++; 1417 break; 1418 case RTE_TUNNEL_TYPE_GENEVE: 1419 if (bp->geneve_port_cnt) { 1420 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 1421 udp_tunnel->udp_port); 1422 if (bp->geneve_port != udp_tunnel->udp_port) { 1423 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 1424 return -ENOSPC; 1425 } 1426 bp->geneve_port_cnt++; 1427 return 0; 1428 } 1429 tunnel_type = 1430 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 1431 bp->geneve_port_cnt++; 1432 break; 1433 default: 1434 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 1435 return -ENOTSUP; 1436 } 1437 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 1438 tunnel_type); 1439 return rc; 1440 } 1441 1442 static int 1443 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 1444 struct rte_eth_udp_tunnel *udp_tunnel) 1445 { 1446 struct bnxt *bp = eth_dev->data->dev_private; 1447 uint16_t tunnel_type = 0; 1448 uint16_t port = 0; 1449 int rc = 0; 1450 1451 switch (udp_tunnel->prot_type) { 1452 case RTE_TUNNEL_TYPE_VXLAN: 1453 if (!bp->vxlan_port_cnt) { 1454 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 1455 return -EINVAL; 1456 } 1457 if (bp->vxlan_port != udp_tunnel->udp_port) { 1458 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 1459 udp_tunnel->udp_port, bp->vxlan_port); 1460 return -EINVAL; 1461 } 1462 if (--bp->vxlan_port_cnt) 1463 return 0; 1464 1465 tunnel_type = 1466 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 1467 port = bp->vxlan_fw_dst_port_id; 1468 break; 1469 case RTE_TUNNEL_TYPE_GENEVE: 1470 if (!bp->geneve_port_cnt) { 1471 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 1472 return -EINVAL; 1473 } 1474 if (bp->geneve_port != udp_tunnel->udp_port) { 1475 PMD_DRV_LOG(ERR, "Req Port: %d. 
Configured port: %d\n", 1476 udp_tunnel->udp_port, bp->geneve_port); 1477 return -EINVAL; 1478 } 1479 if (--bp->geneve_port_cnt) 1480 return 0; 1481 1482 tunnel_type = 1483 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 1484 port = bp->geneve_fw_dst_port_id; 1485 break; 1486 default: 1487 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 1488 return -ENOTSUP; 1489 } 1490 1491 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 1492 if (!rc) { 1493 if (tunnel_type == 1494 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) 1495 bp->vxlan_port = 0; 1496 if (tunnel_type == 1497 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) 1498 bp->geneve_port = 0; 1499 } 1500 return rc; 1501 } 1502 1503 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 1504 { 1505 struct bnxt_filter_info *filter; 1506 struct bnxt_vnic_info *vnic; 1507 int rc = 0; 1508 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 1509 1510 /* if VLAN exists && VLAN matches vlan_id 1511 * remove the MAC+VLAN filter 1512 * add a new MAC only filter 1513 * else 1514 * VLAN filter doesn't exist, just skip and continue 1515 */ 1516 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1517 filter = STAILQ_FIRST(&vnic->filter); 1518 while (filter) { 1519 /* Search for this matching MAC+VLAN filter */ 1520 if (filter->enables & chk && filter->l2_ivlan == vlan_id && 1521 !memcmp(filter->l2_addr, 1522 bp->mac_addr, 1523 RTE_ETHER_ADDR_LEN)) { 1524 /* Delete the filter */ 1525 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 1526 if (rc) 1527 return rc; 1528 STAILQ_REMOVE(&vnic->filter, filter, 1529 bnxt_filter_info, next); 1530 STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next); 1531 1532 PMD_DRV_LOG(INFO, 1533 "Del Vlan filter for %d\n", 1534 vlan_id); 1535 return rc; 1536 } 1537 filter = STAILQ_NEXT(filter, next); 1538 } 1539 return -ENOENT; 1540 } 1541 1542 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 1543 { 1544 struct bnxt_filter_info *filter; 1545 struct bnxt_vnic_info *vnic; 1546 int rc = 0; 1547 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 1548 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 1549 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 1550 1551 /* Implementation notes on the use of VNIC in this command: 1552 * 1553 * By default, these filters belong to default vnic for the function. 1554 * Once these filters are set up, only destination VNIC can be modified. 1555 * If the destination VNIC is not specified in this command, 1556 * then the HWRM shall only create an l2 context id. 1557 */ 1558 1559 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1560 filter = STAILQ_FIRST(&vnic->filter); 1561 /* Check if the VLAN has already been added */ 1562 while (filter) { 1563 if (filter->enables & chk && filter->l2_ivlan == vlan_id && 1564 !memcmp(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN)) 1565 return -EEXIST; 1566 1567 filter = STAILQ_NEXT(filter, next); 1568 } 1569 1570 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 1571 * command to create MAC+VLAN filter with the right flags, enables set. 
1572 */ 1573 filter = bnxt_alloc_filter(bp); 1574 if (!filter) { 1575 PMD_DRV_LOG(ERR, 1576 "MAC/VLAN filter alloc failed\n"); 1577 return -ENOMEM; 1578 } 1579 /* MAC + VLAN ID filter */ 1580 filter->l2_ivlan = vlan_id; 1581 filter->l2_ivlan_mask = 0x0FFF; 1582 filter->enables |= en; 1583 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 1584 if (rc) { 1585 /* Free the newly allocated filter as we were 1586 * not able to create the filter in hardware. 1587 */ 1588 filter->fw_l2_filter_id = UINT64_MAX; 1589 STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next); 1590 return rc; 1591 } 1592 1593 /* Add this new filter to the list */ 1594 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 1595 PMD_DRV_LOG(INFO, 1596 "Added Vlan filter for %d\n", vlan_id); 1597 return rc; 1598 } 1599 1600 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 1601 uint16_t vlan_id, int on) 1602 { 1603 struct bnxt *bp = eth_dev->data->dev_private; 1604 1605 /* These operations apply to ALL existing MAC/VLAN filters */ 1606 if (on) 1607 return bnxt_add_vlan_filter(bp, vlan_id); 1608 else 1609 return bnxt_del_vlan_filter(bp, vlan_id); 1610 } 1611 1612 static int 1613 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 1614 { 1615 struct bnxt *bp = dev->data->dev_private; 1616 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 1617 unsigned int i; 1618 1619 if (mask & ETH_VLAN_FILTER_MASK) { 1620 if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { 1621 /* Remove any VLAN filters programmed */ 1622 for (i = 0; i < 4095; i++) 1623 bnxt_del_vlan_filter(bp, i); 1624 } 1625 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", 1626 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)); 1627 } 1628 1629 if (mask & ETH_VLAN_STRIP_MASK) { 1630 /* Enable or disable VLAN stripping */ 1631 for (i = 0; i < bp->nr_vnics; i++) { 1632 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 1633 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 1634 vnic->vlan_strip = true; 1635 else 1636 vnic->vlan_strip = false; 1637 bnxt_hwrm_vnic_cfg(bp, vnic); 1638 } 1639 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", 1640 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)); 1641 } 1642 1643 if (mask & ETH_VLAN_EXTEND_MASK) 1644 PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n"); 1645 1646 return 0; 1647 } 1648 1649 static int 1650 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 1651 struct rte_ether_addr *addr) 1652 { 1653 struct bnxt *bp = dev->data->dev_private; 1654 /* Default Filter is tied to VNIC 0 */ 1655 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 1656 struct bnxt_filter_info *filter; 1657 int rc; 1658 1659 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 1660 return -EPERM; 1661 1662 if (rte_is_zero_ether_addr(addr)) 1663 return -EINVAL; 1664 1665 STAILQ_FOREACH(filter, &vnic->filter, next) { 1666 /* Default Filter is at Index 0 */ 1667 if (filter->mac_index != 0) 1668 continue; 1669 1670 memcpy(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN); 1671 memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); 1672 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; 1673 filter->enables |= 1674 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | 1675 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK; 1676 1677 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 1678 if (rc) 1679 return rc; 1680 1681 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 1682 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 1683 return 0; 1684 } 1685 1686 return 0; 1687 } 1688 1689 static int 1690 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 
1691 struct rte_ether_addr *mc_addr_set, 1692 uint32_t nb_mc_addr) 1693 { 1694 struct bnxt *bp = eth_dev->data->dev_private; 1695 char *mc_addr_list = (char *)mc_addr_set; 1696 struct bnxt_vnic_info *vnic; 1697 uint32_t off = 0, i = 0; 1698 1699 vnic = &bp->vnic_info[0]; 1700 1701 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 1702 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 1703 goto allmulti; 1704 } 1705 1706 /* TODO Check for Duplicate mcast addresses */ 1707 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 1708 for (i = 0; i < nb_mc_addr; i++) { 1709 memcpy(vnic->mc_list + off, &mc_addr_list[i], 1710 RTE_ETHER_ADDR_LEN); 1711 off += RTE_ETHER_ADDR_LEN; 1712 } 1713 1714 vnic->mc_addr_cnt = i; 1715 1716 allmulti: 1717 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1718 } 1719 1720 static int 1721 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 1722 { 1723 struct bnxt *bp = dev->data->dev_private; 1724 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 1725 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 1726 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 1727 int ret; 1728 1729 ret = snprintf(fw_version, fw_size, "%d.%d.%d", 1730 fw_major, fw_minor, fw_updt); 1731 1732 ret += 1; /* add the size of '\0' */ 1733 if (fw_size < (uint32_t)ret) 1734 return ret; 1735 else 1736 return 0; 1737 } 1738 1739 static void 1740 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 1741 struct rte_eth_rxq_info *qinfo) 1742 { 1743 struct bnxt_rx_queue *rxq; 1744 1745 rxq = dev->data->rx_queues[queue_id]; 1746 1747 qinfo->mp = rxq->mb_pool; 1748 qinfo->scattered_rx = dev->data->scattered_rx; 1749 qinfo->nb_desc = rxq->nb_rx_desc; 1750 1751 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 1752 qinfo->conf.rx_drop_en = 0; 1753 qinfo->conf.rx_deferred_start = 0; 1754 } 1755 1756 static void 1757 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 1758 struct rte_eth_txq_info *qinfo) 1759 { 1760 struct bnxt_tx_queue *txq; 1761 1762 txq = dev->data->tx_queues[queue_id]; 1763 1764 qinfo->nb_desc = txq->nb_tx_desc; 1765 1766 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 1767 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 1768 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 1769 1770 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 1771 qinfo->conf.tx_rs_thresh = 0; 1772 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 1773 } 1774 1775 static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 1776 { 1777 struct bnxt *bp = eth_dev->data->dev_private; 1778 struct rte_eth_dev_info dev_info; 1779 uint32_t new_pkt_size; 1780 uint32_t rc = 0; 1781 uint32_t i; 1782 1783 new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 1784 VLAN_TAG_SIZE * BNXT_NUM_VLANS; 1785 1786 bnxt_dev_info_get_op(eth_dev, &dev_info); 1787 1788 if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) { 1789 PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n", 1790 RTE_ETHER_MIN_MTU, BNXT_MAX_MTU); 1791 return -EINVAL; 1792 } 1793 1794 #ifdef RTE_ARCH_X86 1795 /* 1796 * If vector-mode tx/rx is active, disallow any MTU change that would 1797 * require scattered receive support. 1798 */ 1799 if (eth_dev->data->dev_started && 1800 (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec || 1801 eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) && 1802 (new_pkt_size > 1803 eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 1804 PMD_DRV_LOG(ERR, 1805 "MTU change would require scattered rx support. 
"); 1806 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n"); 1807 return -EINVAL; 1808 } 1809 #endif 1810 1811 if (new_mtu > RTE_ETHER_MTU) { 1812 bp->flags |= BNXT_FLAG_JUMBO; 1813 bp->eth_dev->data->dev_conf.rxmode.offloads |= 1814 DEV_RX_OFFLOAD_JUMBO_FRAME; 1815 } else { 1816 bp->eth_dev->data->dev_conf.rxmode.offloads &= 1817 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 1818 bp->flags &= ~BNXT_FLAG_JUMBO; 1819 } 1820 1821 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size; 1822 1823 eth_dev->data->mtu = new_mtu; 1824 PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu); 1825 1826 for (i = 0; i < bp->nr_vnics; i++) { 1827 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 1828 uint16_t size = 0; 1829 1830 vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + 1831 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2; 1832 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 1833 if (rc) 1834 break; 1835 1836 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); 1837 size -= RTE_PKTMBUF_HEADROOM; 1838 1839 if (size < new_mtu) { 1840 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 1841 if (rc) 1842 return rc; 1843 } 1844 } 1845 1846 return rc; 1847 } 1848 1849 static int 1850 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 1851 { 1852 struct bnxt *bp = dev->data->dev_private; 1853 uint16_t vlan = bp->vlan; 1854 int rc; 1855 1856 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { 1857 PMD_DRV_LOG(ERR, 1858 "PVID cannot be modified for this function\n"); 1859 return -ENOTSUP; 1860 } 1861 bp->vlan = on ? pvid : 0; 1862 1863 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 1864 if (rc) 1865 bp->vlan = vlan; 1866 return rc; 1867 } 1868 1869 static int 1870 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 1871 { 1872 struct bnxt *bp = dev->data->dev_private; 1873 1874 return bnxt_hwrm_port_led_cfg(bp, true); 1875 } 1876 1877 static int 1878 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 1879 { 1880 struct bnxt *bp = dev->data->dev_private; 1881 1882 return bnxt_hwrm_port_led_cfg(bp, false); 1883 } 1884 1885 static uint32_t 1886 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1887 { 1888 uint32_t desc = 0, raw_cons = 0, cons; 1889 struct bnxt_cp_ring_info *cpr; 1890 struct bnxt_rx_queue *rxq; 1891 struct rx_pkt_cmpl *rxcmp; 1892 uint16_t cmp_type; 1893 uint8_t cmp = 1; 1894 bool valid; 1895 1896 rxq = dev->data->rx_queues[rx_queue_id]; 1897 cpr = rxq->cp_ring; 1898 valid = cpr->valid; 1899 1900 while (raw_cons < rxq->nb_rx_desc) { 1901 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 1902 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 1903 1904 if (!CMPL_VALID(rxcmp, valid)) 1905 goto nothing_to_do; 1906 valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid); 1907 cmp_type = CMP_TYPE(rxcmp); 1908 if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) { 1909 cmp = (rte_le_to_cpu_32( 1910 ((struct rx_tpa_end_cmpl *) 1911 (rxcmp))->agg_bufs_v1) & 1912 RX_TPA_END_CMPL_AGG_BUFS_MASK) >> 1913 RX_TPA_END_CMPL_AGG_BUFS_SFT; 1914 desc++; 1915 } else if (cmp_type == 0x11) { 1916 desc++; 1917 cmp = (rxcmp->agg_bufs_v1 & 1918 RX_PKT_CMPL_AGG_BUFS_MASK) >> 1919 RX_PKT_CMPL_AGG_BUFS_SFT; 1920 } else { 1921 cmp = 1; 1922 } 1923 nothing_to_do: 1924 raw_cons += cmp ? 
cmp : 2; 1925 } 1926 1927 return desc; 1928 } 1929 1930 static int 1931 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 1932 { 1933 struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue; 1934 struct bnxt_rx_ring_info *rxr; 1935 struct bnxt_cp_ring_info *cpr; 1936 struct bnxt_sw_rx_bd *rx_buf; 1937 struct rx_pkt_cmpl *rxcmp; 1938 uint32_t cons, cp_cons; 1939 1940 if (!rxq) 1941 return -EINVAL; 1942 1943 cpr = rxq->cp_ring; 1944 rxr = rxq->rx_ring; 1945 1946 if (offset >= rxq->nb_rx_desc) 1947 return -EINVAL; 1948 1949 cons = RING_CMP(cpr->cp_ring_struct, offset); 1950 cp_cons = cpr->cp_raw_cons; 1951 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 1952 1953 if (cons > cp_cons) { 1954 if (CMPL_VALID(rxcmp, cpr->valid)) 1955 return RTE_ETH_RX_DESC_DONE; 1956 } else { 1957 if (CMPL_VALID(rxcmp, !cpr->valid)) 1958 return RTE_ETH_RX_DESC_DONE; 1959 } 1960 rx_buf = &rxr->rx_buf_ring[cons]; 1961 if (rx_buf->mbuf == NULL) 1962 return RTE_ETH_RX_DESC_UNAVAIL; 1963 1964 1965 return RTE_ETH_RX_DESC_AVAIL; 1966 } 1967 1968 static int 1969 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 1970 { 1971 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 1972 struct bnxt_tx_ring_info *txr; 1973 struct bnxt_cp_ring_info *cpr; 1974 struct bnxt_sw_tx_bd *tx_buf; 1975 struct tx_pkt_cmpl *txcmp; 1976 uint32_t cons, cp_cons; 1977 1978 if (!txq) 1979 return -EINVAL; 1980 1981 cpr = txq->cp_ring; 1982 txr = txq->tx_ring; 1983 1984 if (offset >= txq->nb_tx_desc) 1985 return -EINVAL; 1986 1987 cons = RING_CMP(cpr->cp_ring_struct, offset); 1988 txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 1989 cp_cons = cpr->cp_raw_cons; 1990 1991 if (cons > cp_cons) { 1992 if (CMPL_VALID(txcmp, cpr->valid)) 1993 return RTE_ETH_TX_DESC_UNAVAIL; 1994 } else { 1995 if (CMPL_VALID(txcmp, !cpr->valid)) 1996 return RTE_ETH_TX_DESC_UNAVAIL; 1997 } 1998 tx_buf = &txr->tx_buf_ring[cons]; 1999 if (tx_buf->mbuf == NULL) 2000 return RTE_ETH_TX_DESC_DONE; 2001 2002 return RTE_ETH_TX_DESC_FULL; 2003 } 2004 2005 static struct bnxt_filter_info * 2006 bnxt_match_and_validate_ether_filter(struct bnxt *bp, 2007 struct rte_eth_ethertype_filter *efilter, 2008 struct bnxt_vnic_info *vnic0, 2009 struct bnxt_vnic_info *vnic, 2010 int *ret) 2011 { 2012 struct bnxt_filter_info *mfilter = NULL; 2013 int match = 0; 2014 *ret = 0; 2015 2016 if (efilter->ether_type == RTE_ETHER_TYPE_IPV4 || 2017 efilter->ether_type == RTE_ETHER_TYPE_IPV6) { 2018 PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in" 2019 " ethertype filter.", efilter->ether_type); 2020 *ret = -EINVAL; 2021 goto exit; 2022 } 2023 if (efilter->queue >= bp->rx_nr_rings) { 2024 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue); 2025 *ret = -EINVAL; 2026 goto exit; 2027 } 2028 2029 vnic0 = &bp->vnic_info[0]; 2030 vnic = &bp->vnic_info[efilter->queue]; 2031 if (vnic == NULL) { 2032 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue); 2033 *ret = -EINVAL; 2034 goto exit; 2035 } 2036 2037 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { 2038 STAILQ_FOREACH(mfilter, &vnic0->filter, next) { 2039 if ((!memcmp(efilter->mac_addr.addr_bytes, 2040 mfilter->l2_addr, RTE_ETHER_ADDR_LEN) && 2041 mfilter->flags == 2042 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP && 2043 mfilter->ethertype == efilter->ether_type)) { 2044 match = 1; 2045 break; 2046 } 2047 } 2048 } else { 2049 STAILQ_FOREACH(mfilter, &vnic->filter, next) 2050 if ((!memcmp(efilter->mac_addr.addr_bytes, 2051 mfilter->l2_addr, RTE_ETHER_ADDR_LEN) && 2052 mfilter->ethertype == 
efilter->ether_type && 2053 mfilter->flags == 2054 HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) { 2055 match = 1; 2056 break; 2057 } 2058 } 2059 2060 if (match) 2061 *ret = -EEXIST; 2062 2063 exit: 2064 return mfilter; 2065 } 2066 2067 static int 2068 bnxt_ethertype_filter(struct rte_eth_dev *dev, 2069 enum rte_filter_op filter_op, 2070 void *arg) 2071 { 2072 struct bnxt *bp = dev->data->dev_private; 2073 struct rte_eth_ethertype_filter *efilter = 2074 (struct rte_eth_ethertype_filter *)arg; 2075 struct bnxt_filter_info *bfilter, *filter1; 2076 struct bnxt_vnic_info *vnic, *vnic0; 2077 int ret; 2078 2079 if (filter_op == RTE_ETH_FILTER_NOP) 2080 return 0; 2081 2082 if (arg == NULL) { 2083 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 2084 filter_op); 2085 return -EINVAL; 2086 } 2087 2088 vnic0 = &bp->vnic_info[0]; 2089 vnic = &bp->vnic_info[efilter->queue]; 2090 2091 switch (filter_op) { 2092 case RTE_ETH_FILTER_ADD: 2093 bnxt_match_and_validate_ether_filter(bp, efilter, 2094 vnic0, vnic, &ret); 2095 if (ret < 0) 2096 return ret; 2097 2098 bfilter = bnxt_get_unused_filter(bp); 2099 if (bfilter == NULL) { 2100 PMD_DRV_LOG(ERR, 2101 "Not enough resources for a new filter.\n"); 2102 return -ENOMEM; 2103 } 2104 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; 2105 memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes, 2106 RTE_ETHER_ADDR_LEN); 2107 memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes, 2108 RTE_ETHER_ADDR_LEN); 2109 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; 2110 bfilter->ethertype = efilter->ether_type; 2111 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2112 2113 filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0); 2114 if (filter1 == NULL) { 2115 ret = -EINVAL; 2116 goto cleanup; 2117 } 2118 bfilter->enables |= 2119 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2120 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; 2121 2122 bfilter->dst_id = vnic->fw_vnic_id; 2123 2124 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { 2125 bfilter->flags = 2126 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 2127 } 2128 2129 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); 2130 if (ret) 2131 goto cleanup; 2132 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); 2133 break; 2134 case RTE_ETH_FILTER_DELETE: 2135 filter1 = bnxt_match_and_validate_ether_filter(bp, efilter, 2136 vnic0, vnic, &ret); 2137 if (ret == -EEXIST) { 2138 ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1); 2139 2140 STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info, 2141 next); 2142 bnxt_free_filter(bp, filter1); 2143 } else if (ret == 0) { 2144 PMD_DRV_LOG(ERR, "No matching filter found\n"); 2145 } 2146 break; 2147 default: 2148 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 2149 ret = -EINVAL; 2150 goto error; 2151 } 2152 return ret; 2153 cleanup: 2154 bnxt_free_filter(bp, bfilter); 2155 error: 2156 return ret; 2157 } 2158 2159 static inline int 2160 parse_ntuple_filter(struct bnxt *bp, 2161 struct rte_eth_ntuple_filter *nfilter, 2162 struct bnxt_filter_info *bfilter) 2163 { 2164 uint32_t en = 0; 2165 2166 if (nfilter->queue >= bp->rx_nr_rings) { 2167 PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue); 2168 return -EINVAL; 2169 } 2170 2171 switch (nfilter->dst_port_mask) { 2172 case UINT16_MAX: 2173 bfilter->dst_port_mask = -1; 2174 bfilter->dst_port = nfilter->dst_port; 2175 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT | 2176 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2177 break; 2178 default: 2179 PMD_DRV_LOG(ERR, "invalid dst_port 
mask."); 2180 return -EINVAL; 2181 } 2182 2183 bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2184 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2185 2186 switch (nfilter->proto_mask) { 2187 case UINT8_MAX: 2188 if (nfilter->proto == 17) /* IPPROTO_UDP */ 2189 bfilter->ip_protocol = 17; 2190 else if (nfilter->proto == 6) /* IPPROTO_TCP */ 2191 bfilter->ip_protocol = 6; 2192 else 2193 return -EINVAL; 2194 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2195 break; 2196 default: 2197 PMD_DRV_LOG(ERR, "invalid protocol mask."); 2198 return -EINVAL; 2199 } 2200 2201 switch (nfilter->dst_ip_mask) { 2202 case UINT32_MAX: 2203 bfilter->dst_ipaddr_mask[0] = -1; 2204 bfilter->dst_ipaddr[0] = nfilter->dst_ip; 2205 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR | 2206 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2207 break; 2208 default: 2209 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 2210 return -EINVAL; 2211 } 2212 2213 switch (nfilter->src_ip_mask) { 2214 case UINT32_MAX: 2215 bfilter->src_ipaddr_mask[0] = -1; 2216 bfilter->src_ipaddr[0] = nfilter->src_ip; 2217 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | 2218 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2219 break; 2220 default: 2221 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 2222 return -EINVAL; 2223 } 2224 2225 switch (nfilter->src_port_mask) { 2226 case UINT16_MAX: 2227 bfilter->src_port_mask = -1; 2228 bfilter->src_port = nfilter->src_port; 2229 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | 2230 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2231 break; 2232 default: 2233 PMD_DRV_LOG(ERR, "invalid src_port mask."); 2234 return -EINVAL; 2235 } 2236 2237 //TODO Priority 2238 //nfilter->priority = (uint8_t)filter->priority; 2239 2240 bfilter->enables = en; 2241 return 0; 2242 } 2243 2244 static struct bnxt_filter_info* 2245 bnxt_match_ntuple_filter(struct bnxt *bp, 2246 struct bnxt_filter_info *bfilter, 2247 struct bnxt_vnic_info **mvnic) 2248 { 2249 struct bnxt_filter_info *mfilter = NULL; 2250 int i; 2251 2252 for (i = bp->nr_vnics - 1; i >= 0; i--) { 2253 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2254 STAILQ_FOREACH(mfilter, &vnic->filter, next) { 2255 if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] && 2256 bfilter->src_ipaddr_mask[0] == 2257 mfilter->src_ipaddr_mask[0] && 2258 bfilter->src_port == mfilter->src_port && 2259 bfilter->src_port_mask == mfilter->src_port_mask && 2260 bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] && 2261 bfilter->dst_ipaddr_mask[0] == 2262 mfilter->dst_ipaddr_mask[0] && 2263 bfilter->dst_port == mfilter->dst_port && 2264 bfilter->dst_port_mask == mfilter->dst_port_mask && 2265 bfilter->flags == mfilter->flags && 2266 bfilter->enables == mfilter->enables) { 2267 if (mvnic) 2268 *mvnic = vnic; 2269 return mfilter; 2270 } 2271 } 2272 } 2273 return NULL; 2274 } 2275 2276 static int 2277 bnxt_cfg_ntuple_filter(struct bnxt *bp, 2278 struct rte_eth_ntuple_filter *nfilter, 2279 enum rte_filter_op filter_op) 2280 { 2281 struct bnxt_filter_info *bfilter, *mfilter, *filter1; 2282 struct bnxt_vnic_info *vnic, *vnic0, *mvnic; 2283 int ret; 2284 2285 if (nfilter->flags != RTE_5TUPLE_FLAGS) { 2286 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 2287 return -EINVAL; 2288 } 2289 2290 if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { 2291 PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n"); 2292 return -EINVAL; 2293 } 2294 2295 bfilter = bnxt_get_unused_filter(bp); 2296 if (bfilter == NULL) { 2297 PMD_DRV_LOG(ERR, 2298 "Not enough resources for a new filter.\n"); 2299 return -ENOMEM; 2300 } 2301 ret = 
parse_ntuple_filter(bp, nfilter, bfilter); 2302 if (ret < 0) 2303 goto free_filter; 2304 2305 vnic = &bp->vnic_info[nfilter->queue]; 2306 vnic0 = &bp->vnic_info[0]; 2307 filter1 = STAILQ_FIRST(&vnic0->filter); 2308 if (filter1 == NULL) { 2309 ret = -EINVAL; 2310 goto free_filter; 2311 } 2312 2313 bfilter->dst_id = vnic->fw_vnic_id; 2314 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; 2315 bfilter->enables |= 2316 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2317 bfilter->ethertype = 0x800; 2318 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2319 2320 mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic); 2321 2322 if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && 2323 bfilter->dst_id == mfilter->dst_id) { 2324 PMD_DRV_LOG(ERR, "filter exists.\n"); 2325 ret = -EEXIST; 2326 goto free_filter; 2327 } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && 2328 bfilter->dst_id != mfilter->dst_id) { 2329 mfilter->dst_id = vnic->fw_vnic_id; 2330 ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter); 2331 STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next); 2332 STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next); 2333 PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n"); 2334 PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n"); 2335 goto free_filter; 2336 } 2337 if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) { 2338 PMD_DRV_LOG(ERR, "filter doesn't exist."); 2339 ret = -ENOENT; 2340 goto free_filter; 2341 } 2342 2343 if (filter_op == RTE_ETH_FILTER_ADD) { 2344 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; 2345 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); 2346 if (ret) 2347 goto free_filter; 2348 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); 2349 } else { 2350 if (mfilter == NULL) { 2351 /* This should not happen. But for Coverity! 
*/ 2352 ret = -ENOENT; 2353 goto free_filter; 2354 } 2355 ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter); 2356 2357 STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next); 2358 bnxt_free_filter(bp, mfilter); 2359 mfilter->fw_l2_filter_id = -1; 2360 bnxt_free_filter(bp, bfilter); 2361 bfilter->fw_l2_filter_id = -1; 2362 } 2363 2364 return 0; 2365 free_filter: 2366 bfilter->fw_l2_filter_id = -1; 2367 bnxt_free_filter(bp, bfilter); 2368 return ret; 2369 } 2370 2371 static int 2372 bnxt_ntuple_filter(struct rte_eth_dev *dev, 2373 enum rte_filter_op filter_op, 2374 void *arg) 2375 { 2376 struct bnxt *bp = dev->data->dev_private; 2377 int ret; 2378 2379 if (filter_op == RTE_ETH_FILTER_NOP) 2380 return 0; 2381 2382 if (arg == NULL) { 2383 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 2384 filter_op); 2385 return -EINVAL; 2386 } 2387 2388 switch (filter_op) { 2389 case RTE_ETH_FILTER_ADD: 2390 ret = bnxt_cfg_ntuple_filter(bp, 2391 (struct rte_eth_ntuple_filter *)arg, 2392 filter_op); 2393 break; 2394 case RTE_ETH_FILTER_DELETE: 2395 ret = bnxt_cfg_ntuple_filter(bp, 2396 (struct rte_eth_ntuple_filter *)arg, 2397 filter_op); 2398 break; 2399 default: 2400 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 2401 ret = -EINVAL; 2402 break; 2403 } 2404 return ret; 2405 } 2406 2407 static int 2408 bnxt_parse_fdir_filter(struct bnxt *bp, 2409 struct rte_eth_fdir_filter *fdir, 2410 struct bnxt_filter_info *filter) 2411 { 2412 enum rte_fdir_mode fdir_mode = 2413 bp->eth_dev->data->dev_conf.fdir_conf.mode; 2414 struct bnxt_vnic_info *vnic0, *vnic; 2415 struct bnxt_filter_info *filter1; 2416 uint32_t en = 0; 2417 int i; 2418 2419 if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2420 return -EINVAL; 2421 2422 filter->l2_ovlan = fdir->input.flow_ext.vlan_tci; 2423 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; 2424 2425 switch (fdir->input.flow_type) { 2426 case RTE_ETH_FLOW_IPV4: 2427 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 2428 /* FALLTHROUGH */ 2429 filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip; 2430 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2431 filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip; 2432 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2433 filter->ip_protocol = fdir->input.flow.ip4_flow.proto; 2434 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2435 filter->ip_addr_type = 2436 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2437 filter->src_ipaddr_mask[0] = 0xffffffff; 2438 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2439 filter->dst_ipaddr_mask[0] = 0xffffffff; 2440 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2441 filter->ethertype = 0x800; 2442 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2443 break; 2444 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 2445 filter->src_port = fdir->input.flow.tcp4_flow.src_port; 2446 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2447 filter->dst_port = fdir->input.flow.tcp4_flow.dst_port; 2448 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2449 filter->dst_port_mask = 0xffff; 2450 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2451 filter->src_port_mask = 0xffff; 2452 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2453 filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip; 2454 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2455 filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip; 2456 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2457 filter->ip_protocol = 6; 2458 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2459 filter->ip_addr_type = 2460 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2461 filter->src_ipaddr_mask[0] = 
0xffffffff; 2462 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2463 filter->dst_ipaddr_mask[0] = 0xffffffff; 2464 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2465 filter->ethertype = 0x800; 2466 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2467 break; 2468 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 2469 filter->src_port = fdir->input.flow.udp4_flow.src_port; 2470 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2471 filter->dst_port = fdir->input.flow.udp4_flow.dst_port; 2472 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2473 filter->dst_port_mask = 0xffff; 2474 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2475 filter->src_port_mask = 0xffff; 2476 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2477 filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip; 2478 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2479 filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip; 2480 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2481 filter->ip_protocol = 17; 2482 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2483 filter->ip_addr_type = 2484 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2485 filter->src_ipaddr_mask[0] = 0xffffffff; 2486 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2487 filter->dst_ipaddr_mask[0] = 0xffffffff; 2488 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2489 filter->ethertype = 0x800; 2490 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2491 break; 2492 case RTE_ETH_FLOW_IPV6: 2493 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 2494 /* FALLTHROUGH */ 2495 filter->ip_addr_type = 2496 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 2497 filter->ip_protocol = fdir->input.flow.ipv6_flow.proto; 2498 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2499 rte_memcpy(filter->src_ipaddr, 2500 fdir->input.flow.ipv6_flow.src_ip, 16); 2501 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2502 rte_memcpy(filter->dst_ipaddr, 2503 fdir->input.flow.ipv6_flow.dst_ip, 16); 2504 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2505 memset(filter->dst_ipaddr_mask, 0xff, 16); 2506 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2507 memset(filter->src_ipaddr_mask, 0xff, 16); 2508 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2509 filter->ethertype = 0x86dd; 2510 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2511 break; 2512 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 2513 filter->src_port = fdir->input.flow.tcp6_flow.src_port; 2514 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2515 filter->dst_port = fdir->input.flow.tcp6_flow.dst_port; 2516 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2517 filter->dst_port_mask = 0xffff; 2518 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2519 filter->src_port_mask = 0xffff; 2520 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2521 filter->ip_addr_type = 2522 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 2523 filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto; 2524 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2525 rte_memcpy(filter->src_ipaddr, 2526 fdir->input.flow.tcp6_flow.ip.src_ip, 16); 2527 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2528 rte_memcpy(filter->dst_ipaddr, 2529 fdir->input.flow.tcp6_flow.ip.dst_ip, 16); 2530 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2531 memset(filter->dst_ipaddr_mask, 0xff, 16); 2532 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2533 memset(filter->src_ipaddr_mask, 0xff, 16); 2534 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2535 filter->ethertype = 0x86dd; 2536 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2537 break; 2538 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 2539 filter->src_port = 
fdir->input.flow.udp6_flow.src_port; 2540 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2541 filter->dst_port = fdir->input.flow.udp6_flow.dst_port; 2542 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2543 filter->dst_port_mask = 0xffff; 2544 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2545 filter->src_port_mask = 0xffff; 2546 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2547 filter->ip_addr_type = 2548 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 2549 filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto; 2550 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2551 rte_memcpy(filter->src_ipaddr, 2552 fdir->input.flow.udp6_flow.ip.src_ip, 16); 2553 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2554 rte_memcpy(filter->dst_ipaddr, 2555 fdir->input.flow.udp6_flow.ip.dst_ip, 16); 2556 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2557 memset(filter->dst_ipaddr_mask, 0xff, 16); 2558 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2559 memset(filter->src_ipaddr_mask, 0xff, 16); 2560 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2561 filter->ethertype = 0x86dd; 2562 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2563 break; 2564 case RTE_ETH_FLOW_L2_PAYLOAD: 2565 filter->ethertype = fdir->input.flow.l2_flow.ether_type; 2566 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2567 break; 2568 case RTE_ETH_FLOW_VXLAN: 2569 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 2570 return -EINVAL; 2571 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 2572 filter->tunnel_type = 2573 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 2574 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 2575 break; 2576 case RTE_ETH_FLOW_NVGRE: 2577 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 2578 return -EINVAL; 2579 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 2580 filter->tunnel_type = 2581 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 2582 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 2583 break; 2584 case RTE_ETH_FLOW_UNKNOWN: 2585 case RTE_ETH_FLOW_RAW: 2586 case RTE_ETH_FLOW_FRAG_IPV4: 2587 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: 2588 case RTE_ETH_FLOW_FRAG_IPV6: 2589 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: 2590 case RTE_ETH_FLOW_IPV6_EX: 2591 case RTE_ETH_FLOW_IPV6_TCP_EX: 2592 case RTE_ETH_FLOW_IPV6_UDP_EX: 2593 case RTE_ETH_FLOW_GENEVE: 2594 /* FALLTHROUGH */ 2595 default: 2596 return -EINVAL; 2597 } 2598 2599 vnic0 = &bp->vnic_info[0]; 2600 vnic = &bp->vnic_info[fdir->action.rx_queue]; 2601 if (vnic == NULL) { 2602 PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue); 2603 return -EINVAL; 2604 } 2605 2606 2607 if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 2608 rte_memcpy(filter->dst_macaddr, 2609 fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6); 2610 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; 2611 } 2612 2613 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) { 2614 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 2615 filter1 = STAILQ_FIRST(&vnic0->filter); 2616 //filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 2617 } else { 2618 filter->dst_id = vnic->fw_vnic_id; 2619 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 2620 if (filter->dst_macaddr[i] == 0x00) 2621 filter1 = STAILQ_FIRST(&vnic0->filter); 2622 else 2623 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 2624 } 2625 2626 if (filter1 == NULL) 2627 return -EINVAL; 2628 2629 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2630 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 2631 2632 filter->enables = en; 2633 2634 return 0; 2635 } 2636 2637 static struct 
bnxt_filter_info * 2638 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf, 2639 struct bnxt_vnic_info **mvnic) 2640 { 2641 struct bnxt_filter_info *mf = NULL; 2642 int i; 2643 2644 for (i = bp->nr_vnics - 1; i >= 0; i--) { 2645 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2646 2647 STAILQ_FOREACH(mf, &vnic->filter, next) { 2648 if (mf->filter_type == nf->filter_type && 2649 mf->flags == nf->flags && 2650 mf->src_port == nf->src_port && 2651 mf->src_port_mask == nf->src_port_mask && 2652 mf->dst_port == nf->dst_port && 2653 mf->dst_port_mask == nf->dst_port_mask && 2654 mf->ip_protocol == nf->ip_protocol && 2655 mf->ip_addr_type == nf->ip_addr_type && 2656 mf->ethertype == nf->ethertype && 2657 mf->vni == nf->vni && 2658 mf->tunnel_type == nf->tunnel_type && 2659 mf->l2_ovlan == nf->l2_ovlan && 2660 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 2661 mf->l2_ivlan == nf->l2_ivlan && 2662 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 2663 !memcmp(mf->l2_addr, nf->l2_addr, 2664 RTE_ETHER_ADDR_LEN) && 2665 !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, 2666 RTE_ETHER_ADDR_LEN) && 2667 !memcmp(mf->src_macaddr, nf->src_macaddr, 2668 RTE_ETHER_ADDR_LEN) && 2669 !memcmp(mf->dst_macaddr, nf->dst_macaddr, 2670 RTE_ETHER_ADDR_LEN) && 2671 !memcmp(mf->src_ipaddr, nf->src_ipaddr, 2672 sizeof(nf->src_ipaddr)) && 2673 !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, 2674 sizeof(nf->src_ipaddr_mask)) && 2675 !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, 2676 sizeof(nf->dst_ipaddr)) && 2677 !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, 2678 sizeof(nf->dst_ipaddr_mask))) { 2679 if (mvnic) 2680 *mvnic = vnic; 2681 return mf; 2682 } 2683 } 2684 } 2685 return NULL; 2686 } 2687 2688 static int 2689 bnxt_fdir_filter(struct rte_eth_dev *dev, 2690 enum rte_filter_op filter_op, 2691 void *arg) 2692 { 2693 struct bnxt *bp = dev->data->dev_private; 2694 struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg; 2695 struct bnxt_filter_info *filter, *match; 2696 struct bnxt_vnic_info *vnic, *mvnic; 2697 int ret = 0, i; 2698 2699 if (filter_op == RTE_ETH_FILTER_NOP) 2700 return 0; 2701 2702 if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) 2703 return -EINVAL; 2704 2705 switch (filter_op) { 2706 case RTE_ETH_FILTER_ADD: 2707 case RTE_ETH_FILTER_DELETE: 2708 /* FALLTHROUGH */ 2709 filter = bnxt_get_unused_filter(bp); 2710 if (filter == NULL) { 2711 PMD_DRV_LOG(ERR, 2712 "Not enough resources for a new flow.\n"); 2713 return -ENOMEM; 2714 } 2715 2716 ret = bnxt_parse_fdir_filter(bp, fdir, filter); 2717 if (ret != 0) 2718 goto free_filter; 2719 filter->filter_type = HWRM_CFA_NTUPLE_FILTER; 2720 2721 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 2722 vnic = &bp->vnic_info[0]; 2723 else 2724 vnic = &bp->vnic_info[fdir->action.rx_queue]; 2725 2726 match = bnxt_match_fdir(bp, filter, &mvnic); 2727 if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) { 2728 if (match->dst_id == vnic->fw_vnic_id) { 2729 PMD_DRV_LOG(ERR, "Flow already exists.\n"); 2730 ret = -EEXIST; 2731 goto free_filter; 2732 } else { 2733 match->dst_id = vnic->fw_vnic_id; 2734 ret = bnxt_hwrm_set_ntuple_filter(bp, 2735 match->dst_id, 2736 match); 2737 STAILQ_REMOVE(&mvnic->filter, match, 2738 bnxt_filter_info, next); 2739 STAILQ_INSERT_TAIL(&vnic->filter, match, next); 2740 PMD_DRV_LOG(ERR, 2741 "Filter with matching pattern exist\n"); 2742 PMD_DRV_LOG(ERR, 2743 "Updated it to new destination q\n"); 2744 goto free_filter; 2745 } 2746 } 2747 if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) { 2748 PMD_DRV_LOG(ERR, "Flow does not 
exist.\n"); 2749 ret = -ENOENT; 2750 goto free_filter; 2751 } 2752 2753 if (filter_op == RTE_ETH_FILTER_ADD) { 2754 ret = bnxt_hwrm_set_ntuple_filter(bp, 2755 filter->dst_id, 2756 filter); 2757 if (ret) 2758 goto free_filter; 2759 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2760 } else { 2761 ret = bnxt_hwrm_clear_ntuple_filter(bp, match); 2762 STAILQ_REMOVE(&vnic->filter, match, 2763 bnxt_filter_info, next); 2764 bnxt_free_filter(bp, match); 2765 filter->fw_l2_filter_id = -1; 2766 bnxt_free_filter(bp, filter); 2767 } 2768 break; 2769 case RTE_ETH_FILTER_FLUSH: 2770 for (i = bp->nr_vnics - 1; i >= 0; i--) { 2771 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2772 2773 STAILQ_FOREACH(filter, &vnic->filter, next) { 2774 if (filter->filter_type == 2775 HWRM_CFA_NTUPLE_FILTER) { 2776 ret = 2777 bnxt_hwrm_clear_ntuple_filter(bp, 2778 filter); 2779 STAILQ_REMOVE(&vnic->filter, filter, 2780 bnxt_filter_info, next); 2781 } 2782 } 2783 } 2784 return ret; 2785 case RTE_ETH_FILTER_UPDATE: 2786 case RTE_ETH_FILTER_STATS: 2787 case RTE_ETH_FILTER_INFO: 2788 PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op); 2789 break; 2790 default: 2791 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); 2792 ret = -EINVAL; 2793 break; 2794 } 2795 return ret; 2796 2797 free_filter: 2798 filter->fw_l2_filter_id = -1; 2799 bnxt_free_filter(bp, filter); 2800 return ret; 2801 } 2802 2803 static int 2804 bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused, 2805 enum rte_filter_type filter_type, 2806 enum rte_filter_op filter_op, void *arg) 2807 { 2808 int ret = 0; 2809 2810 switch (filter_type) { 2811 case RTE_ETH_FILTER_TUNNEL: 2812 PMD_DRV_LOG(ERR, 2813 "filter type: %d: To be implemented\n", filter_type); 2814 break; 2815 case RTE_ETH_FILTER_FDIR: 2816 ret = bnxt_fdir_filter(dev, filter_op, arg); 2817 break; 2818 case RTE_ETH_FILTER_NTUPLE: 2819 ret = bnxt_ntuple_filter(dev, filter_op, arg); 2820 break; 2821 case RTE_ETH_FILTER_ETHERTYPE: 2822 ret = bnxt_ethertype_filter(dev, filter_op, arg); 2823 break; 2824 case RTE_ETH_FILTER_GENERIC: 2825 if (filter_op != RTE_ETH_FILTER_GET) 2826 return -EINVAL; 2827 *(const void **)arg = &bnxt_flow_ops; 2828 break; 2829 default: 2830 PMD_DRV_LOG(ERR, 2831 "Filter type (%d) not supported", filter_type); 2832 ret = -EINVAL; 2833 break; 2834 } 2835 return ret; 2836 } 2837 2838 static const uint32_t * 2839 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 2840 { 2841 static const uint32_t ptypes[] = { 2842 RTE_PTYPE_L2_ETHER_VLAN, 2843 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 2844 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 2845 RTE_PTYPE_L4_ICMP, 2846 RTE_PTYPE_L4_TCP, 2847 RTE_PTYPE_L4_UDP, 2848 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 2849 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 2850 RTE_PTYPE_INNER_L4_ICMP, 2851 RTE_PTYPE_INNER_L4_TCP, 2852 RTE_PTYPE_INNER_L4_UDP, 2853 RTE_PTYPE_UNKNOWN 2854 }; 2855 2856 if (!dev->rx_pkt_burst) 2857 return NULL; 2858 2859 return ptypes; 2860 } 2861 2862 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 2863 int reg_win) 2864 { 2865 uint32_t reg_base = *reg_arr & 0xfffff000; 2866 uint32_t win_off; 2867 int i; 2868 2869 for (i = 0; i < count; i++) { 2870 if ((reg_arr[i] & 0xfffff000) != reg_base) 2871 return -ERANGE; 2872 } 2873 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 2874 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 2875 return 0; 2876 } 2877 2878 static int bnxt_map_ptp_regs(struct bnxt *bp) 2879 { 2880 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2881 uint32_t *reg_arr; 2882 int rc, i; 2883 2884 
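	/*
	 * Map the PTP RX and TX timestamp register blocks through GRC
	 * windows 5 and 6 so the driver can read them at the fixed
	 * 0x5000/0x6000 window offsets computed below; bnxt_map_regs()
	 * succeeds only when every register in a block falls within the
	 * same 4K page.
	 */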
reg_arr = ptp->rx_regs; 2885 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 2886 if (rc) 2887 return rc; 2888 2889 reg_arr = ptp->tx_regs; 2890 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 2891 if (rc) 2892 return rc; 2893 2894 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 2895 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 2896 2897 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 2898 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 2899 2900 return 0; 2901 } 2902 2903 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 2904 { 2905 rte_write32(0, (uint8_t *)bp->bar0 + 2906 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 2907 rte_write32(0, (uint8_t *)bp->bar0 + 2908 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 2909 } 2910 2911 static uint64_t bnxt_cc_read(struct bnxt *bp) 2912 { 2913 uint64_t ns; 2914 2915 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2916 BNXT_GRCPF_REG_SYNC_TIME)); 2917 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2918 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 2919 return ns; 2920 } 2921 2922 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 2923 { 2924 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2925 uint32_t fifo; 2926 2927 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2928 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 2929 if (fifo & BNXT_PTP_TX_FIFO_EMPTY) 2930 return -EAGAIN; 2931 2932 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2933 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 2934 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2935 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 2936 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2937 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 2938 2939 return 0; 2940 } 2941 2942 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 2943 { 2944 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2945 struct bnxt_pf_info *pf = &bp->pf; 2946 uint16_t port_id; 2947 uint32_t fifo; 2948 2949 if (!ptp) 2950 return -ENODEV; 2951 2952 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2953 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 2954 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 2955 return -EAGAIN; 2956 2957 port_id = pf->port_id; 2958 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 2959 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 2960 2961 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2962 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 2963 if (fifo & BNXT_PTP_RX_FIFO_PENDING) { 2964 /* bnxt_clr_rx_ts(bp); TBD */ 2965 return -EBUSY; 2966 } 2967 2968 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2969 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 2970 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2971 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 2972 2973 return 0; 2974 } 2975 2976 static int 2977 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 2978 { 2979 uint64_t ns; 2980 struct bnxt *bp = dev->data->dev_private; 2981 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2982 2983 if (!ptp) 2984 return 0; 2985 2986 ns = rte_timespec_to_ns(ts); 2987 /* Set the timecounters to a new value. 
*/ 2988 ptp->tc.nsec = ns; 2989 2990 return 0; 2991 } 2992 2993 static int 2994 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 2995 { 2996 uint64_t ns, systime_cycles; 2997 struct bnxt *bp = dev->data->dev_private; 2998 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2999 3000 if (!ptp) 3001 return 0; 3002 3003 systime_cycles = bnxt_cc_read(bp); 3004 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3005 *ts = rte_ns_to_timespec(ns); 3006 3007 return 0; 3008 } 3009 static int 3010 bnxt_timesync_enable(struct rte_eth_dev *dev) 3011 { 3012 struct bnxt *bp = dev->data->dev_private; 3013 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3014 uint32_t shift = 0; 3015 3016 if (!ptp) 3017 return 0; 3018 3019 ptp->rx_filter = 1; 3020 ptp->tx_tstamp_en = 1; 3021 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3022 3023 if (!bnxt_hwrm_ptp_cfg(bp)) 3024 bnxt_map_ptp_regs(bp); 3025 3026 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 3027 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3028 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3029 3030 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3031 ptp->tc.cc_shift = shift; 3032 ptp->tc.nsec_mask = (1ULL << shift) - 1; 3033 3034 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3035 ptp->rx_tstamp_tc.cc_shift = shift; 3036 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3037 3038 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3039 ptp->tx_tstamp_tc.cc_shift = shift; 3040 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3041 3042 return 0; 3043 } 3044 3045 static int 3046 bnxt_timesync_disable(struct rte_eth_dev *dev) 3047 { 3048 struct bnxt *bp = dev->data->dev_private; 3049 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3050 3051 if (!ptp) 3052 return 0; 3053 3054 ptp->rx_filter = 0; 3055 ptp->tx_tstamp_en = 0; 3056 ptp->rxctl = 0; 3057 3058 bnxt_hwrm_ptp_cfg(bp); 3059 3060 bnxt_unmap_ptp_regs(bp); 3061 3062 return 0; 3063 } 3064 3065 static int 3066 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3067 struct timespec *timestamp, 3068 uint32_t flags __rte_unused) 3069 { 3070 struct bnxt *bp = dev->data->dev_private; 3071 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3072 uint64_t rx_tstamp_cycles = 0; 3073 uint64_t ns; 3074 3075 if (!ptp) 3076 return 0; 3077 3078 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 3079 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 3080 *timestamp = rte_ns_to_timespec(ns); 3081 return 0; 3082 } 3083 3084 static int 3085 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3086 struct timespec *timestamp) 3087 { 3088 struct bnxt *bp = dev->data->dev_private; 3089 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3090 uint64_t tx_tstamp_cycles = 0; 3091 uint64_t ns; 3092 3093 if (!ptp) 3094 return 0; 3095 3096 bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 3097 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 3098 *timestamp = rte_ns_to_timespec(ns); 3099 3100 return 0; 3101 } 3102 3103 static int 3104 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 3105 { 3106 struct bnxt *bp = dev->data->dev_private; 3107 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3108 3109 if (!ptp) 3110 return 0; 3111 3112 ptp->tc.nsec += delta; 3113 3114 return 0; 3115 } 3116 3117 static int 3118 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 3119 { 3120 struct bnxt *bp = dev->data->dev_private; 3121 int rc; 3122 uint32_t dir_entries; 3123 uint32_t entry_length; 3124 3125 PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n", 3126 bp->pdev->addr.domain, bp->pdev->addr.bus, 3127 
bp->pdev->addr.devid, bp->pdev->addr.function); 3128 3129 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 3130 if (rc != 0) 3131 return rc; 3132 3133 return dir_entries * entry_length; 3134 } 3135 3136 static int 3137 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 3138 struct rte_dev_eeprom_info *in_eeprom) 3139 { 3140 struct bnxt *bp = dev->data->dev_private; 3141 uint32_t index; 3142 uint32_t offset; 3143 3144 PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d " 3145 "len = %d\n", bp->pdev->addr.domain, 3146 bp->pdev->addr.bus, bp->pdev->addr.devid, 3147 bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); 3148 3149 if (in_eeprom->offset == 0) /* special offset value to get directory */ 3150 return bnxt_get_nvram_directory(bp, in_eeprom->length, 3151 in_eeprom->data); 3152 3153 index = in_eeprom->offset >> 24; 3154 offset = in_eeprom->offset & 0xffffff; 3155 3156 if (index != 0) 3157 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 3158 in_eeprom->length, in_eeprom->data); 3159 3160 return 0; 3161 } 3162 3163 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 3164 { 3165 switch (dir_type) { 3166 case BNX_DIR_TYPE_CHIMP_PATCH: 3167 case BNX_DIR_TYPE_BOOTCODE: 3168 case BNX_DIR_TYPE_BOOTCODE_2: 3169 case BNX_DIR_TYPE_APE_FW: 3170 case BNX_DIR_TYPE_APE_PATCH: 3171 case BNX_DIR_TYPE_KONG_FW: 3172 case BNX_DIR_TYPE_KONG_PATCH: 3173 case BNX_DIR_TYPE_BONO_FW: 3174 case BNX_DIR_TYPE_BONO_PATCH: 3175 /* FALLTHROUGH */ 3176 return true; 3177 } 3178 3179 return false; 3180 } 3181 3182 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 3183 { 3184 switch (dir_type) { 3185 case BNX_DIR_TYPE_AVS: 3186 case BNX_DIR_TYPE_EXP_ROM_MBA: 3187 case BNX_DIR_TYPE_PCIE: 3188 case BNX_DIR_TYPE_TSCF_UCODE: 3189 case BNX_DIR_TYPE_EXT_PHY: 3190 case BNX_DIR_TYPE_CCM: 3191 case BNX_DIR_TYPE_ISCSI_BOOT: 3192 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3193 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3194 /* FALLTHROUGH */ 3195 return true; 3196 } 3197 3198 return false; 3199 } 3200 3201 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 3202 { 3203 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3204 bnxt_dir_type_is_other_exec_format(dir_type); 3205 } 3206 3207 static int 3208 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 3209 struct rte_dev_eeprom_info *in_eeprom) 3210 { 3211 struct bnxt *bp = dev->data->dev_private; 3212 uint8_t index, dir_op; 3213 uint16_t type, ext, ordinal, attr; 3214 3215 PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d " 3216 "len = %d\n", bp->pdev->addr.domain, 3217 bp->pdev->addr.bus, bp->pdev->addr.devid, 3218 bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); 3219 3220 if (!BNXT_PF(bp)) { 3221 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 3222 return -EINVAL; 3223 } 3224 3225 type = in_eeprom->magic >> 16; 3226 3227 if (type == 0xffff) { /* special value for directory operations */ 3228 index = in_eeprom->magic & 0xff; 3229 dir_op = in_eeprom->magic >> 8; 3230 if (index == 0) 3231 return -EINVAL; 3232 switch (dir_op) { 3233 case 0x0e: /* erase */ 3234 if (in_eeprom->offset != ~in_eeprom->magic) 3235 return -EINVAL; 3236 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 3237 default: 3238 return -EINVAL; 3239 } 3240 } 3241 3242 /* Create or re-write an NVM item: */ 3243 if (bnxt_dir_type_is_executable(type) == true) 3244 return -EOPNOTSUPP; 3245 ext = in_eeprom->magic & 0xffff; 3246 ordinal = in_eeprom->offset >> 16; 3247 attr = in_eeprom->offset & 0xffff; 3248 3249 return 
bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 3250 in_eeprom->data, in_eeprom->length); 3251 } 3252 3253 /* 3254 * Initialization 3255 */ 3256 3257 static const struct eth_dev_ops bnxt_dev_ops = { 3258 .dev_infos_get = bnxt_dev_info_get_op, 3259 .dev_close = bnxt_dev_close_op, 3260 .dev_configure = bnxt_dev_configure_op, 3261 .dev_start = bnxt_dev_start_op, 3262 .dev_stop = bnxt_dev_stop_op, 3263 .dev_set_link_up = bnxt_dev_set_link_up_op, 3264 .dev_set_link_down = bnxt_dev_set_link_down_op, 3265 .stats_get = bnxt_stats_get_op, 3266 .stats_reset = bnxt_stats_reset_op, 3267 .rx_queue_setup = bnxt_rx_queue_setup_op, 3268 .rx_queue_release = bnxt_rx_queue_release_op, 3269 .tx_queue_setup = bnxt_tx_queue_setup_op, 3270 .tx_queue_release = bnxt_tx_queue_release_op, 3271 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 3272 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 3273 .reta_update = bnxt_reta_update_op, 3274 .reta_query = bnxt_reta_query_op, 3275 .rss_hash_update = bnxt_rss_hash_update_op, 3276 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 3277 .link_update = bnxt_link_update_op, 3278 .promiscuous_enable = bnxt_promiscuous_enable_op, 3279 .promiscuous_disable = bnxt_promiscuous_disable_op, 3280 .allmulticast_enable = bnxt_allmulticast_enable_op, 3281 .allmulticast_disable = bnxt_allmulticast_disable_op, 3282 .mac_addr_add = bnxt_mac_addr_add_op, 3283 .mac_addr_remove = bnxt_mac_addr_remove_op, 3284 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 3285 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 3286 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 3287 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 3288 .vlan_filter_set = bnxt_vlan_filter_set_op, 3289 .vlan_offload_set = bnxt_vlan_offload_set_op, 3290 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 3291 .mtu_set = bnxt_mtu_set_op, 3292 .mac_addr_set = bnxt_set_default_mac_addr_op, 3293 .xstats_get = bnxt_dev_xstats_get_op, 3294 .xstats_get_names = bnxt_dev_xstats_get_names_op, 3295 .xstats_reset = bnxt_dev_xstats_reset_op, 3296 .fw_version_get = bnxt_fw_version_get, 3297 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 3298 .rxq_info_get = bnxt_rxq_info_get_op, 3299 .txq_info_get = bnxt_txq_info_get_op, 3300 .dev_led_on = bnxt_dev_led_on_op, 3301 .dev_led_off = bnxt_dev_led_off_op, 3302 .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op, 3303 .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op, 3304 .rx_queue_count = bnxt_rx_queue_count_op, 3305 .rx_descriptor_status = bnxt_rx_descriptor_status_op, 3306 .tx_descriptor_status = bnxt_tx_descriptor_status_op, 3307 .rx_queue_start = bnxt_rx_queue_start, 3308 .rx_queue_stop = bnxt_rx_queue_stop, 3309 .tx_queue_start = bnxt_tx_queue_start, 3310 .tx_queue_stop = bnxt_tx_queue_stop, 3311 .filter_ctrl = bnxt_filter_ctrl_op, 3312 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 3313 .get_eeprom_length = bnxt_get_eeprom_length_op, 3314 .get_eeprom = bnxt_get_eeprom_op, 3315 .set_eeprom = bnxt_set_eeprom_op, 3316 .timesync_enable = bnxt_timesync_enable, 3317 .timesync_disable = bnxt_timesync_disable, 3318 .timesync_read_time = bnxt_timesync_read_time, 3319 .timesync_write_time = bnxt_timesync_write_time, 3320 .timesync_adjust_time = bnxt_timesync_adjust_time, 3321 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 3322 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 3323 }; 3324 3325 static bool bnxt_vf_pciid(uint16_t id) 3326 { 3327 if (id == BROADCOM_DEV_ID_57304_VF || 3328 id == BROADCOM_DEV_ID_57406_VF || 3329 id == BROADCOM_DEV_ID_5731X_VF 
|| 3330 id == BROADCOM_DEV_ID_5741X_VF || 3331 id == BROADCOM_DEV_ID_57414_VF || 3332 id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 || 3333 id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 || 3334 id == BROADCOM_DEV_ID_58802_VF || 3335 id == BROADCOM_DEV_ID_57500_VF1 || 3336 id == BROADCOM_DEV_ID_57500_VF2) 3337 return true; 3338 return false; 3339 } 3340 3341 bool bnxt_stratus_device(struct bnxt *bp) 3342 { 3343 uint16_t id = bp->pdev->id.device_id; 3344 3345 if (id == BROADCOM_DEV_ID_STRATUS_NIC || 3346 id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 || 3347 id == BROADCOM_DEV_ID_STRATUS_NIC_VF2) 3348 return true; 3349 return false; 3350 } 3351 3352 static int bnxt_init_board(struct rte_eth_dev *eth_dev) 3353 { 3354 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3355 struct bnxt *bp = eth_dev->data->dev_private; 3356 3357 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 3358 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 3359 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 3360 if (!bp->bar0 || !bp->doorbell_base) { 3361 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 3362 return -ENODEV; 3363 } 3364 3365 bp->eth_dev = eth_dev; 3366 bp->pdev = pci_dev; 3367 3368 return 0; 3369 } 3370 3371 static int bnxt_alloc_ctx_mem_blk(__rte_unused struct bnxt *bp, 3372 struct bnxt_ctx_pg_info *ctx_pg, 3373 uint32_t mem_size, 3374 const char *suffix, 3375 uint16_t idx) 3376 { 3377 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 3378 const struct rte_memzone *mz = NULL; 3379 char mz_name[RTE_MEMZONE_NAMESIZE]; 3380 rte_iova_t mz_phys_addr; 3381 uint64_t valid_bits = 0; 3382 uint32_t sz; 3383 int i; 3384 3385 if (!mem_size) 3386 return 0; 3387 3388 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 3389 BNXT_PAGE_SIZE; 3390 rmem->page_size = BNXT_PAGE_SIZE; 3391 rmem->pg_arr = ctx_pg->ctx_pg_arr; 3392 rmem->dma_arr = ctx_pg->ctx_dma_arr; 3393 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 3394 3395 valid_bits = PTU_PTE_VALID; 3396 3397 if (rmem->nr_pages > 1) { 3398 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_tbl%s_%x", 3399 suffix, idx); 3400 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 3401 mz = rte_memzone_lookup(mz_name); 3402 if (!mz) { 3403 mz = rte_memzone_reserve_aligned(mz_name, 3404 rmem->nr_pages * 8, 3405 SOCKET_ID_ANY, 3406 RTE_MEMZONE_2MB | 3407 RTE_MEMZONE_SIZE_HINT_ONLY | 3408 RTE_MEMZONE_IOVA_CONTIG, 3409 BNXT_PAGE_SIZE); 3410 if (mz == NULL) 3411 return -ENOMEM; 3412 } 3413 3414 memset(mz->addr, 0, mz->len); 3415 mz_phys_addr = mz->iova; 3416 if ((unsigned long)mz->addr == mz_phys_addr) { 3417 PMD_DRV_LOG(WARNING, 3418 "Memzone physical address same as virtual.\n"); 3419 PMD_DRV_LOG(WARNING, 3420 "Using rte_mem_virt2iova()\n"); 3421 mz_phys_addr = rte_mem_virt2iova(mz->addr); 3422 if (mz_phys_addr == RTE_BAD_IOVA) { 3423 PMD_DRV_LOG(ERR, 3424 "unable to map addr to phys memory\n"); 3425 return -ENOMEM; 3426 } 3427 } 3428 rte_mem_lock_page(((char *)mz->addr)); 3429 3430 rmem->pg_tbl = mz->addr; 3431 rmem->pg_tbl_map = mz_phys_addr; 3432 rmem->pg_tbl_mz = mz; 3433 } 3434 3435 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x", suffix, idx); 3436 mz = rte_memzone_lookup(mz_name); 3437 if (!mz) { 3438 mz = rte_memzone_reserve_aligned(mz_name, 3439 mem_size, 3440 SOCKET_ID_ANY, 3441 RTE_MEMZONE_1GB | 3442 RTE_MEMZONE_SIZE_HINT_ONLY | 3443 RTE_MEMZONE_IOVA_CONTIG, 3444 BNXT_PAGE_SIZE); 3445 if (mz == NULL) 3446 return -ENOMEM; 3447 } 3448 3449 memset(mz->addr, 0, mz->len); 3450 mz_phys_addr = mz->iova; 3451 if ((unsigned long)mz->addr == mz_phys_addr) { 3452 
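		/*
		 * The memzone's reported IOVA equals its virtual address;
		 * the driver treats this as an unresolved bus address,
		 * locks each backing page, and re-translates the address
		 * with rte_mem_virt2iova() below.
		 */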
PMD_DRV_LOG(WARNING, 3453 "Memzone physical address same as virtual.\n"); 3454 PMD_DRV_LOG(WARNING, 3455 "Using rte_mem_virt2iova()\n"); 3456 for (sz = 0; sz < mem_size; sz += BNXT_PAGE_SIZE) 3457 rte_mem_lock_page(((char *)mz->addr) + sz); 3458 mz_phys_addr = rte_mem_virt2iova(mz->addr); 3459 if (mz_phys_addr == RTE_BAD_IOVA) { 3460 PMD_DRV_LOG(ERR, 3461 "unable to map addr to phys memory\n"); 3462 return -ENOMEM; 3463 } 3464 } 3465 3466 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 3467 rte_mem_lock_page(((char *)mz->addr) + sz); 3468 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 3469 rmem->dma_arr[i] = mz_phys_addr + sz; 3470 3471 if (rmem->nr_pages > 1) { 3472 if (i == rmem->nr_pages - 2 && 3473 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3474 valid_bits |= PTU_PTE_NEXT_TO_LAST; 3475 else if (i == rmem->nr_pages - 1 && 3476 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3477 valid_bits |= PTU_PTE_LAST; 3478 3479 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 3480 valid_bits); 3481 } 3482 } 3483 3484 rmem->mz = mz; 3485 if (rmem->vmem_size) 3486 rmem->vmem = (void **)mz->addr; 3487 rmem->dma_arr[0] = mz_phys_addr; 3488 return 0; 3489 } 3490 3491 static void bnxt_free_ctx_mem(struct bnxt *bp) 3492 { 3493 int i; 3494 3495 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 3496 return; 3497 3498 bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; 3499 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 3500 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 3501 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 3502 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 3503 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 3504 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 3505 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 3506 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 3507 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 3508 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 3509 3510 for (i = 0; i < BNXT_MAX_Q; i++) { 3511 if (bp->ctx->tqm_mem[i]) 3512 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 3513 } 3514 3515 rte_free(bp->ctx); 3516 bp->ctx = NULL; 3517 } 3518 3519 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 3520 3521 #define min_t(type, x, y) ({ \ 3522 type __min1 = (x); \ 3523 type __min2 = (y); \ 3524 __min1 < __min2 ? __min1 : __min2; }) 3525 3526 #define max_t(type, x, y) ({ \ 3527 type __max1 = (x); \ 3528 type __max2 = (y); \ 3529 __max1 > __max2 ? 
__max1 : __max2; }) 3530 3531 #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) 3532 3533 int bnxt_alloc_ctx_mem(struct bnxt *bp) 3534 { 3535 struct bnxt_ctx_pg_info *ctx_pg; 3536 struct bnxt_ctx_mem_info *ctx; 3537 uint32_t mem_size, ena, entries; 3538 int i, rc; 3539 3540 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 3541 if (rc) { 3542 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 3543 return rc; 3544 } 3545 ctx = bp->ctx; 3546 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 3547 return 0; 3548 3549 ctx_pg = &ctx->qp_mem; 3550 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 3551 mem_size = ctx->qp_entry_size * ctx_pg->entries; 3552 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 3553 if (rc) 3554 return rc; 3555 3556 ctx_pg = &ctx->srq_mem; 3557 ctx_pg->entries = ctx->srq_max_l2_entries; 3558 mem_size = ctx->srq_entry_size * ctx_pg->entries; 3559 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 3560 if (rc) 3561 return rc; 3562 3563 ctx_pg = &ctx->cq_mem; 3564 ctx_pg->entries = ctx->cq_max_l2_entries; 3565 mem_size = ctx->cq_entry_size * ctx_pg->entries; 3566 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 3567 if (rc) 3568 return rc; 3569 3570 ctx_pg = &ctx->vnic_mem; 3571 ctx_pg->entries = ctx->vnic_max_vnic_entries + 3572 ctx->vnic_max_ring_table_entries; 3573 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 3574 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 3575 if (rc) 3576 return rc; 3577 3578 ctx_pg = &ctx->stat_mem; 3579 ctx_pg->entries = ctx->stat_max_entries; 3580 mem_size = ctx->stat_entry_size * ctx_pg->entries; 3581 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 3582 if (rc) 3583 return rc; 3584 3585 entries = ctx->qp_max_l2_entries; 3586 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 3587 entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring, 3588 ctx->tqm_max_entries_per_ring); 3589 for (i = 0, ena = 0; i < BNXT_MAX_Q; i++) { 3590 ctx_pg = ctx->tqm_mem[i]; 3591 /* use min tqm entries for now. 
*/ 3592 ctx_pg->entries = entries; 3593 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 3594 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i); 3595 if (rc) 3596 return rc; 3597 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 3598 } 3599 3600 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 3601 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 3602 if (rc) 3603 PMD_DRV_LOG(ERR, 3604 "Failed to configure context mem: rc = %d\n", rc); 3605 else 3606 ctx->flags |= BNXT_CTX_FLAG_INITED; 3607 3608 return rc; 3609 } 3610 3611 static int bnxt_alloc_stats_mem(struct bnxt *bp) 3612 { 3613 struct rte_pci_device *pci_dev = bp->pdev; 3614 char mz_name[RTE_MEMZONE_NAMESIZE]; 3615 const struct rte_memzone *mz = NULL; 3616 uint32_t total_alloc_len; 3617 rte_iova_t mz_phys_addr; 3618 3619 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 3620 return 0; 3621 3622 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 3623 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 3624 pci_dev->addr.bus, pci_dev->addr.devid, 3625 pci_dev->addr.function, "rx_port_stats"); 3626 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 3627 mz = rte_memzone_lookup(mz_name); 3628 total_alloc_len = 3629 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 3630 sizeof(struct rx_port_stats_ext) + 512); 3631 if (!mz) { 3632 mz = rte_memzone_reserve(mz_name, total_alloc_len, 3633 SOCKET_ID_ANY, 3634 RTE_MEMZONE_2MB | 3635 RTE_MEMZONE_SIZE_HINT_ONLY | 3636 RTE_MEMZONE_IOVA_CONTIG); 3637 if (mz == NULL) 3638 return -ENOMEM; 3639 } 3640 memset(mz->addr, 0, mz->len); 3641 mz_phys_addr = mz->iova; 3642 if ((unsigned long)mz->addr == mz_phys_addr) { 3643 PMD_DRV_LOG(WARNING, 3644 "Memzone physical address same as virtual.\n"); 3645 PMD_DRV_LOG(WARNING, 3646 "Using rte_mem_virt2iova()\n"); 3647 mz_phys_addr = rte_mem_virt2iova(mz->addr); 3648 if (mz_phys_addr == RTE_BAD_IOVA) { 3649 PMD_DRV_LOG(ERR, 3650 "Can't map address to physical memory\n"); 3651 return -ENOMEM; 3652 } 3653 } 3654 3655 bp->rx_mem_zone = (const void *)mz; 3656 bp->hw_rx_port_stats = mz->addr; 3657 bp->hw_rx_port_stats_map = mz_phys_addr; 3658 3659 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 3660 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 3661 pci_dev->addr.bus, pci_dev->addr.devid, 3662 pci_dev->addr.function, "tx_port_stats"); 3663 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 3664 mz = rte_memzone_lookup(mz_name); 3665 total_alloc_len = 3666 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 3667 sizeof(struct tx_port_stats_ext) + 512); 3668 if (!mz) { 3669 mz = rte_memzone_reserve(mz_name, 3670 total_alloc_len, 3671 SOCKET_ID_ANY, 3672 RTE_MEMZONE_2MB | 3673 RTE_MEMZONE_SIZE_HINT_ONLY | 3674 RTE_MEMZONE_IOVA_CONTIG); 3675 if (mz == NULL) 3676 return -ENOMEM; 3677 } 3678 memset(mz->addr, 0, mz->len); 3679 mz_phys_addr = mz->iova; 3680 if ((unsigned long)mz->addr == mz_phys_addr) { 3681 PMD_DRV_LOG(WARNING, 3682 "Memzone physical address same as virtual\n"); 3683 PMD_DRV_LOG(WARNING, 3684 "Using rte_mem_virt2iova()\n"); 3685 mz_phys_addr = rte_mem_virt2iova(mz->addr); 3686 if (mz_phys_addr == RTE_BAD_IOVA) { 3687 PMD_DRV_LOG(ERR, 3688 "Can't map address to physical memory\n"); 3689 return -ENOMEM; 3690 } 3691 } 3692 3693 bp->tx_mem_zone = (const void *)mz; 3694 bp->hw_tx_port_stats = mz->addr; 3695 bp->hw_tx_port_stats_map = mz_phys_addr; 3696 bp->flags |= BNXT_FLAG_PORT_STATS; 3697 3698 /* Display extended statistics if FW supports it */ 3699 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 3700 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 3701 !(bp->flags & 
	     BNXT_FLAG_EXT_STATS_SUPPORTED))
		return 0;

	bp->hw_rx_port_stats_ext = (void *)
		((uint8_t *)bp->hw_rx_port_stats +
		 sizeof(struct rx_port_stats));
	bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
		sizeof(struct rx_port_stats);
	bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;

	if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
	    bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
		bp->hw_tx_port_stats_ext = (void *)
			((uint8_t *)bp->hw_tx_port_stats +
			 sizeof(struct tx_port_stats));
		bp->hw_tx_port_stats_ext_map =
			bp->hw_tx_port_stats_map +
			sizeof(struct tx_port_stats);
		bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
	}

	return 0;
}

static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc = 0;

	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
					       RTE_ETHER_ADDR_LEN *
					       bp->max_l2_ctx,
					       0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
		return -ENOMEM;
	}

	if (bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) {
		if (BNXT_PF(bp))
			return -EINVAL;

		/* Generate a random MAC address, if none was assigned by PF */
		PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
		bnxt_eth_hw_addr_random(bp->mac_addr);
		PMD_DRV_LOG(INFO,
			    "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
			    bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
			    bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);

		rc = bnxt_hwrm_set_mac(bp);
		if (!rc)
			memcpy(&bp->eth_dev->data->mac_addrs[0], bp->mac_addr,
			       RTE_ETHER_ADDR_LEN);
		return rc;
	}

	/* Copy the permanent MAC from the FUNC_QCAPS response */
	memcpy(bp->mac_addr, bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN);
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);

	return rc;
}

#define ALLOW_FUNC(x)	\
	{ \
		uint32_t arg = (x); \
		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
	}
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	static int version_printed;
	struct bnxt *bp;
	uint16_t mtu;
	int rc;

	if (version_printed++ == 0)
		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	bp = eth_dev->data->dev_private;

	bp->dev_stopped = 1;

	eth_dev->dev_ops = &bnxt_dev_ops;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;

	/*
	 * For secondary processes, we don't initialise any further
	 * as primary has already done this work.
3796 */ 3797 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 3798 return 0; 3799 3800 if (bnxt_vf_pciid(pci_dev->id.device_id)) 3801 bp->flags |= BNXT_FLAG_VF; 3802 3803 if (pci_dev->id.device_id == BROADCOM_DEV_ID_57508 || 3804 pci_dev->id.device_id == BROADCOM_DEV_ID_57504 || 3805 pci_dev->id.device_id == BROADCOM_DEV_ID_57502 || 3806 pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF1 || 3807 pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF2) 3808 bp->flags |= BNXT_FLAG_THOR_CHIP; 3809 3810 rc = bnxt_init_board(eth_dev); 3811 if (rc) { 3812 PMD_DRV_LOG(ERR, 3813 "Board initialization failed rc: %x\n", rc); 3814 goto error; 3815 } 3816 3817 rc = bnxt_alloc_hwrm_resources(bp); 3818 if (rc) { 3819 PMD_DRV_LOG(ERR, 3820 "hwrm resource allocation failure rc: %x\n", rc); 3821 goto error_free; 3822 } 3823 rc = bnxt_hwrm_ver_get(bp); 3824 if (rc) 3825 goto error_free; 3826 3827 rc = bnxt_hwrm_func_reset(bp); 3828 if (rc) { 3829 PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc); 3830 rc = -EIO; 3831 goto error_free; 3832 } 3833 3834 rc = bnxt_hwrm_queue_qportcfg(bp); 3835 if (rc) { 3836 PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n"); 3837 goto error_free; 3838 } 3839 /* Get the MAX capabilities for this function */ 3840 rc = bnxt_hwrm_func_qcaps(bp); 3841 if (rc) { 3842 PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc); 3843 goto error_free; 3844 } 3845 3846 rc = bnxt_alloc_stats_mem(bp); 3847 if (rc) 3848 goto error_free; 3849 3850 if (bp->max_tx_rings == 0) { 3851 PMD_DRV_LOG(ERR, "No TX rings available!\n"); 3852 rc = -EBUSY; 3853 goto error_free; 3854 } 3855 3856 rc = bnxt_setup_mac_addr(eth_dev); 3857 if (rc) 3858 goto error_free; 3859 3860 /* THOR does not support ring groups. 3861 * But we will use the array to save RSS context IDs. 3862 */ 3863 if (BNXT_CHIP_THOR(bp)) { 3864 bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR; 3865 } else if (bp->max_ring_grps < bp->rx_cp_nr_rings) { 3866 /* 1 ring is for default completion ring */ 3867 PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n"); 3868 rc = -ENOSPC; 3869 goto error_free; 3870 } 3871 3872 if (BNXT_HAS_RING_GRPS(bp)) { 3873 bp->grp_info = rte_zmalloc("bnxt_grp_info", 3874 sizeof(*bp->grp_info) * 3875 bp->max_ring_grps, 0); 3876 if (!bp->grp_info) { 3877 PMD_DRV_LOG(ERR, 3878 "Failed to alloc %zu bytes for grp info tbl.\n", 3879 sizeof(*bp->grp_info) * bp->max_ring_grps); 3880 rc = -ENOMEM; 3881 goto error_free; 3882 } 3883 } 3884 3885 /* Forward all requests if firmware is new enough */ 3886 if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) && 3887 (bp->fw_ver < ((20 << 24) | (7 << 16)))) || 3888 ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) { 3889 memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd)); 3890 } else { 3891 PMD_DRV_LOG(WARNING, 3892 "Firmware too old for VF mailbox functionality\n"); 3893 memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd)); 3894 } 3895 3896 /* 3897 * The following are used for driver cleanup. If we disallow these, 3898 * VF drivers can't clean up cleanly. 
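/*
 * Undo bnxt_dev_init(): disable and free interrupts, release driver memory,
 * the ring group info table, HWRM resources and the port statistics
 * memzones.  Only the primary process may tear the device down.
 */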
3899 */ 3900 ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR); 3901 ALLOW_FUNC(HWRM_VNIC_FREE); 3902 ALLOW_FUNC(HWRM_RING_FREE); 3903 ALLOW_FUNC(HWRM_RING_GRP_FREE); 3904 ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE); 3905 ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE); 3906 ALLOW_FUNC(HWRM_STAT_CTX_FREE); 3907 ALLOW_FUNC(HWRM_PORT_PHY_QCFG); 3908 ALLOW_FUNC(HWRM_VNIC_TPA_CFG); 3909 rc = bnxt_hwrm_func_driver_register(bp); 3910 if (rc) { 3911 PMD_DRV_LOG(ERR, 3912 "Failed to register driver"); 3913 rc = -EBUSY; 3914 goto error_free; 3915 } 3916 3917 PMD_DRV_LOG(INFO, 3918 DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n", 3919 pci_dev->mem_resource[0].phys_addr, 3920 pci_dev->mem_resource[0].addr); 3921 3922 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 3923 if (rc) { 3924 PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n"); 3925 goto error_free; 3926 } 3927 3928 if (mtu >= RTE_ETHER_MIN_MTU && mtu <= BNXT_MAX_MTU && 3929 mtu != eth_dev->data->mtu) 3930 eth_dev->data->mtu = mtu; 3931 3932 if (BNXT_PF(bp)) { 3933 //if (bp->pf.active_vfs) { 3934 // TODO: Deallocate VF resources? 3935 //} 3936 if (bp->pdev->max_vfs) { 3937 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 3938 if (rc) { 3939 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 3940 goto error_free; 3941 } 3942 } else { 3943 rc = bnxt_hwrm_allocate_pf_only(bp); 3944 if (rc) { 3945 PMD_DRV_LOG(ERR, 3946 "Failed to allocate PF resources\n"); 3947 goto error_free; 3948 } 3949 } 3950 } 3951 3952 bnxt_hwrm_port_led_qcaps(bp); 3953 3954 rc = bnxt_setup_int(bp); 3955 if (rc) 3956 goto error_free; 3957 3958 rc = bnxt_alloc_mem(bp); 3959 if (rc) 3960 goto error_free; 3961 3962 bnxt_init_nic(bp); 3963 3964 rc = bnxt_request_int(bp); 3965 if (rc) 3966 goto error_free; 3967 3968 return 0; 3969 3970 error_free: 3971 bnxt_dev_uninit(eth_dev); 3972 error: 3973 return rc; 3974 } 3975 3976 static int 3977 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) 3978 { 3979 struct bnxt *bp = eth_dev->data->dev_private; 3980 int rc; 3981 3982 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 3983 return -EPERM; 3984 3985 PMD_DRV_LOG(DEBUG, "Calling Device uninit\n"); 3986 bnxt_disable_int(bp); 3987 bnxt_free_int(bp); 3988 bnxt_free_mem(bp); 3989 3990 bnxt_hwrm_func_buf_unrgtr(bp); 3991 3992 if (bp->grp_info != NULL) { 3993 rte_free(bp->grp_info); 3994 bp->grp_info = NULL; 3995 } 3996 rc = bnxt_hwrm_func_driver_unregister(bp, 0); 3997 bnxt_free_hwrm_resources(bp); 3998 3999 if (bp->tx_mem_zone) { 4000 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); 4001 bp->tx_mem_zone = NULL; 4002 } 4003 4004 if (bp->rx_mem_zone) { 4005 rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); 4006 bp->rx_mem_zone = NULL; 4007 } 4008 4009 if (bp->dev_stopped == 0) 4010 bnxt_dev_close_op(eth_dev); 4011 if (bp->pf.vf_info) 4012 rte_free(bp->pf.vf_info); 4013 bnxt_free_ctx_mem(bp); 4014 eth_dev->dev_ops = NULL; 4015 eth_dev->rx_pkt_burst = NULL; 4016 eth_dev->tx_pkt_burst = NULL; 4017 4018 return rc; 4019 } 4020 4021 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 4022 struct rte_pci_device *pci_dev) 4023 { 4024 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt), 4025 bnxt_dev_init); 4026 } 4027 4028 static int bnxt_pci_remove(struct rte_pci_device *pci_dev) 4029 { 4030 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 4031 return rte_eth_dev_pci_generic_remove(pci_dev, 4032 bnxt_dev_uninit); 4033 else 4034 return rte_eth_dev_pci_generic_remove(pci_dev, NULL); 4035 } 4036 4037 static struct rte_pci_driver bnxt_rte_pmd = { 4038 .id_table = bnxt_pci_id_map, 4039 
static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

RTE_INIT(bnxt_init_log)
{
	bnxt_logtype_driver = rte_log_register("pmd.net.bnxt.driver");
	if (bnxt_logtype_driver >= 0)
		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
}

RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");