/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_util.h"

#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom NetXtreme driver " DRV_MODULE_NAME;
int bnxt_logtype_driver;

#define PCI_VENDOR_ID_BROADCOM 0x14E4

#define BROADCOM_DEV_ID_STRATUS_NIC_VF1 0x1606
#define BROADCOM_DEV_ID_STRATUS_NIC_VF2 0x1609
#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
#define BROADCOM_DEV_ID_57414_VF 0x16c1
#define BROADCOM_DEV_ID_57301 0x16c8
#define BROADCOM_DEV_ID_57302 0x16c9
#define BROADCOM_DEV_ID_57304_PF 0x16ca
#define BROADCOM_DEV_ID_57304_VF 0x16cb
#define BROADCOM_DEV_ID_57417_MF 0x16cc
#define BROADCOM_DEV_ID_NS2 0x16cd
#define BROADCOM_DEV_ID_57311 0x16ce
#define BROADCOM_DEV_ID_57312 0x16cf
#define BROADCOM_DEV_ID_57402 0x16d0
#define BROADCOM_DEV_ID_57404 0x16d1
#define BROADCOM_DEV_ID_57406_PF 0x16d2
#define BROADCOM_DEV_ID_57406_VF 0x16d3
#define BROADCOM_DEV_ID_57402_MF 0x16d4
#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
#define BROADCOM_DEV_ID_57412 0x16d6
#define BROADCOM_DEV_ID_57414 0x16d7
#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
#define BROADCOM_DEV_ID_5741X_VF 0x16dc
#define BROADCOM_DEV_ID_57412_MF 0x16de
#define BROADCOM_DEV_ID_57314 0x16df
#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
#define BROADCOM_DEV_ID_5731X_VF 0x16e1
#define BROADCOM_DEV_ID_57417_SFP 0x16e2
#define BROADCOM_DEV_ID_57416_SFP 0x16e3
#define BROADCOM_DEV_ID_57317_SFP 0x16e4
#define BROADCOM_DEV_ID_57404_MF 0x16e7
#define BROADCOM_DEV_ID_57406_MF 0x16e8
#define BROADCOM_DEV_ID_57407_SFP 0x16e9
#define BROADCOM_DEV_ID_57407_MF 0x16ea
#define BROADCOM_DEV_ID_57414_MF 0x16ec
#define BROADCOM_DEV_ID_57416_MF 0x16ee
#define BROADCOM_DEV_ID_57508 0x1750
#define BROADCOM_DEV_ID_57504 0x1751
#define BROADCOM_DEV_ID_57502 0x1752
#define BROADCOM_DEV_ID_57500_VF 0x1807
#define BROADCOM_DEV_ID_58802 0xd802
#define BROADCOM_DEV_ID_58804 0xd804
#define BROADCOM_DEV_ID_58808 0x16f0
#define BROADCOM_DEV_ID_58802_VF 0xd800

static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_ETH_RSS_SUPPORT (	\
	ETH_RSS_IPV4 |		\
	ETH_RSS_NONFRAG_IPV4_TCP |	\
	ETH_RSS_NONFRAG_IPV4_UDP |	\
	ETH_RSS_IPV6 |		\
	ETH_RSS_NONFRAG_IPV6_TCP |	\
	ETH_RSS_NONFRAG_IPV6_UDP)

#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
				     DEV_TX_OFFLOAD_IPV4_CKSUM | \
				     DEV_TX_OFFLOAD_TCP_CKSUM | \
				     DEV_TX_OFFLOAD_UDP_CKSUM | \
				     DEV_TX_OFFLOAD_TCP_TSO | \
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
				     DEV_TX_OFFLOAD_MULTI_SEGS)

#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
				     DEV_RX_OFFLOAD_VLAN_STRIP | \
				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
				     DEV_RX_OFFLOAD_UDP_CKSUM | \
				     DEV_RX_OFFLOAD_TCP_CKSUM | \
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
				     DEV_RX_OFFLOAD_JUMBO_FRAME | \
				     DEV_RX_OFFLOAD_KEEP_CRC | \
				     DEV_RX_OFFLOAD_TCP_LRO)

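/* Forward declarations. */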
static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);

/***********************/

/*
 * High level utility functions
 */

static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
{
	if (!BNXT_CHIP_THOR(bp))
		return 1;

	return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
				  BNXT_RSS_ENTRIES_PER_CTX_THOR) /
				  BNXT_RSS_ENTRIES_PER_CTX_THOR;
}

static uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
{
	if (!BNXT_CHIP_THOR(bp))
		return HW_HASH_INDEX_SIZE;

	return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
}

static void bnxt_free_mem(struct bnxt *bp)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	bnxt_free_stats(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
}

static int bnxt_alloc_mem(struct bnxt *bp)
{
	int rc;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp);
	return rc;
}

static int bnxt_init_chip(struct bnxt *bp)
{
	struct bnxt_rx_queue *rxq;
	struct rte_eth_link new;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	uint32_t intr_vector = 0;
	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
	uint32_t vec = BNXT_MISC_VEC_ID;
	unsigned int i, j;
	int rc;

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
		bp->eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		bp->flags |= BNXT_FLAG_JUMBO;
	} else {
		bp->eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	/* THOR does not support ring groups.
	 * But we will use the array to save RSS context IDs.
	 */
	if (BNXT_CHIP_THOR(bp))
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
		uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;

		vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
		if (!vnic->fw_grp_ids) {
			PMD_DRV_LOG(ERR,
				    "Failed to alloc %d bytes for group ids\n",
				    size);
			rc = -ENOMEM;
			goto err_out;
		}
		memset(vnic->fw_grp_ids, -1, size);

		PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
			    i, vnic, vnic->fw_grp_ids);

		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n",
				    i, rc);
			goto err_out;
		}

		/* Alloc RSS context only if RSS mode is enabled */
		if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
			int j, nr_ctxs = bnxt_rss_ctxts(bp);

			rc = 0;
			for (j = 0; j < nr_ctxs; j++) {
				rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
				if (rc)
					break;
			}
			if (rc) {
				PMD_DRV_LOG(ERR,
					    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
					    i, j, rc);
				goto err_out;
			}
			vnic->num_lb_ctxts = nr_ctxs;
		}

		/*
		 * Firmware sets pf pair in default vnic cfg. If the VLAN strip
		 * setting is not available at this time, it will not be
		 * configured correctly in the CFA.
		 */
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			vnic->vlan_strip = true;
		else
			vnic->vlan_strip = false;

		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
				    i, rc);
			goto err_out;
		}

		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic %d filter failure rc: %x\n",
				    i, rc);
			goto err_out;
		}

		for (j = 0; j < bp->rx_nr_rings; j++) {
			rxq = bp->eth_dev->data->rx_queues[j];

			PMD_DRV_LOG(DEBUG,
				    "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
				    j, rxq->vnic, rxq->vnic->fw_grp_ids);

			if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
				rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
		}

		rc = bnxt_vnic_rss_configure(bp, vnic);
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic set RSS failure rc: %x\n", rc);
			goto err_out;
		}

		bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

		if (bp->eth_dev->data->dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TCP_LRO)
			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
		else
			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
	}
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = bp->eth_dev->data->nb_rx_queues;
		PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
		if (intr_vector > bp->rx_cp_nr_rings) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
				    bp->rx_cp_nr_rings);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    bp->eth_dev->data->nb_rx_queues *
				    sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
				    " intr_vec", bp->eth_dev->data->nb_rx_queues);
			return -ENOMEM;
		}
		PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
			    "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
			    intr_handle->intr_vec, intr_handle->nb_efd,
			    intr_handle->max_intr);
		for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
		     queue_id++) {
			intr_handle->intr_vec[queue_id] = vec;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
		goto err_out;
	}

	if (!bp->link_info.link_up) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM link config failure rc: %x\n", rc);
			goto err_out;
		}
	}
	bnxt_print_link_info(bp->eth_dev);

	return 0;

err_out:
	bnxt_free_all_hwrm_resources(bp);

	/* Some of the error status returned by FW may not be from errno.h */
	if (rc > 0)
		rc = -EIO;

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}

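/*
 * Initialize driver-private ring group, VNIC and filter bookkeeping before
 * the corresponding hardware resources are programmed via HWRM.
 */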
static int bnxt_init_nic(struct bnxt *bp)
{
	int rc;

	if (BNXT_HAS_RING_GRPS(bp)) {
		rc = bnxt_init_ring_grps(bp);
		if (rc)
			return rc;
	}

	bnxt_init_vnics(bp);
	bnxt_init_filters(bp);

	return 0;
}

/*
 * Device configuration and status function
 */

static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				 struct rte_eth_dev_info *dev_info)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;
	unsigned int max_rx_rings;

	/* MAC Specifics */
	dev_info->max_mac_addrs = bp->max_l2_ctx;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp))
		dev_info->max_vfs = bp->pdev->max_vfs;
	max_rx_rings = RTE_MIN(bp->max_vnics, bp->max_stat_ctx);
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
	dev_info->hash_key_size = 40;
	max_vnics = bp->max_vnics;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
				  RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;

	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

	/* *INDENT-OFF* */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		/* If no descriptors available, pkts are dropped by default */
		.rx_drop_en = 1,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	eth_dev->data->dev_conf.intr_conf.rxq = 1;
	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
	dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;

	/* *INDENT-ON* */

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 * need further investigation.
	 */

	/* VMDq resources */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int rc;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
		rc = bnxt_hwrm_check_vf_rings(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
			return -ENOSPC;
		}

		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
			return -ENOSPC;
		}
	} else {
		/* legacy driver needs to get updated values */
		rc = bnxt_hwrm_func_qcaps(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
			return rc;
		}
	}

	/* Inherit new configurations */
	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
	    bp->max_cp_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
	    bp->max_stat_ctx)
		goto resource_error;

	if (BNXT_HAS_RING_GRPS(bp) &&
	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
		goto resource_error;

	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
	    bp->max_vnics < eth_dev->data->nb_rx_queues)
		goto resource_error;

	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
			BNXT_NUM_VLANS;
		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
	}
	return 0;

resource_error:
	PMD_DRV_LOG(ERR,
		    "Insufficient resources to support requested config\n");
	PMD_DRV_LOG(ERR,
		    "Num Queues Requested: Tx %d, Rx %d\n",
		    eth_dev->data->nb_tx_queues,
		    eth_dev->data->nb_rx_queues);
	PMD_DRV_LOG(ERR,
		    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
		    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
		    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
	return -ENOSPC;
}

static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
			    eth_dev->data->port_id,
			    (uint32_t)link->link_speed,
			    (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
			    ("full-duplex") : ("half-duplex"));
	else
		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
			    eth_dev->data->port_id);
}

/*
 * Determine whether the current configuration requires support for scattered
 * receive; return 1 if scattered receive is required and 0 if not.
 */
static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
{
	uint16_t buf_size;
	int i;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];

		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
				      RTE_PKTMBUF_HEADROOM);
		if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
			return 1;
	}
	return 0;
}

static eth_rx_burst_t
bnxt_receive_function(__rte_unused struct rte_eth_dev *eth_dev)
{
#ifdef RTE_ARCH_X86
	/*
	 * Vector mode receive can be enabled only if scattered rx is not
	 * in use and rx offloads are limited to VLAN stripping and
	 * CRC stripping.
	 */
	if (!eth_dev->data->scattered_rx &&
	    !(eth_dev->data->dev_conf.rxmode.offloads &
	      ~(DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_VLAN_FILTER))) {
		PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
			    eth_dev->data->port_id);
		return bnxt_recv_pkts_vec;
	}
	PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
		    eth_dev->data->port_id);
	PMD_DRV_LOG(INFO,
		    "Port %d scatter: %d rx offload: %" PRIX64 "\n",
		    eth_dev->data->port_id,
		    eth_dev->data->scattered_rx,
		    eth_dev->data->dev_conf.rxmode.offloads);
#endif
	return bnxt_recv_pkts;
}

static eth_tx_burst_t
bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
{
#ifdef RTE_ARCH_X86
	/*
	 * Vector mode transmit can be enabled only if scattered rx is not
	 * in use and tx offloads other than VLAN insertion are not
	 * in use.
	 */
	if (!eth_dev->data->scattered_rx &&
	    !(eth_dev->data->dev_conf.txmode.offloads &
	      ~DEV_TX_OFFLOAD_VLAN_INSERT)) {
		PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
			    eth_dev->data->port_id);
		return bnxt_xmit_pkts_vec;
	}
	PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
		    eth_dev->data->port_id);
	PMD_DRV_LOG(INFO,
		    "Port %d scatter: %d tx offload: %" PRIX64 "\n",
		    eth_dev->data->port_id,
		    eth_dev->data->scattered_rx,
		    eth_dev->data->dev_conf.txmode.offloads);
#endif
	return bnxt_xmit_pkts;
}

static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int vlan_mask = 0;
	int rc;

	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		PMD_DRV_LOG(ERR,
			    "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
			    bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	}
	bp->dev_stopped = 0;

	rc = bnxt_init_chip(bp);
	if (rc)
		goto error;

	eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);

	bnxt_link_update_op(eth_dev, 1);

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		vlan_mask |= ETH_VLAN_FILTER_MASK;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vlan_mask |= ETH_VLAN_STRIP_MASK;
	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
	if (rc)
		goto error;

	eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
	eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
	bnxt_enable_int(bp);
	bp->flags |= BNXT_FLAG_INIT_DONE;
	return 0;

error:
	bnxt_shutdown_nic(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc = 0;

	if (!bp->link_info.link_up)
		rc = bnxt_set_hwrm_link_config(bp, true);
	if (!rc)
		eth_dev->data->dev_link.link_status = 1;

	bnxt_print_link_info(eth_dev);
	return 0;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	bp->link_info.link_up = 0;

	return 0;
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	bnxt_disable_int(bp);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	bp->flags &= ~BNXT_FLAG_INIT_DONE;
	if (bp->eth_dev->data->dev_started) {
		/* TBD: STOP HW queues DMA */
		eth_dev->data->dev_link.link_status = 0;
	}
	bnxt_set_hwrm_link_config(bp, false);

	/* Clean queue intr-vector mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	bnxt_hwrm_port_clr_stats(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_shutdown_nic(bp);
	bp->dev_stopped = 1;
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	if (bp->dev_stopped == 0)
		bnxt_dev_stop_op(eth_dev);

	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}

	bnxt_dev_uninit(eth_dev);
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	uint32_t i;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < bp->nr_vnics; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		vnic = &bp->vnic_info[i];
		filter = STAILQ_FIRST(&vnic->filter);
		while (filter) {
			temp_filter = STAILQ_NEXT(filter, next);
			if (filter->mac_index == index) {
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				bnxt_hwrm_clear_l2_filter(bp, filter);
				filter->mac_index = INVALID_MAC_INDEX;
				memset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN);
				STAILQ_INSERT_TAIL(&bp->free_filter_list,
						   filter, next);
			}
			filter = temp_filter;
		}
	}
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
	struct bnxt_filter_info *filter;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}
	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			PMD_DRV_LOG(ERR,
				    "MAC address already exists for pool %d\n",
				    pool);
			return 0;
		}
	}
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
		return -ENODEV;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	filter->mac_index = index;
	memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
	return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
}

int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_link new;
	unsigned int cnt = BNXT_LINK_WAIT_CNT;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR,
				    "Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}
		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);

		if (!wait_to_complete)
			break;
	} while (!new.link_status && cnt--);

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		memcpy(&eth_dev->data->dev_link, &new,
		       sizeof(struct rte_eth_link));

		_rte_eth_dev_callback_process(eth_dev,
					      RTE_ETH_EVENT_INTR_LSC,
					      NULL);

		bnxt_print_link_info(eth_dev);
	}

	return rc;
}

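/*
 * Promiscuous and all-multicast modes are applied by toggling flags on the
 * default VNIC (index 0) and reprogramming its L2 RX mask via HWRM.
 */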
static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

/* Return bnxt_rx_queue pointer corresponding to a given rxq. */
static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
{
	if (qid >= bp->rx_nr_rings)
		return NULL;

	return bp->eth_dev->data->rx_queues[qid];
}

/* Return rxq corresponding to a given rss table ring/group ID. */
static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
{
	struct bnxt_rx_queue *rxq;
	unsigned int i;

	if (!BNXT_HAS_RING_GRPS(bp)) {
		for (i = 0; i < bp->rx_nr_rings; i++) {
			rxq = bp->eth_dev->data->rx_queues[i];
			if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
				return rxq->index;
		}
	} else {
		for (i = 0; i < bp->rx_nr_rings; i++) {
			if (bp->grp_info[i].fw_grp_id == fwr)
				return i;
		}
	}

	return INVALID_HW_RING_ID;
}

static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
	uint16_t idx, sft;
	int i;

	if (!vnic->rss_table)
		return -EINVAL;

	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
		return -EINVAL;

	if (reta_size != tbl_size) {
		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
			    "(%d) must equal the size supported by the hardware "
			    "(%d)\n", reta_size, tbl_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		struct bnxt_rx_queue *rxq;

		idx = i / RTE_RETA_GROUP_SIZE;
		sft = i % RTE_RETA_GROUP_SIZE;

		if (!(reta_conf[idx].mask & (1ULL << sft)))
			continue;

		rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
		if (!rxq) {
			PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
			return -EINVAL;
		}

		if (BNXT_CHIP_THOR(bp)) {
			vnic->rss_table[i * 2] =
				rxq->rx_ring->rx_ring_struct->fw_ring_id;
			vnic->rss_table[i * 2 + 1] =
				rxq->cp_ring->cp_ring_struct->fw_ring_id;
		} else {
			vnic->rss_table[i] =
				vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
		}
	}

	bnxt_hwrm_vnic_rss_cfg(bp, vnic);
	return 0;
}

static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
	uint16_t idx, sft, i;

	/* Retrieve from the default VNIC */
	if (!vnic)
		return -EINVAL;
	if (!vnic->rss_table)
		return -EINVAL;

	if (reta_size != tbl_size) {
		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
			    "(%d) must equal the size supported by the hardware "
			    "(%d)\n", reta_size, tbl_size);
		return -EINVAL;
	}

	for (idx = 0, i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		sft = i % RTE_RETA_GROUP_SIZE;

		if (reta_conf[idx].mask & (1ULL << sft)) {
			uint16_t qid;

			if (BNXT_CHIP_THOR(bp))
				qid = bnxt_rss_to_qid(bp,
						      vnic->rss_table[i * 2]);
			else
				qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);

			if (qid == INVALID_HW_RING_ID) {
				PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
				return -EINVAL;
			}
			reta_conf[idx].reta[sft] = qid;
		}
	}

	return 0;
}

static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
				   struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	uint16_t hash_type = 0;
	unsigned int i;

	/*
	 * If RSS enablement requested here differs from what was set in
	 * dev_configure, return -EINVAL
	 */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (!rss_conf->rss_hf)
			PMD_DRV_LOG(ERR, "Hash type NONE\n");
	} else {
		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
			return -EINVAL;
	}

	bp->flags |= BNXT_FLAG_UPDATE_HASH;
	memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));

	if (rss_conf->rss_hf & ETH_RSS_IPV4)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_IPV6)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

	/* Update the RSS VNIC(s) */
	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		vnic->hash_type = hash_type;

		/*
		 * Use the supplied key if the key length is
		 * acceptable and the rss_key is not NULL
		 */
		if (rss_conf->rss_key &&
		    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
			memcpy(vnic->rss_hash_key, rss_conf->rss_key,
			       rss_conf->rss_key_len);

		bnxt_hwrm_vnic_rss_cfg(bp, vnic);
	}
	return 0;
}

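/*
 * Report the RSS hash key and hash types currently programmed on the
 * default VNIC.
 */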
static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int len;
	uint32_t hash_types;

	/* RSS configuration is the same for all VNICs */
	if (vnic && vnic->rss_hash_key) {
		if (rss_conf->rss_key) {
			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
		}

		hash_types = vnic->hash_type;
		rss_conf->rss_hf = 0;
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_IPV4;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_IPV6;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
		}
		if (hash_types) {
			PMD_DRV_LOG(ERR,
				    "Unknown RSS config from firmware (%08x), RSS disabled",
				    vnic->hash_type);
			return -ENOTSUP;
		}
	} else {
		rss_conf->rss_hf = 0;
	}
	return 0;
}

static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
				 struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_link link_info;
	int rc;

	rc = bnxt_get_hwrm_link_config(bp, &link_info);
	if (rc)
		return rc;

	memset(fc_conf, 0, sizeof(*fc_conf));
	if (bp->link_info.auto_pause)
		fc_conf->autoneg = 1;
	switch (bp->link_info.pause) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
	      HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
		fc_conf->mode = RTE_FC_FULL;
		break;
	}
	return 0;
}

static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
				 struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = dev->data->dev_private;

	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
		PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
		return -ENOTSUP;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		bp->link_info.auto_pause = 0;
		bp->link_info.force_pause = 0;
		break;
	case RTE_FC_RX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
		}
		break;
	case RTE_FC_TX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
		}
		break;
	case RTE_FC_FULL:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
		}
		break;
	}
	return bnxt_set_hwrm_link_config(bp, true);
}

/* Add UDP tunneling port */
static int
bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint16_t tunnel_type = 0;
	int rc = 0;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (bp->vxlan_port_cnt) {
			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
				    udp_tunnel->udp_port);
			if (bp->vxlan_port != udp_tunnel->udp_port) {
				PMD_DRV_LOG(ERR, "Only one port allowed\n");
				return -ENOSPC;
			}
			bp->vxlan_port_cnt++;
			return 0;
		}
		tunnel_type =
			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
		bp->vxlan_port_cnt++;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (bp->geneve_port_cnt) {
			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
				    udp_tunnel->udp_port);
			if (bp->geneve_port != udp_tunnel->udp_port) {
				PMD_DRV_LOG(ERR, "Only one port allowed\n");
				return -ENOSPC;
			}
			bp->geneve_port_cnt++;
			return 0;
		}
		tunnel_type =
			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
		bp->geneve_port_cnt++;
		break;
	default:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
		return -ENOTSUP;
	}
	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
					     tunnel_type);
	return rc;
}

static int
bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint16_t tunnel_type = 0;
	uint16_t port = 0;
	int rc = 0;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (!bp->vxlan_port_cnt) {
			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
			return -EINVAL;
		}
		if (bp->vxlan_port != udp_tunnel->udp_port) {
			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
				    udp_tunnel->udp_port, bp->vxlan_port);
			return -EINVAL;
		}
		if (--bp->vxlan_port_cnt)
			return 0;

		tunnel_type =
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
		port = bp->vxlan_fw_dst_port_id;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (!bp->geneve_port_cnt) {
			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
			return -EINVAL;
		}
		if (bp->geneve_port != udp_tunnel->udp_port) {
			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
				    udp_tunnel->udp_port, bp->geneve_port);
			return -EINVAL;
		}
		if (--bp->geneve_port_cnt)
			return 0;

		tunnel_type =
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
		port = bp->geneve_fw_dst_port_id;
		break;
	default:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
		return -ENOTSUP;
	}

	rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
	if (!rc) {
		if (tunnel_type ==
		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
			bp->vxlan_port = 0;
		if (tunnel_type ==
		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
			bp->geneve_port = 0;
	}
	return rc;
}

static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
{
	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
	struct bnxt_vnic_info *vnic;
	unsigned int i;
	int rc = 0;
	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;

	/* Cycle through all VNICs */
	for (i = 0; i < bp->nr_vnics; i++) {
		/*
		 * For each VNIC and each associated filter(s)
		 * if VLAN exists && VLAN matches vlan_id
		 *      remove the MAC+VLAN filter
		 *      add a new MAC only filter
		 * else
		 *      VLAN filter doesn't exist, just skip and continue
		 */
		vnic = &bp->vnic_info[i];
		filter = STAILQ_FIRST(&vnic->filter);
		while (filter) {
			temp_filter = STAILQ_NEXT(filter, next);

			if (filter->enables & chk &&
			    filter->l2_ovlan == vlan_id) {
				/* Must delete the filter */
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				bnxt_hwrm_clear_l2_filter(bp, filter);
				STAILQ_INSERT_TAIL(&bp->free_filter_list,
						   filter, next);

				/*
				 * Need to examine to see if the MAC
				 * filter already existed or not before
				 * allocating a new one
				 */

				new_filter = bnxt_alloc_filter(bp);
				if (!new_filter) {
					PMD_DRV_LOG(ERR,
						    "MAC/VLAN filter alloc failed\n");
					rc = -ENOMEM;
					goto exit;
				}
				STAILQ_INSERT_TAIL(&vnic->filter,
						   new_filter, next);
				/* Inherit MAC from previous filter */
				new_filter->mac_index =
					filter->mac_index;
				memcpy(new_filter->l2_addr, filter->l2_addr,
				       RTE_ETHER_ADDR_LEN);
				/* MAC only filter */
				rc = bnxt_hwrm_set_l2_filter(bp,
							     vnic->fw_vnic_id,
							     new_filter);
				if (rc)
					goto exit;
				PMD_DRV_LOG(INFO,
					    "Del Vlan filter for %d\n",
					    vlan_id);
			}
			filter = temp_filter;
		}
	}
exit:
	return rc;
}

static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
{
	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
	struct bnxt_vnic_info *vnic;
	unsigned int i;
	int rc = 0;
	uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;

	/* Cycle through all VNICs */
	for (i = 0; i < bp->nr_vnics; i++) {
		/*
		 * For each VNIC and each associated filter(s)
		 * if VLAN exists:
		 *   if VLAN matches vlan_id
		 *      VLAN filter already exists, just skip and continue
		 *   else
		 *      add a new MAC+VLAN filter
		 * else
		 *   Remove the old MAC only filter
		 *   Add a new MAC+VLAN filter
		 */
		vnic = &bp->vnic_info[i];
		filter = STAILQ_FIRST(&vnic->filter);
		while (filter) {
			temp_filter = STAILQ_NEXT(filter, next);

			if (filter->enables & chk) {
				if (filter->l2_ivlan == vlan_id)
					goto cont;
			} else {
				/* Must delete the MAC filter */
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				bnxt_hwrm_clear_l2_filter(bp, filter);
				filter->l2_ovlan = 0;
				STAILQ_INSERT_TAIL(&bp->free_filter_list,
						   filter, next);
			}
			new_filter = bnxt_alloc_filter(bp);
			if (!new_filter) {
				PMD_DRV_LOG(ERR,
					    "MAC/VLAN filter alloc failed\n");
				rc = -ENOMEM;
				goto exit;
			}
			STAILQ_INSERT_TAIL(&vnic->filter, new_filter, next);
			/* Inherit MAC from the previous filter */
			new_filter->mac_index = filter->mac_index;
			memcpy(new_filter->l2_addr, filter->l2_addr,
			       RTE_ETHER_ADDR_LEN);
			/* MAC + VLAN ID filter */
			new_filter->l2_ivlan = vlan_id;
			new_filter->l2_ivlan_mask = 0xF000;
			new_filter->enables |= en;
			rc = bnxt_hwrm_set_l2_filter(bp,
						     vnic->fw_vnic_id,
						     new_filter);
			if (rc)
				goto exit;
			PMD_DRV_LOG(INFO,
				    "Added Vlan filter for %d\n", vlan_id);
cont:
			filter = temp_filter;
		}
	}
exit:
	return rc;
}

static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
				   uint16_t vlan_id, int on)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	/* These operations apply to ALL existing MAC/VLAN filters */
	if (on)
		return bnxt_add_vlan_filter(bp, vlan_id);
	else
		return bnxt_del_vlan_filter(bp, vlan_id);
}

static int
bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
{
	struct bnxt *bp = dev->data->dev_private;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
	unsigned int i;

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
			/* Remove any VLAN filters programmed */
			for (i = 0; i < 4095; i++)
				bnxt_del_vlan_filter(bp, i);
		}
		PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
			    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
	}

	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		for (i = 0; i < bp->nr_vnics; i++) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
			if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
				vnic->vlan_strip = true;
			else
				vnic->vlan_strip = false;
			bnxt_hwrm_vnic_cfg(bp, vnic);
		}
		PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
			    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n");

	return 0;
}

static int
bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
			     struct rte_ether_addr *addr)
{
	struct bnxt *bp = dev->data->dev_private;
	/* Default Filter is tied to VNIC 0 */
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct bnxt_filter_info *filter;
	int rc;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
		return -EPERM;

	memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		/* Default Filter is at Index 0 */
		if (filter->mac_index != 0)
			continue;
		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
		if (rc)
			return rc;
		memcpy(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
		memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
		rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
		if (rc)
			return rc;
		filter->mac_index = 0;
		PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
	}

	return 0;
}

static int
bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
			     struct rte_ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	char *mc_addr_list = (char *)mc_addr_set;
	struct bnxt_vnic_info *vnic;
	uint32_t off = 0, i = 0;

	vnic = &bp->vnic_info[0];

	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
		goto allmulti;
	}

	/* TODO Check for Duplicate mcast addresses */
	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	for (i = 0; i < nb_mc_addr; i++) {
		memcpy(vnic->mc_list + off, &mc_addr_list[i],
		       RTE_ETHER_ADDR_LEN);
		off += RTE_ETHER_ADDR_LEN;
	}

	vnic->mc_addr_cnt = i;

allmulti:
	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static int
bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct bnxt *bp = dev->data->dev_private;
	uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
	uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
	uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
	int ret;

	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
		       fw_major, fw_minor, fw_updt);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void
bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
		     struct rte_eth_rxq_info *qinfo)
{
	struct bnxt_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;

	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = 0;
	qinfo->conf.rx_deferred_start = 0;
}

static void
bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
		     struct rte_eth_txq_info *qinfo)
{
	struct bnxt_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;

	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
	qinfo->conf.tx_thresh.wthresh = txq->wthresh;

	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
	qinfo->conf.tx_rs_thresh = 0;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}

static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_dev_info dev_info;
	uint32_t new_pkt_size;
	uint32_t rc = 0;
	uint32_t i;

	new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
		       VLAN_TAG_SIZE * BNXT_NUM_VLANS;

	bnxt_dev_info_get_op(eth_dev, &dev_info);

	if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
		PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
			    RTE_ETHER_MIN_MTU, BNXT_MAX_MTU);
		return -EINVAL;
	}

#ifdef RTE_ARCH_X86
	/*
	 * If vector-mode tx/rx is active, disallow any MTU change that would
	 * require scattered receive support.
	 */
	if (eth_dev->data->dev_started &&
	    (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec ||
	     eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) &&
	    (new_pkt_size >
	     eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
		PMD_DRV_LOG(ERR,
			    "MTU change would require scattered rx support.\n");
		PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n");
		return -EINVAL;
	}
#endif

	if (new_mtu > RTE_ETHER_MTU) {
		bp->flags |= BNXT_FLAG_JUMBO;
		bp->eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	} else {
		bp->eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;

	eth_dev->data->mtu = new_mtu;
	PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
		uint16_t size = 0;

		vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
			    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc)
			break;

		size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
		size -= RTE_PKTMBUF_HEADROOM;

		if (size < new_mtu) {
			rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
			if (rc)
				return rc;
		}
	}

	return rc;
}

static int
bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct bnxt *bp = dev->data->dev_private;
	uint16_t vlan = bp->vlan;
	int rc;

	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
		PMD_DRV_LOG(ERR,
			    "PVID cannot be modified for this function\n");
		return -ENOTSUP;
	}
	bp->vlan = on ? pvid : 0;

	rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
	if (rc)
		bp->vlan = vlan;
	return rc;
}

static int
bnxt_dev_led_on_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = dev->data->dev_private;

	return bnxt_hwrm_port_led_cfg(bp, true);
}

static int
bnxt_dev_led_off_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = dev->data->dev_private;

	return bnxt_hwrm_port_led_cfg(bp, false);
}

static uint32_t
bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint32_t desc = 0, raw_cons = 0, cons;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_rx_queue *rxq;
	struct rx_pkt_cmpl *rxcmp;
	uint16_t cmp_type;
	uint8_t cmp = 1;
	bool valid;

	rxq = dev->data->rx_queues[rx_queue_id];
	cpr = rxq->cp_ring;
	valid = cpr->valid;

	while (raw_cons < rxq->nb_rx_desc) {
		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

		if (!CMPL_VALID(rxcmp, valid))
			goto nothing_to_do;
		valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
		cmp_type = CMP_TYPE(rxcmp);
		if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
			cmp = (rte_le_to_cpu_32(
					((struct rx_tpa_end_cmpl *)
					 (rxcmp))->agg_bufs_v1) &
			       RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
				RX_TPA_END_CMPL_AGG_BUFS_SFT;
			desc++;
		} else if (cmp_type == 0x11) {
			desc++;
			cmp = (rxcmp->agg_bufs_v1 &
			       RX_PKT_CMPL_AGG_BUFS_MASK) >>
				RX_PKT_CMPL_AGG_BUFS_SFT;
		} else {
			cmp = 1;
		}
nothing_to_do:
		raw_cons += cmp ? cmp : 2;
cmp : 2; 1945 } 1946 1947 return desc; 1948 } 1949 1950 static int 1951 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 1952 { 1953 struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue; 1954 struct bnxt_rx_ring_info *rxr; 1955 struct bnxt_cp_ring_info *cpr; 1956 struct bnxt_sw_rx_bd *rx_buf; 1957 struct rx_pkt_cmpl *rxcmp; 1958 uint32_t cons, cp_cons; 1959 1960 if (!rxq) 1961 return -EINVAL; 1962 1963 cpr = rxq->cp_ring; 1964 rxr = rxq->rx_ring; 1965 1966 if (offset >= rxq->nb_rx_desc) 1967 return -EINVAL; 1968 1969 cons = RING_CMP(cpr->cp_ring_struct, offset); 1970 cp_cons = cpr->cp_raw_cons; 1971 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 1972 1973 if (cons > cp_cons) { 1974 if (CMPL_VALID(rxcmp, cpr->valid)) 1975 return RTE_ETH_RX_DESC_DONE; 1976 } else { 1977 if (CMPL_VALID(rxcmp, !cpr->valid)) 1978 return RTE_ETH_RX_DESC_DONE; 1979 } 1980 rx_buf = &rxr->rx_buf_ring[cons]; 1981 if (rx_buf->mbuf == NULL) 1982 return RTE_ETH_RX_DESC_UNAVAIL; 1983 1984 1985 return RTE_ETH_RX_DESC_AVAIL; 1986 } 1987 1988 static int 1989 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 1990 { 1991 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 1992 struct bnxt_tx_ring_info *txr; 1993 struct bnxt_cp_ring_info *cpr; 1994 struct bnxt_sw_tx_bd *tx_buf; 1995 struct tx_pkt_cmpl *txcmp; 1996 uint32_t cons, cp_cons; 1997 1998 if (!txq) 1999 return -EINVAL; 2000 2001 cpr = txq->cp_ring; 2002 txr = txq->tx_ring; 2003 2004 if (offset >= txq->nb_tx_desc) 2005 return -EINVAL; 2006 2007 cons = RING_CMP(cpr->cp_ring_struct, offset); 2008 txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 2009 cp_cons = cpr->cp_raw_cons; 2010 2011 if (cons > cp_cons) { 2012 if (CMPL_VALID(txcmp, cpr->valid)) 2013 return RTE_ETH_TX_DESC_UNAVAIL; 2014 } else { 2015 if (CMPL_VALID(txcmp, !cpr->valid)) 2016 return RTE_ETH_TX_DESC_UNAVAIL; 2017 } 2018 tx_buf = &txr->tx_buf_ring[cons]; 2019 if (tx_buf->mbuf == NULL) 2020 return RTE_ETH_TX_DESC_DONE; 2021 2022 return RTE_ETH_TX_DESC_FULL; 2023 } 2024 2025 static struct bnxt_filter_info * 2026 bnxt_match_and_validate_ether_filter(struct bnxt *bp, 2027 struct rte_eth_ethertype_filter *efilter, 2028 struct bnxt_vnic_info *vnic0, 2029 struct bnxt_vnic_info *vnic, 2030 int *ret) 2031 { 2032 struct bnxt_filter_info *mfilter = NULL; 2033 int match = 0; 2034 *ret = 0; 2035 2036 if (efilter->ether_type == RTE_ETHER_TYPE_IPV4 || 2037 efilter->ether_type == RTE_ETHER_TYPE_IPV6) { 2038 PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in" 2039 " ethertype filter.", efilter->ether_type); 2040 *ret = -EINVAL; 2041 goto exit; 2042 } 2043 if (efilter->queue >= bp->rx_nr_rings) { 2044 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue); 2045 *ret = -EINVAL; 2046 goto exit; 2047 } 2048 2049 vnic0 = &bp->vnic_info[0]; 2050 vnic = &bp->vnic_info[efilter->queue]; 2051 if (vnic == NULL) { 2052 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue); 2053 *ret = -EINVAL; 2054 goto exit; 2055 } 2056 2057 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { 2058 STAILQ_FOREACH(mfilter, &vnic0->filter, next) { 2059 if ((!memcmp(efilter->mac_addr.addr_bytes, 2060 mfilter->l2_addr, RTE_ETHER_ADDR_LEN) && 2061 mfilter->flags == 2062 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP && 2063 mfilter->ethertype == efilter->ether_type)) { 2064 match = 1; 2065 break; 2066 } 2067 } 2068 } else { 2069 STAILQ_FOREACH(mfilter, &vnic->filter, next) 2070 if ((!memcmp(efilter->mac_addr.addr_bytes, 2071 mfilter->l2_addr, RTE_ETHER_ADDR_LEN) && 2072 mfilter->ethertype == 
efilter->ether_type && 2073 mfilter->flags == 2074 HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) { 2075 match = 1; 2076 break; 2077 } 2078 } 2079 2080 if (match) 2081 *ret = -EEXIST; 2082 2083 exit: 2084 return mfilter; 2085 } 2086 2087 static int 2088 bnxt_ethertype_filter(struct rte_eth_dev *dev, 2089 enum rte_filter_op filter_op, 2090 void *arg) 2091 { 2092 struct bnxt *bp = dev->data->dev_private; 2093 struct rte_eth_ethertype_filter *efilter = 2094 (struct rte_eth_ethertype_filter *)arg; 2095 struct bnxt_filter_info *bfilter, *filter1; 2096 struct bnxt_vnic_info *vnic, *vnic0; 2097 int ret; 2098 2099 if (filter_op == RTE_ETH_FILTER_NOP) 2100 return 0; 2101 2102 if (arg == NULL) { 2103 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 2104 filter_op); 2105 return -EINVAL; 2106 } 2107 2108 vnic0 = &bp->vnic_info[0]; 2109 vnic = &bp->vnic_info[efilter->queue]; 2110 2111 switch (filter_op) { 2112 case RTE_ETH_FILTER_ADD: 2113 bnxt_match_and_validate_ether_filter(bp, efilter, 2114 vnic0, vnic, &ret); 2115 if (ret < 0) 2116 return ret; 2117 2118 bfilter = bnxt_get_unused_filter(bp); 2119 if (bfilter == NULL) { 2120 PMD_DRV_LOG(ERR, 2121 "Not enough resources for a new filter.\n"); 2122 return -ENOMEM; 2123 } 2124 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; 2125 memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes, 2126 RTE_ETHER_ADDR_LEN); 2127 memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes, 2128 RTE_ETHER_ADDR_LEN); 2129 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; 2130 bfilter->ethertype = efilter->ether_type; 2131 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2132 2133 filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0); 2134 if (filter1 == NULL) { 2135 ret = -1; 2136 goto cleanup; 2137 } 2138 bfilter->enables |= 2139 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2140 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; 2141 2142 bfilter->dst_id = vnic->fw_vnic_id; 2143 2144 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { 2145 bfilter->flags = 2146 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 2147 } 2148 2149 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); 2150 if (ret) 2151 goto cleanup; 2152 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); 2153 break; 2154 case RTE_ETH_FILTER_DELETE: 2155 filter1 = bnxt_match_and_validate_ether_filter(bp, efilter, 2156 vnic0, vnic, &ret); 2157 if (ret == -EEXIST) { 2158 ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1); 2159 2160 STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info, 2161 next); 2162 bnxt_free_filter(bp, filter1); 2163 } else if (ret == 0) { 2164 PMD_DRV_LOG(ERR, "No matching filter found\n"); 2165 } 2166 break; 2167 default: 2168 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 2169 ret = -EINVAL; 2170 goto error; 2171 } 2172 return ret; 2173 cleanup: 2174 bnxt_free_filter(bp, bfilter); 2175 error: 2176 return ret; 2177 } 2178 2179 static inline int 2180 parse_ntuple_filter(struct bnxt *bp, 2181 struct rte_eth_ntuple_filter *nfilter, 2182 struct bnxt_filter_info *bfilter) 2183 { 2184 uint32_t en = 0; 2185 2186 if (nfilter->queue >= bp->rx_nr_rings) { 2187 PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue); 2188 return -EINVAL; 2189 } 2190 2191 switch (nfilter->dst_port_mask) { 2192 case UINT16_MAX: 2193 bfilter->dst_port_mask = -1; 2194 bfilter->dst_port = nfilter->dst_port; 2195 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT | 2196 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2197 break; 2198 default: 2199 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 
2200 return -EINVAL; 2201 } 2202 2203 bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2204 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2205 2206 switch (nfilter->proto_mask) { 2207 case UINT8_MAX: 2208 if (nfilter->proto == 17) /* IPPROTO_UDP */ 2209 bfilter->ip_protocol = 17; 2210 else if (nfilter->proto == 6) /* IPPROTO_TCP */ 2211 bfilter->ip_protocol = 6; 2212 else 2213 return -EINVAL; 2214 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2215 break; 2216 default: 2217 PMD_DRV_LOG(ERR, "invalid protocol mask."); 2218 return -EINVAL; 2219 } 2220 2221 switch (nfilter->dst_ip_mask) { 2222 case UINT32_MAX: 2223 bfilter->dst_ipaddr_mask[0] = -1; 2224 bfilter->dst_ipaddr[0] = nfilter->dst_ip; 2225 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR | 2226 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2227 break; 2228 default: 2229 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 2230 return -EINVAL; 2231 } 2232 2233 switch (nfilter->src_ip_mask) { 2234 case UINT32_MAX: 2235 bfilter->src_ipaddr_mask[0] = -1; 2236 bfilter->src_ipaddr[0] = nfilter->src_ip; 2237 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | 2238 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2239 break; 2240 default: 2241 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 2242 return -EINVAL; 2243 } 2244 2245 switch (nfilter->src_port_mask) { 2246 case UINT16_MAX: 2247 bfilter->src_port_mask = -1; 2248 bfilter->src_port = nfilter->src_port; 2249 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | 2250 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2251 break; 2252 default: 2253 PMD_DRV_LOG(ERR, "invalid src_port mask."); 2254 return -EINVAL; 2255 } 2256 2257 //TODO Priority 2258 //nfilter->priority = (uint8_t)filter->priority; 2259 2260 bfilter->enables = en; 2261 return 0; 2262 } 2263 2264 static struct bnxt_filter_info* 2265 bnxt_match_ntuple_filter(struct bnxt *bp, 2266 struct bnxt_filter_info *bfilter, 2267 struct bnxt_vnic_info **mvnic) 2268 { 2269 struct bnxt_filter_info *mfilter = NULL; 2270 int i; 2271 2272 for (i = bp->nr_vnics - 1; i >= 0; i--) { 2273 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2274 STAILQ_FOREACH(mfilter, &vnic->filter, next) { 2275 if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] && 2276 bfilter->src_ipaddr_mask[0] == 2277 mfilter->src_ipaddr_mask[0] && 2278 bfilter->src_port == mfilter->src_port && 2279 bfilter->src_port_mask == mfilter->src_port_mask && 2280 bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] && 2281 bfilter->dst_ipaddr_mask[0] == 2282 mfilter->dst_ipaddr_mask[0] && 2283 bfilter->dst_port == mfilter->dst_port && 2284 bfilter->dst_port_mask == mfilter->dst_port_mask && 2285 bfilter->flags == mfilter->flags && 2286 bfilter->enables == mfilter->enables) { 2287 if (mvnic) 2288 *mvnic = vnic; 2289 return mfilter; 2290 } 2291 } 2292 } 2293 return NULL; 2294 } 2295 2296 static int 2297 bnxt_cfg_ntuple_filter(struct bnxt *bp, 2298 struct rte_eth_ntuple_filter *nfilter, 2299 enum rte_filter_op filter_op) 2300 { 2301 struct bnxt_filter_info *bfilter, *mfilter, *filter1; 2302 struct bnxt_vnic_info *vnic, *vnic0, *mvnic; 2303 int ret; 2304 2305 if (nfilter->flags != RTE_5TUPLE_FLAGS) { 2306 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 2307 return -EINVAL; 2308 } 2309 2310 if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { 2311 PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n"); 2312 return -EINVAL; 2313 } 2314 2315 bfilter = bnxt_get_unused_filter(bp); 2316 if (bfilter == NULL) { 2317 PMD_DRV_LOG(ERR, 2318 "Not enough resources for a new filter.\n"); 2319 return -ENOMEM; 2320 } 2321 ret = parse_ntuple_filter(bp, 
nfilter, bfilter); 2322 if (ret < 0) 2323 goto free_filter; 2324 2325 vnic = &bp->vnic_info[nfilter->queue]; 2326 vnic0 = &bp->vnic_info[0]; 2327 filter1 = STAILQ_FIRST(&vnic0->filter); 2328 if (filter1 == NULL) { 2329 ret = -1; 2330 goto free_filter; 2331 } 2332 2333 bfilter->dst_id = vnic->fw_vnic_id; 2334 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; 2335 bfilter->enables |= 2336 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2337 bfilter->ethertype = 0x800; 2338 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2339 2340 mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic); 2341 2342 if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && 2343 bfilter->dst_id == mfilter->dst_id) { 2344 PMD_DRV_LOG(ERR, "filter exists.\n"); 2345 ret = -EEXIST; 2346 goto free_filter; 2347 } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && 2348 bfilter->dst_id != mfilter->dst_id) { 2349 mfilter->dst_id = vnic->fw_vnic_id; 2350 ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter); 2351 STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next); 2352 STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next); 2353 PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n"); 2354 PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n"); 2355 goto free_filter; 2356 } 2357 if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) { 2358 PMD_DRV_LOG(ERR, "filter doesn't exist."); 2359 ret = -ENOENT; 2360 goto free_filter; 2361 } 2362 2363 if (filter_op == RTE_ETH_FILTER_ADD) { 2364 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; 2365 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); 2366 if (ret) 2367 goto free_filter; 2368 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); 2369 } else { 2370 if (mfilter == NULL) { 2371 /* This should not happen. But for Coverity! 
*/ 2372 ret = -ENOENT; 2373 goto free_filter; 2374 } 2375 ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter); 2376 2377 STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next); 2378 bnxt_free_filter(bp, mfilter); 2379 mfilter->fw_l2_filter_id = -1; 2380 bnxt_free_filter(bp, bfilter); 2381 bfilter->fw_l2_filter_id = -1; 2382 } 2383 2384 return 0; 2385 free_filter: 2386 bfilter->fw_l2_filter_id = -1; 2387 bnxt_free_filter(bp, bfilter); 2388 return ret; 2389 } 2390 2391 static int 2392 bnxt_ntuple_filter(struct rte_eth_dev *dev, 2393 enum rte_filter_op filter_op, 2394 void *arg) 2395 { 2396 struct bnxt *bp = dev->data->dev_private; 2397 int ret; 2398 2399 if (filter_op == RTE_ETH_FILTER_NOP) 2400 return 0; 2401 2402 if (arg == NULL) { 2403 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 2404 filter_op); 2405 return -EINVAL; 2406 } 2407 2408 switch (filter_op) { 2409 case RTE_ETH_FILTER_ADD: 2410 ret = bnxt_cfg_ntuple_filter(bp, 2411 (struct rte_eth_ntuple_filter *)arg, 2412 filter_op); 2413 break; 2414 case RTE_ETH_FILTER_DELETE: 2415 ret = bnxt_cfg_ntuple_filter(bp, 2416 (struct rte_eth_ntuple_filter *)arg, 2417 filter_op); 2418 break; 2419 default: 2420 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 2421 ret = -EINVAL; 2422 break; 2423 } 2424 return ret; 2425 } 2426 2427 static int 2428 bnxt_parse_fdir_filter(struct bnxt *bp, 2429 struct rte_eth_fdir_filter *fdir, 2430 struct bnxt_filter_info *filter) 2431 { 2432 enum rte_fdir_mode fdir_mode = 2433 bp->eth_dev->data->dev_conf.fdir_conf.mode; 2434 struct bnxt_vnic_info *vnic0, *vnic; 2435 struct bnxt_filter_info *filter1; 2436 uint32_t en = 0; 2437 int i; 2438 2439 if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2440 return -EINVAL; 2441 2442 filter->l2_ovlan = fdir->input.flow_ext.vlan_tci; 2443 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; 2444 2445 switch (fdir->input.flow_type) { 2446 case RTE_ETH_FLOW_IPV4: 2447 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 2448 /* FALLTHROUGH */ 2449 filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip; 2450 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2451 filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip; 2452 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2453 filter->ip_protocol = fdir->input.flow.ip4_flow.proto; 2454 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2455 filter->ip_addr_type = 2456 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2457 filter->src_ipaddr_mask[0] = 0xffffffff; 2458 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2459 filter->dst_ipaddr_mask[0] = 0xffffffff; 2460 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2461 filter->ethertype = 0x800; 2462 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2463 break; 2464 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 2465 filter->src_port = fdir->input.flow.tcp4_flow.src_port; 2466 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2467 filter->dst_port = fdir->input.flow.tcp4_flow.dst_port; 2468 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2469 filter->dst_port_mask = 0xffff; 2470 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2471 filter->src_port_mask = 0xffff; 2472 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2473 filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip; 2474 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2475 filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip; 2476 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2477 filter->ip_protocol = 6; 2478 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2479 filter->ip_addr_type = 2480 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2481 filter->src_ipaddr_mask[0] = 
0xffffffff; 2482 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2483 filter->dst_ipaddr_mask[0] = 0xffffffff; 2484 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2485 filter->ethertype = 0x800; 2486 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2487 break; 2488 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 2489 filter->src_port = fdir->input.flow.udp4_flow.src_port; 2490 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2491 filter->dst_port = fdir->input.flow.udp4_flow.dst_port; 2492 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2493 filter->dst_port_mask = 0xffff; 2494 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2495 filter->src_port_mask = 0xffff; 2496 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2497 filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip; 2498 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2499 filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip; 2500 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2501 filter->ip_protocol = 17; 2502 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2503 filter->ip_addr_type = 2504 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2505 filter->src_ipaddr_mask[0] = 0xffffffff; 2506 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2507 filter->dst_ipaddr_mask[0] = 0xffffffff; 2508 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2509 filter->ethertype = 0x800; 2510 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2511 break; 2512 case RTE_ETH_FLOW_IPV6: 2513 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 2514 /* FALLTHROUGH */ 2515 filter->ip_addr_type = 2516 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 2517 filter->ip_protocol = fdir->input.flow.ipv6_flow.proto; 2518 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2519 rte_memcpy(filter->src_ipaddr, 2520 fdir->input.flow.ipv6_flow.src_ip, 16); 2521 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2522 rte_memcpy(filter->dst_ipaddr, 2523 fdir->input.flow.ipv6_flow.dst_ip, 16); 2524 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2525 memset(filter->dst_ipaddr_mask, 0xff, 16); 2526 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2527 memset(filter->src_ipaddr_mask, 0xff, 16); 2528 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2529 filter->ethertype = 0x86dd; 2530 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2531 break; 2532 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 2533 filter->src_port = fdir->input.flow.tcp6_flow.src_port; 2534 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2535 filter->dst_port = fdir->input.flow.tcp6_flow.dst_port; 2536 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2537 filter->dst_port_mask = 0xffff; 2538 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2539 filter->src_port_mask = 0xffff; 2540 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2541 filter->ip_addr_type = 2542 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 2543 filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto; 2544 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2545 rte_memcpy(filter->src_ipaddr, 2546 fdir->input.flow.tcp6_flow.ip.src_ip, 16); 2547 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2548 rte_memcpy(filter->dst_ipaddr, 2549 fdir->input.flow.tcp6_flow.ip.dst_ip, 16); 2550 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2551 memset(filter->dst_ipaddr_mask, 0xff, 16); 2552 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2553 memset(filter->src_ipaddr_mask, 0xff, 16); 2554 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2555 filter->ethertype = 0x86dd; 2556 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2557 break; 2558 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 2559 filter->src_port = 
fdir->input.flow.udp6_flow.src_port; 2560 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2561 filter->dst_port = fdir->input.flow.udp6_flow.dst_port; 2562 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2563 filter->dst_port_mask = 0xffff; 2564 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2565 filter->src_port_mask = 0xffff; 2566 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2567 filter->ip_addr_type = 2568 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 2569 filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto; 2570 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2571 rte_memcpy(filter->src_ipaddr, 2572 fdir->input.flow.udp6_flow.ip.src_ip, 16); 2573 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2574 rte_memcpy(filter->dst_ipaddr, 2575 fdir->input.flow.udp6_flow.ip.dst_ip, 16); 2576 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2577 memset(filter->dst_ipaddr_mask, 0xff, 16); 2578 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2579 memset(filter->src_ipaddr_mask, 0xff, 16); 2580 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2581 filter->ethertype = 0x86dd; 2582 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2583 break; 2584 case RTE_ETH_FLOW_L2_PAYLOAD: 2585 filter->ethertype = fdir->input.flow.l2_flow.ether_type; 2586 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2587 break; 2588 case RTE_ETH_FLOW_VXLAN: 2589 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 2590 return -EINVAL; 2591 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 2592 filter->tunnel_type = 2593 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 2594 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 2595 break; 2596 case RTE_ETH_FLOW_NVGRE: 2597 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 2598 return -EINVAL; 2599 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 2600 filter->tunnel_type = 2601 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 2602 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 2603 break; 2604 case RTE_ETH_FLOW_UNKNOWN: 2605 case RTE_ETH_FLOW_RAW: 2606 case RTE_ETH_FLOW_FRAG_IPV4: 2607 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: 2608 case RTE_ETH_FLOW_FRAG_IPV6: 2609 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: 2610 case RTE_ETH_FLOW_IPV6_EX: 2611 case RTE_ETH_FLOW_IPV6_TCP_EX: 2612 case RTE_ETH_FLOW_IPV6_UDP_EX: 2613 case RTE_ETH_FLOW_GENEVE: 2614 /* FALLTHROUGH */ 2615 default: 2616 return -EINVAL; 2617 } 2618 2619 vnic0 = &bp->vnic_info[0]; 2620 vnic = &bp->vnic_info[fdir->action.rx_queue]; 2621 if (vnic == NULL) { 2622 PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue); 2623 return -EINVAL; 2624 } 2625 2626 2627 if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 2628 rte_memcpy(filter->dst_macaddr, 2629 fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6); 2630 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; 2631 } 2632 2633 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) { 2634 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 2635 filter1 = STAILQ_FIRST(&vnic0->filter); 2636 //filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 2637 } else { 2638 filter->dst_id = vnic->fw_vnic_id; 2639 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 2640 if (filter->dst_macaddr[i] == 0x00) 2641 filter1 = STAILQ_FIRST(&vnic0->filter); 2642 else 2643 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 2644 } 2645 2646 if (filter1 == NULL) 2647 return -EINVAL; 2648 2649 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2650 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 2651 2652 filter->enables = en; 2653 2654 return 0; 2655 } 2656 2657 static struct 
bnxt_filter_info * 2658 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf, 2659 struct bnxt_vnic_info **mvnic) 2660 { 2661 struct bnxt_filter_info *mf = NULL; 2662 int i; 2663 2664 for (i = bp->nr_vnics - 1; i >= 0; i--) { 2665 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2666 2667 STAILQ_FOREACH(mf, &vnic->filter, next) { 2668 if (mf->filter_type == nf->filter_type && 2669 mf->flags == nf->flags && 2670 mf->src_port == nf->src_port && 2671 mf->src_port_mask == nf->src_port_mask && 2672 mf->dst_port == nf->dst_port && 2673 mf->dst_port_mask == nf->dst_port_mask && 2674 mf->ip_protocol == nf->ip_protocol && 2675 mf->ip_addr_type == nf->ip_addr_type && 2676 mf->ethertype == nf->ethertype && 2677 mf->vni == nf->vni && 2678 mf->tunnel_type == nf->tunnel_type && 2679 mf->l2_ovlan == nf->l2_ovlan && 2680 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 2681 mf->l2_ivlan == nf->l2_ivlan && 2682 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 2683 !memcmp(mf->l2_addr, nf->l2_addr, 2684 RTE_ETHER_ADDR_LEN) && 2685 !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, 2686 RTE_ETHER_ADDR_LEN) && 2687 !memcmp(mf->src_macaddr, nf->src_macaddr, 2688 RTE_ETHER_ADDR_LEN) && 2689 !memcmp(mf->dst_macaddr, nf->dst_macaddr, 2690 RTE_ETHER_ADDR_LEN) && 2691 !memcmp(mf->src_ipaddr, nf->src_ipaddr, 2692 sizeof(nf->src_ipaddr)) && 2693 !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, 2694 sizeof(nf->src_ipaddr_mask)) && 2695 !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, 2696 sizeof(nf->dst_ipaddr)) && 2697 !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, 2698 sizeof(nf->dst_ipaddr_mask))) { 2699 if (mvnic) 2700 *mvnic = vnic; 2701 return mf; 2702 } 2703 } 2704 } 2705 return NULL; 2706 } 2707 2708 static int 2709 bnxt_fdir_filter(struct rte_eth_dev *dev, 2710 enum rte_filter_op filter_op, 2711 void *arg) 2712 { 2713 struct bnxt *bp = dev->data->dev_private; 2714 struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg; 2715 struct bnxt_filter_info *filter, *match; 2716 struct bnxt_vnic_info *vnic, *mvnic; 2717 int ret = 0, i; 2718 2719 if (filter_op == RTE_ETH_FILTER_NOP) 2720 return 0; 2721 2722 if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) 2723 return -EINVAL; 2724 2725 switch (filter_op) { 2726 case RTE_ETH_FILTER_ADD: 2727 case RTE_ETH_FILTER_DELETE: 2728 /* FALLTHROUGH */ 2729 filter = bnxt_get_unused_filter(bp); 2730 if (filter == NULL) { 2731 PMD_DRV_LOG(ERR, 2732 "Not enough resources for a new flow.\n"); 2733 return -ENOMEM; 2734 } 2735 2736 ret = bnxt_parse_fdir_filter(bp, fdir, filter); 2737 if (ret != 0) 2738 goto free_filter; 2739 filter->filter_type = HWRM_CFA_NTUPLE_FILTER; 2740 2741 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 2742 vnic = &bp->vnic_info[0]; 2743 else 2744 vnic = &bp->vnic_info[fdir->action.rx_queue]; 2745 2746 match = bnxt_match_fdir(bp, filter, &mvnic); 2747 if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) { 2748 if (match->dst_id == vnic->fw_vnic_id) { 2749 PMD_DRV_LOG(ERR, "Flow already exists.\n"); 2750 ret = -EEXIST; 2751 goto free_filter; 2752 } else { 2753 match->dst_id = vnic->fw_vnic_id; 2754 ret = bnxt_hwrm_set_ntuple_filter(bp, 2755 match->dst_id, 2756 match); 2757 STAILQ_REMOVE(&mvnic->filter, match, 2758 bnxt_filter_info, next); 2759 STAILQ_INSERT_TAIL(&vnic->filter, match, next); 2760 PMD_DRV_LOG(ERR, 2761 "Filter with matching pattern exist\n"); 2762 PMD_DRV_LOG(ERR, 2763 "Updated it to new destination q\n"); 2764 goto free_filter; 2765 } 2766 } 2767 if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) { 2768 PMD_DRV_LOG(ERR, "Flow does not 
exist.\n"); 2769 ret = -ENOENT; 2770 goto free_filter; 2771 } 2772 2773 if (filter_op == RTE_ETH_FILTER_ADD) { 2774 ret = bnxt_hwrm_set_ntuple_filter(bp, 2775 filter->dst_id, 2776 filter); 2777 if (ret) 2778 goto free_filter; 2779 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2780 } else { 2781 ret = bnxt_hwrm_clear_ntuple_filter(bp, match); 2782 STAILQ_REMOVE(&vnic->filter, match, 2783 bnxt_filter_info, next); 2784 bnxt_free_filter(bp, match); 2785 filter->fw_l2_filter_id = -1; 2786 bnxt_free_filter(bp, filter); 2787 } 2788 break; 2789 case RTE_ETH_FILTER_FLUSH: 2790 for (i = bp->nr_vnics - 1; i >= 0; i--) { 2791 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2792 2793 STAILQ_FOREACH(filter, &vnic->filter, next) { 2794 if (filter->filter_type == 2795 HWRM_CFA_NTUPLE_FILTER) { 2796 ret = 2797 bnxt_hwrm_clear_ntuple_filter(bp, 2798 filter); 2799 STAILQ_REMOVE(&vnic->filter, filter, 2800 bnxt_filter_info, next); 2801 } 2802 } 2803 } 2804 return ret; 2805 case RTE_ETH_FILTER_UPDATE: 2806 case RTE_ETH_FILTER_STATS: 2807 case RTE_ETH_FILTER_INFO: 2808 PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op); 2809 break; 2810 default: 2811 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); 2812 ret = -EINVAL; 2813 break; 2814 } 2815 return ret; 2816 2817 free_filter: 2818 filter->fw_l2_filter_id = -1; 2819 bnxt_free_filter(bp, filter); 2820 return ret; 2821 } 2822 2823 static int 2824 bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused, 2825 enum rte_filter_type filter_type, 2826 enum rte_filter_op filter_op, void *arg) 2827 { 2828 int ret = 0; 2829 2830 switch (filter_type) { 2831 case RTE_ETH_FILTER_TUNNEL: 2832 PMD_DRV_LOG(ERR, 2833 "filter type: %d: To be implemented\n", filter_type); 2834 break; 2835 case RTE_ETH_FILTER_FDIR: 2836 ret = bnxt_fdir_filter(dev, filter_op, arg); 2837 break; 2838 case RTE_ETH_FILTER_NTUPLE: 2839 ret = bnxt_ntuple_filter(dev, filter_op, arg); 2840 break; 2841 case RTE_ETH_FILTER_ETHERTYPE: 2842 ret = bnxt_ethertype_filter(dev, filter_op, arg); 2843 break; 2844 case RTE_ETH_FILTER_GENERIC: 2845 if (filter_op != RTE_ETH_FILTER_GET) 2846 return -EINVAL; 2847 *(const void **)arg = &bnxt_flow_ops; 2848 break; 2849 default: 2850 PMD_DRV_LOG(ERR, 2851 "Filter type (%d) not supported", filter_type); 2852 ret = -EINVAL; 2853 break; 2854 } 2855 return ret; 2856 } 2857 2858 static const uint32_t * 2859 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 2860 { 2861 static const uint32_t ptypes[] = { 2862 RTE_PTYPE_L2_ETHER_VLAN, 2863 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 2864 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 2865 RTE_PTYPE_L4_ICMP, 2866 RTE_PTYPE_L4_TCP, 2867 RTE_PTYPE_L4_UDP, 2868 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 2869 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 2870 RTE_PTYPE_INNER_L4_ICMP, 2871 RTE_PTYPE_INNER_L4_TCP, 2872 RTE_PTYPE_INNER_L4_UDP, 2873 RTE_PTYPE_UNKNOWN 2874 }; 2875 2876 if (!dev->rx_pkt_burst) 2877 return NULL; 2878 2879 return ptypes; 2880 } 2881 2882 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 2883 int reg_win) 2884 { 2885 uint32_t reg_base = *reg_arr & 0xfffff000; 2886 uint32_t win_off; 2887 int i; 2888 2889 for (i = 0; i < count; i++) { 2890 if ((reg_arr[i] & 0xfffff000) != reg_base) 2891 return -ERANGE; 2892 } 2893 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 2894 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 2895 return 0; 2896 } 2897 2898 static int bnxt_map_ptp_regs(struct bnxt *bp) 2899 { 2900 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2901 uint32_t *reg_arr; 2902 int rc, i; 2903 2904 
reg_arr = ptp->rx_regs; 2905 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 2906 if (rc) 2907 return rc; 2908 2909 reg_arr = ptp->tx_regs; 2910 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 2911 if (rc) 2912 return rc; 2913 2914 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 2915 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 2916 2917 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 2918 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 2919 2920 return 0; 2921 } 2922 2923 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 2924 { 2925 rte_write32(0, (uint8_t *)bp->bar0 + 2926 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 2927 rte_write32(0, (uint8_t *)bp->bar0 + 2928 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 2929 } 2930 2931 static uint64_t bnxt_cc_read(struct bnxt *bp) 2932 { 2933 uint64_t ns; 2934 2935 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2936 BNXT_GRCPF_REG_SYNC_TIME)); 2937 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2938 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 2939 return ns; 2940 } 2941 2942 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 2943 { 2944 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2945 uint32_t fifo; 2946 2947 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2948 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 2949 if (fifo & BNXT_PTP_TX_FIFO_EMPTY) 2950 return -EAGAIN; 2951 2952 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2953 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 2954 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2955 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 2956 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2957 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 2958 2959 return 0; 2960 } 2961 2962 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 2963 { 2964 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2965 struct bnxt_pf_info *pf = &bp->pf; 2966 uint16_t port_id; 2967 uint32_t fifo; 2968 2969 if (!ptp) 2970 return -ENODEV; 2971 2972 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2973 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 2974 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 2975 return -EAGAIN; 2976 2977 port_id = pf->port_id; 2978 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 2979 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 2980 2981 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2982 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 2983 if (fifo & BNXT_PTP_RX_FIFO_PENDING) { 2984 /* bnxt_clr_rx_ts(bp); TBD */ 2985 return -EBUSY; 2986 } 2987 2988 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2989 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 2990 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2991 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 2992 2993 return 0; 2994 } 2995 2996 static int 2997 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 2998 { 2999 uint64_t ns; 3000 struct bnxt *bp = dev->data->dev_private; 3001 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3002 3003 if (!ptp) 3004 return 0; 3005 3006 ns = rte_timespec_to_ns(ts); 3007 /* Set the timecounters to a new value. 
*/ 3008 ptp->tc.nsec = ns; 3009 3010 return 0; 3011 } 3012 3013 static int 3014 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 3015 { 3016 uint64_t ns, systime_cycles; 3017 struct bnxt *bp = dev->data->dev_private; 3018 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3019 3020 if (!ptp) 3021 return 0; 3022 3023 systime_cycles = bnxt_cc_read(bp); 3024 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3025 *ts = rte_ns_to_timespec(ns); 3026 3027 return 0; 3028 } 3029 static int 3030 bnxt_timesync_enable(struct rte_eth_dev *dev) 3031 { 3032 struct bnxt *bp = dev->data->dev_private; 3033 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3034 uint32_t shift = 0; 3035 3036 if (!ptp) 3037 return 0; 3038 3039 ptp->rx_filter = 1; 3040 ptp->tx_tstamp_en = 1; 3041 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3042 3043 if (!bnxt_hwrm_ptp_cfg(bp)) 3044 bnxt_map_ptp_regs(bp); 3045 3046 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 3047 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3048 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3049 3050 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3051 ptp->tc.cc_shift = shift; 3052 ptp->tc.nsec_mask = (1ULL << shift) - 1; 3053 3054 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3055 ptp->rx_tstamp_tc.cc_shift = shift; 3056 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3057 3058 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3059 ptp->tx_tstamp_tc.cc_shift = shift; 3060 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3061 3062 return 0; 3063 } 3064 3065 static int 3066 bnxt_timesync_disable(struct rte_eth_dev *dev) 3067 { 3068 struct bnxt *bp = dev->data->dev_private; 3069 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3070 3071 if (!ptp) 3072 return 0; 3073 3074 ptp->rx_filter = 0; 3075 ptp->tx_tstamp_en = 0; 3076 ptp->rxctl = 0; 3077 3078 bnxt_hwrm_ptp_cfg(bp); 3079 3080 bnxt_unmap_ptp_regs(bp); 3081 3082 return 0; 3083 } 3084 3085 static int 3086 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3087 struct timespec *timestamp, 3088 uint32_t flags __rte_unused) 3089 { 3090 struct bnxt *bp = dev->data->dev_private; 3091 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3092 uint64_t rx_tstamp_cycles = 0; 3093 uint64_t ns; 3094 3095 if (!ptp) 3096 return 0; 3097 3098 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 3099 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 3100 *timestamp = rte_ns_to_timespec(ns); 3101 return 0; 3102 } 3103 3104 static int 3105 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3106 struct timespec *timestamp) 3107 { 3108 struct bnxt *bp = dev->data->dev_private; 3109 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3110 uint64_t tx_tstamp_cycles = 0; 3111 uint64_t ns; 3112 3113 if (!ptp) 3114 return 0; 3115 3116 bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 3117 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 3118 *timestamp = rte_ns_to_timespec(ns); 3119 3120 return 0; 3121 } 3122 3123 static int 3124 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 3125 { 3126 struct bnxt *bp = dev->data->dev_private; 3127 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3128 3129 if (!ptp) 3130 return 0; 3131 3132 ptp->tc.nsec += delta; 3133 3134 return 0; 3135 } 3136 3137 static int 3138 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 3139 { 3140 struct bnxt *bp = dev->data->dev_private; 3141 int rc; 3142 uint32_t dir_entries; 3143 uint32_t entry_length; 3144 3145 PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n", 3146 bp->pdev->addr.domain, bp->pdev->addr.bus, 3147 
bp->pdev->addr.devid, bp->pdev->addr.function); 3148 3149 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 3150 if (rc != 0) 3151 return rc; 3152 3153 return dir_entries * entry_length; 3154 } 3155 3156 static int 3157 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 3158 struct rte_dev_eeprom_info *in_eeprom) 3159 { 3160 struct bnxt *bp = dev->data->dev_private; 3161 uint32_t index; 3162 uint32_t offset; 3163 3164 PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d " 3165 "len = %d\n", bp->pdev->addr.domain, 3166 bp->pdev->addr.bus, bp->pdev->addr.devid, 3167 bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); 3168 3169 if (in_eeprom->offset == 0) /* special offset value to get directory */ 3170 return bnxt_get_nvram_directory(bp, in_eeprom->length, 3171 in_eeprom->data); 3172 3173 index = in_eeprom->offset >> 24; 3174 offset = in_eeprom->offset & 0xffffff; 3175 3176 if (index != 0) 3177 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 3178 in_eeprom->length, in_eeprom->data); 3179 3180 return 0; 3181 } 3182 3183 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 3184 { 3185 switch (dir_type) { 3186 case BNX_DIR_TYPE_CHIMP_PATCH: 3187 case BNX_DIR_TYPE_BOOTCODE: 3188 case BNX_DIR_TYPE_BOOTCODE_2: 3189 case BNX_DIR_TYPE_APE_FW: 3190 case BNX_DIR_TYPE_APE_PATCH: 3191 case BNX_DIR_TYPE_KONG_FW: 3192 case BNX_DIR_TYPE_KONG_PATCH: 3193 case BNX_DIR_TYPE_BONO_FW: 3194 case BNX_DIR_TYPE_BONO_PATCH: 3195 /* FALLTHROUGH */ 3196 return true; 3197 } 3198 3199 return false; 3200 } 3201 3202 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 3203 { 3204 switch (dir_type) { 3205 case BNX_DIR_TYPE_AVS: 3206 case BNX_DIR_TYPE_EXP_ROM_MBA: 3207 case BNX_DIR_TYPE_PCIE: 3208 case BNX_DIR_TYPE_TSCF_UCODE: 3209 case BNX_DIR_TYPE_EXT_PHY: 3210 case BNX_DIR_TYPE_CCM: 3211 case BNX_DIR_TYPE_ISCSI_BOOT: 3212 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3213 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3214 /* FALLTHROUGH */ 3215 return true; 3216 } 3217 3218 return false; 3219 } 3220 3221 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 3222 { 3223 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3224 bnxt_dir_type_is_other_exec_format(dir_type); 3225 } 3226 3227 static int 3228 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 3229 struct rte_dev_eeprom_info *in_eeprom) 3230 { 3231 struct bnxt *bp = dev->data->dev_private; 3232 uint8_t index, dir_op; 3233 uint16_t type, ext, ordinal, attr; 3234 3235 PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d " 3236 "len = %d\n", bp->pdev->addr.domain, 3237 bp->pdev->addr.bus, bp->pdev->addr.devid, 3238 bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); 3239 3240 if (!BNXT_PF(bp)) { 3241 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 3242 return -EINVAL; 3243 } 3244 3245 type = in_eeprom->magic >> 16; 3246 3247 if (type == 0xffff) { /* special value for directory operations */ 3248 index = in_eeprom->magic & 0xff; 3249 dir_op = in_eeprom->magic >> 8; 3250 if (index == 0) 3251 return -EINVAL; 3252 switch (dir_op) { 3253 case 0x0e: /* erase */ 3254 if (in_eeprom->offset != ~in_eeprom->magic) 3255 return -EINVAL; 3256 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 3257 default: 3258 return -EINVAL; 3259 } 3260 } 3261 3262 /* Create or re-write an NVM item: */ 3263 if (bnxt_dir_type_is_executable(type) == true) 3264 return -EOPNOTSUPP; 3265 ext = in_eeprom->magic & 0xffff; 3266 ordinal = in_eeprom->offset >> 16; 3267 attr = in_eeprom->offset & 0xffff; 3268 3269 return 
bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
				     in_eeprom->data, in_eeprom->length);
}

/*
 * Initialization
 */

static const struct eth_dev_ops bnxt_dev_ops = {
	.dev_infos_get = bnxt_dev_info_get_op,
	.dev_close = bnxt_dev_close_op,
	.dev_configure = bnxt_dev_configure_op,
	.dev_start = bnxt_dev_start_op,
	.dev_stop = bnxt_dev_stop_op,
	.dev_set_link_up = bnxt_dev_set_link_up_op,
	.dev_set_link_down = bnxt_dev_set_link_down_op,
	.stats_get = bnxt_stats_get_op,
	.stats_reset = bnxt_stats_reset_op,
	.rx_queue_setup = bnxt_rx_queue_setup_op,
	.rx_queue_release = bnxt_rx_queue_release_op,
	.tx_queue_setup = bnxt_tx_queue_setup_op,
	.tx_queue_release = bnxt_tx_queue_release_op,
	.rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
	.rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
	.reta_update = bnxt_reta_update_op,
	.reta_query = bnxt_reta_query_op,
	.rss_hash_update = bnxt_rss_hash_update_op,
	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
	.link_update = bnxt_link_update_op,
	.promiscuous_enable = bnxt_promiscuous_enable_op,
	.promiscuous_disable = bnxt_promiscuous_disable_op,
	.allmulticast_enable = bnxt_allmulticast_enable_op,
	.allmulticast_disable = bnxt_allmulticast_disable_op,
	.mac_addr_add = bnxt_mac_addr_add_op,
	.mac_addr_remove = bnxt_mac_addr_remove_op,
	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
	.udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op,
	.udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op,
	.vlan_filter_set = bnxt_vlan_filter_set_op,
	.vlan_offload_set = bnxt_vlan_offload_set_op,
	.vlan_pvid_set = bnxt_vlan_pvid_set_op,
	.mtu_set = bnxt_mtu_set_op,
	.mac_addr_set = bnxt_set_default_mac_addr_op,
	.xstats_get = bnxt_dev_xstats_get_op,
	.xstats_get_names = bnxt_dev_xstats_get_names_op,
	.xstats_reset = bnxt_dev_xstats_reset_op,
	.fw_version_get = bnxt_fw_version_get,
	.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
	.rxq_info_get = bnxt_rxq_info_get_op,
	.txq_info_get = bnxt_txq_info_get_op,
	.dev_led_on = bnxt_dev_led_on_op,
	.dev_led_off = bnxt_dev_led_off_op,
	.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
	.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
	.rx_queue_count = bnxt_rx_queue_count_op,
	.rx_descriptor_status = bnxt_rx_descriptor_status_op,
	.tx_descriptor_status = bnxt_tx_descriptor_status_op,
	.rx_queue_start = bnxt_rx_queue_start,
	.rx_queue_stop = bnxt_rx_queue_stop,
	.tx_queue_start = bnxt_tx_queue_start,
	.tx_queue_stop = bnxt_tx_queue_stop,
	.filter_ctrl = bnxt_filter_ctrl_op,
	.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
	.get_eeprom_length = bnxt_get_eeprom_length_op,
	.get_eeprom = bnxt_get_eeprom_op,
	.set_eeprom = bnxt_set_eeprom_op,
	.timesync_enable = bnxt_timesync_enable,
	.timesync_disable = bnxt_timesync_disable,
	.timesync_read_time = bnxt_timesync_read_time,
	.timesync_write_time = bnxt_timesync_write_time,
	.timesync_adjust_time = bnxt_timesync_adjust_time,
	.timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
};

static bool bnxt_vf_pciid(uint16_t id)
{
	if (id == BROADCOM_DEV_ID_57304_VF ||
	    id == BROADCOM_DEV_ID_57406_VF ||
	    id ==
BROADCOM_DEV_ID_5731X_VF || 3351 id == BROADCOM_DEV_ID_5741X_VF || 3352 id == BROADCOM_DEV_ID_57414_VF || 3353 id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 || 3354 id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 || 3355 id == BROADCOM_DEV_ID_58802_VF || 3356 id == BROADCOM_DEV_ID_57500_VF) 3357 return true; 3358 return false; 3359 } 3360 3361 bool bnxt_stratus_device(struct bnxt *bp) 3362 { 3363 uint16_t id = bp->pdev->id.device_id; 3364 3365 if (id == BROADCOM_DEV_ID_STRATUS_NIC || 3366 id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 || 3367 id == BROADCOM_DEV_ID_STRATUS_NIC_VF2) 3368 return true; 3369 return false; 3370 } 3371 3372 static int bnxt_init_board(struct rte_eth_dev *eth_dev) 3373 { 3374 struct bnxt *bp = eth_dev->data->dev_private; 3375 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3376 int rc; 3377 3378 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 3379 if (!pci_dev->mem_resource[0].addr) { 3380 PMD_DRV_LOG(ERR, 3381 "Cannot find PCI device base address, aborting\n"); 3382 rc = -ENODEV; 3383 goto init_err_disable; 3384 } 3385 3386 bp->eth_dev = eth_dev; 3387 bp->pdev = pci_dev; 3388 3389 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 3390 if (!bp->bar0) { 3391 PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n"); 3392 rc = -ENOMEM; 3393 goto init_err_release; 3394 } 3395 3396 if (!pci_dev->mem_resource[2].addr) { 3397 PMD_DRV_LOG(ERR, 3398 "Cannot find PCI device BAR 2 address, aborting\n"); 3399 rc = -ENODEV; 3400 goto init_err_release; 3401 } else { 3402 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 3403 } 3404 3405 return 0; 3406 3407 init_err_release: 3408 if (bp->bar0) 3409 bp->bar0 = NULL; 3410 if (bp->doorbell_base) 3411 bp->doorbell_base = NULL; 3412 3413 init_err_disable: 3414 3415 return rc; 3416 } 3417 3418 static int bnxt_alloc_ctx_mem_blk(__rte_unused struct bnxt *bp, 3419 struct bnxt_ctx_pg_info *ctx_pg, 3420 uint32_t mem_size, 3421 const char *suffix, 3422 uint16_t idx) 3423 { 3424 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 3425 const struct rte_memzone *mz = NULL; 3426 char mz_name[RTE_MEMZONE_NAMESIZE]; 3427 rte_iova_t mz_phys_addr; 3428 uint64_t valid_bits = 0; 3429 uint32_t sz; 3430 int i; 3431 3432 if (!mem_size) 3433 return 0; 3434 3435 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 3436 BNXT_PAGE_SIZE; 3437 rmem->page_size = BNXT_PAGE_SIZE; 3438 rmem->pg_arr = ctx_pg->ctx_pg_arr; 3439 rmem->dma_arr = ctx_pg->ctx_dma_arr; 3440 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 3441 3442 valid_bits = PTU_PTE_VALID; 3443 3444 if (rmem->nr_pages > 1) { 3445 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_tbl%s_%x", 3446 suffix, idx); 3447 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 3448 mz = rte_memzone_lookup(mz_name); 3449 if (!mz) { 3450 mz = rte_memzone_reserve_aligned(mz_name, 3451 rmem->nr_pages * 8, 3452 SOCKET_ID_ANY, 3453 RTE_MEMZONE_2MB | 3454 RTE_MEMZONE_SIZE_HINT_ONLY | 3455 RTE_MEMZONE_IOVA_CONTIG, 3456 BNXT_PAGE_SIZE); 3457 if (mz == NULL) 3458 return -ENOMEM; 3459 } 3460 3461 memset(mz->addr, 0, mz->len); 3462 mz_phys_addr = mz->iova; 3463 if ((unsigned long)mz->addr == mz_phys_addr) { 3464 PMD_DRV_LOG(WARNING, 3465 "Memzone physical address same as virtual.\n"); 3466 PMD_DRV_LOG(WARNING, 3467 "Using rte_mem_virt2iova()\n"); 3468 mz_phys_addr = rte_mem_virt2iova(mz->addr); 3469 if (mz_phys_addr == 0) { 3470 PMD_DRV_LOG(ERR, 3471 "unable to map addr to phys memory\n"); 3472 return -ENOMEM; 3473 } 3474 } 3475 rte_mem_lock_page(((char *)mz->addr)); 3476 3477 rmem->pg_tbl = mz->addr; 3478 
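		/*
		 * pg_tbl is the page directory for this context block; the
		 * loop further below fills it with little-endian PTEs
		 * (PTU_PTE_VALID plus the DMA address of each data page).
		 */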
rmem->pg_tbl_map = mz_phys_addr; 3479 rmem->pg_tbl_mz = mz; 3480 } 3481 3482 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x", suffix, idx); 3483 mz = rte_memzone_lookup(mz_name); 3484 if (!mz) { 3485 mz = rte_memzone_reserve_aligned(mz_name, 3486 mem_size, 3487 SOCKET_ID_ANY, 3488 RTE_MEMZONE_1GB | 3489 RTE_MEMZONE_SIZE_HINT_ONLY | 3490 RTE_MEMZONE_IOVA_CONTIG, 3491 BNXT_PAGE_SIZE); 3492 if (mz == NULL) 3493 return -ENOMEM; 3494 } 3495 3496 memset(mz->addr, 0, mz->len); 3497 mz_phys_addr = mz->iova; 3498 if ((unsigned long)mz->addr == mz_phys_addr) { 3499 PMD_DRV_LOG(WARNING, 3500 "Memzone physical address same as virtual.\n"); 3501 PMD_DRV_LOG(WARNING, 3502 "Using rte_mem_virt2iova()\n"); 3503 for (sz = 0; sz < mem_size; sz += BNXT_PAGE_SIZE) 3504 rte_mem_lock_page(((char *)mz->addr) + sz); 3505 mz_phys_addr = rte_mem_virt2iova(mz->addr); 3506 if (mz_phys_addr == RTE_BAD_IOVA) { 3507 PMD_DRV_LOG(ERR, 3508 "unable to map addr to phys memory\n"); 3509 return -ENOMEM; 3510 } 3511 } 3512 3513 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 3514 rte_mem_lock_page(((char *)mz->addr) + sz); 3515 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 3516 rmem->dma_arr[i] = mz_phys_addr + sz; 3517 3518 if (rmem->nr_pages > 1) { 3519 if (i == rmem->nr_pages - 2 && 3520 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3521 valid_bits |= PTU_PTE_NEXT_TO_LAST; 3522 else if (i == rmem->nr_pages - 1 && 3523 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3524 valid_bits |= PTU_PTE_LAST; 3525 3526 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 3527 valid_bits); 3528 } 3529 } 3530 3531 rmem->mz = mz; 3532 if (rmem->vmem_size) 3533 rmem->vmem = (void **)mz->addr; 3534 rmem->dma_arr[0] = mz_phys_addr; 3535 return 0; 3536 } 3537 3538 static void bnxt_free_ctx_mem(struct bnxt *bp) 3539 { 3540 int i; 3541 3542 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 3543 return; 3544 3545 bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; 3546 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 3547 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 3548 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 3549 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 3550 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 3551 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 3552 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 3553 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 3554 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 3555 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 3556 3557 for (i = 0; i < BNXT_MAX_Q; i++) { 3558 if (bp->ctx->tqm_mem[i]) 3559 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 3560 } 3561 3562 rte_free(bp->ctx); 3563 bp->ctx = NULL; 3564 } 3565 3566 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 3567 3568 #define min_t(type, x, y) ({ \ 3569 type __min1 = (x); \ 3570 type __min2 = (y); \ 3571 __min1 < __min2 ? __min1 : __min2; }) 3572 3573 #define max_t(type, x, y) ({ \ 3574 type __max1 = (x); \ 3575 type __max2 = (y); \ 3576 __max1 > __max2 ? 
__max1 : __max2; }) 3577 3578 #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) 3579 3580 int bnxt_alloc_ctx_mem(struct bnxt *bp) 3581 { 3582 struct bnxt_ctx_pg_info *ctx_pg; 3583 struct bnxt_ctx_mem_info *ctx; 3584 uint32_t mem_size, ena, entries; 3585 int i, rc; 3586 3587 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 3588 if (rc) { 3589 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 3590 return rc; 3591 } 3592 ctx = bp->ctx; 3593 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 3594 return 0; 3595 3596 ctx_pg = &ctx->qp_mem; 3597 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 3598 mem_size = ctx->qp_entry_size * ctx_pg->entries; 3599 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 3600 if (rc) 3601 return rc; 3602 3603 ctx_pg = &ctx->srq_mem; 3604 ctx_pg->entries = ctx->srq_max_l2_entries; 3605 mem_size = ctx->srq_entry_size * ctx_pg->entries; 3606 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 3607 if (rc) 3608 return rc; 3609 3610 ctx_pg = &ctx->cq_mem; 3611 ctx_pg->entries = ctx->cq_max_l2_entries; 3612 mem_size = ctx->cq_entry_size * ctx_pg->entries; 3613 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 3614 if (rc) 3615 return rc; 3616 3617 ctx_pg = &ctx->vnic_mem; 3618 ctx_pg->entries = ctx->vnic_max_vnic_entries + 3619 ctx->vnic_max_ring_table_entries; 3620 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 3621 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 3622 if (rc) 3623 return rc; 3624 3625 ctx_pg = &ctx->stat_mem; 3626 ctx_pg->entries = ctx->stat_max_entries; 3627 mem_size = ctx->stat_entry_size * ctx_pg->entries; 3628 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 3629 if (rc) 3630 return rc; 3631 3632 entries = ctx->qp_max_l2_entries; 3633 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 3634 entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring, 3635 ctx->tqm_max_entries_per_ring); 3636 for (i = 0, ena = 0; i < BNXT_MAX_Q; i++) { 3637 ctx_pg = ctx->tqm_mem[i]; 3638 /* use min tqm entries for now. 
*/ 3639 ctx_pg->entries = entries; 3640 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 3641 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i); 3642 if (rc) 3643 return rc; 3644 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 3645 } 3646 3647 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 3648 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 3649 if (rc) 3650 PMD_DRV_LOG(ERR, 3651 "Failed to configure context mem: rc = %d\n", rc); 3652 else 3653 ctx->flags |= BNXT_CTX_FLAG_INITED; 3654 3655 return 0; 3656 } 3657 3658 static int bnxt_alloc_stats_mem(struct bnxt *bp) 3659 { 3660 struct rte_pci_device *pci_dev = bp->pdev; 3661 char mz_name[RTE_MEMZONE_NAMESIZE]; 3662 const struct rte_memzone *mz = NULL; 3663 uint32_t total_alloc_len; 3664 rte_iova_t mz_phys_addr; 3665 3666 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 3667 return 0; 3668 3669 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 3670 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 3671 pci_dev->addr.bus, pci_dev->addr.devid, 3672 pci_dev->addr.function, "rx_port_stats"); 3673 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 3674 mz = rte_memzone_lookup(mz_name); 3675 total_alloc_len = 3676 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 3677 sizeof(struct rx_port_stats_ext) + 512); 3678 if (!mz) { 3679 mz = rte_memzone_reserve(mz_name, total_alloc_len, 3680 SOCKET_ID_ANY, 3681 RTE_MEMZONE_2MB | 3682 RTE_MEMZONE_SIZE_HINT_ONLY | 3683 RTE_MEMZONE_IOVA_CONTIG); 3684 if (mz == NULL) 3685 return -ENOMEM; 3686 } 3687 memset(mz->addr, 0, mz->len); 3688 mz_phys_addr = mz->iova; 3689 if ((unsigned long)mz->addr == mz_phys_addr) { 3690 PMD_DRV_LOG(WARNING, 3691 "Memzone physical address same as virtual.\n"); 3692 PMD_DRV_LOG(WARNING, 3693 "Using rte_mem_virt2iova()\n"); 3694 mz_phys_addr = rte_mem_virt2iova(mz->addr); 3695 if (mz_phys_addr == 0) { 3696 PMD_DRV_LOG(ERR, 3697 "Can't map address to physical memory\n"); 3698 return -ENOMEM; 3699 } 3700 } 3701 3702 bp->rx_mem_zone = (const void *)mz; 3703 bp->hw_rx_port_stats = mz->addr; 3704 bp->hw_rx_port_stats_map = mz_phys_addr; 3705 3706 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 3707 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 3708 pci_dev->addr.bus, pci_dev->addr.devid, 3709 pci_dev->addr.function, "tx_port_stats"); 3710 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 3711 mz = rte_memzone_lookup(mz_name); 3712 total_alloc_len = 3713 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 3714 sizeof(struct tx_port_stats_ext) + 512); 3715 if (!mz) { 3716 mz = rte_memzone_reserve(mz_name, 3717 total_alloc_len, 3718 SOCKET_ID_ANY, 3719 RTE_MEMZONE_2MB | 3720 RTE_MEMZONE_SIZE_HINT_ONLY | 3721 RTE_MEMZONE_IOVA_CONTIG); 3722 if (mz == NULL) 3723 return -ENOMEM; 3724 } 3725 memset(mz->addr, 0, mz->len); 3726 mz_phys_addr = mz->iova; 3727 if ((unsigned long)mz->addr == mz_phys_addr) { 3728 PMD_DRV_LOG(WARNING, 3729 "Memzone physical address same as virtual\n"); 3730 PMD_DRV_LOG(WARNING, 3731 "Using rte_mem_virt2iova()\n"); 3732 mz_phys_addr = rte_mem_virt2iova(mz->addr); 3733 if (mz_phys_addr == 0) { 3734 PMD_DRV_LOG(ERR, 3735 "Can't map address to physical memory\n"); 3736 return -ENOMEM; 3737 } 3738 } 3739 3740 bp->tx_mem_zone = (const void *)mz; 3741 bp->hw_tx_port_stats = mz->addr; 3742 bp->hw_tx_port_stats_map = mz_phys_addr; 3743 bp->flags |= BNXT_FLAG_PORT_STATS; 3744 3745 /* Display extended statistics if FW supports it */ 3746 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 3747 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 3748 !(bp->flags & 
BNXT_FLAG_EXT_STATS_SUPPORTED))
		return 0;

	bp->hw_rx_port_stats_ext = (void *)
		((uint8_t *)bp->hw_rx_port_stats +
		 sizeof(struct rx_port_stats));
	bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
		sizeof(struct rx_port_stats);
	bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;

	if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
	    bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
		bp->hw_tx_port_stats_ext = (void *)
			((uint8_t *)bp->hw_tx_port_stats +
			 sizeof(struct tx_port_stats));
		bp->hw_tx_port_stats_ext_map =
			bp->hw_tx_port_stats_map +
			sizeof(struct tx_port_stats);
		bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
	}

	return 0;
}

static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc = 0;

	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
					       RTE_ETHER_ADDR_LEN *
					       bp->max_l2_ctx,
					       0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
		return -ENOMEM;
	}

	if (bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) {
		if (BNXT_PF(bp))
			return -EINVAL;

		/* Generate a random MAC address, if none was assigned by PF */
		PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
		bnxt_eth_hw_addr_random(bp->mac_addr);
		PMD_DRV_LOG(INFO,
			    "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
			    bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
			    bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);

		rc = bnxt_hwrm_set_mac(bp);
		if (!rc)
			memcpy(&bp->eth_dev->data->mac_addrs[0], bp->mac_addr,
			       RTE_ETHER_ADDR_LEN);
		return rc;
	}

	/* Copy the permanent MAC from the FUNC_QCAPS response */
	memcpy(bp->mac_addr, bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN);
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);

	return rc;
}

#define ALLOW_FUNC(x)	\
	{ \
		uint32_t arg = (x); \
		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
	}
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	static int version_printed;
	struct bnxt *bp;
	uint16_t mtu;
	int rc;

	if (version_printed++ == 0)
		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	bp = eth_dev->data->dev_private;

	bp->dev_stopped = 1;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto skip_init;

	if (bnxt_vf_pciid(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_VF;

	if (pci_dev->id.device_id == BROADCOM_DEV_ID_57508 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_57504 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_57502 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF)
		bp->flags |= BNXT_FLAG_THOR_CHIP;

	rc = bnxt_init_board(eth_dev);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Board initialization failed rc: %x\n", rc);
		goto error;
	}
skip_init:
	eth_dev->dev_ops = &bnxt_dev_ops;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "hwrm resource allocation failure rc: %x\n",
#define ALLOW_FUNC(x)	\
	{ \
		uint32_t arg = (x); \
		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
	}
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	static int version_printed;
	struct bnxt *bp;
	uint16_t mtu;
	int rc;

	if (version_printed++ == 0)
		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	bp = eth_dev->data->dev_private;

	bp->dev_stopped = 1;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto skip_init;

	if (bnxt_vf_pciid(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_VF;

	if (pci_dev->id.device_id == BROADCOM_DEV_ID_57508 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_57504 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_57502 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF)
		bp->flags |= BNXT_FLAG_THOR_CHIP;

	rc = bnxt_init_board(eth_dev);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Board initialization failed rc: %x\n", rc);
		goto error;
	}
skip_init:
	eth_dev->dev_ops = &bnxt_dev_ops;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "hwrm resource allocation failure rc: %x\n", rc);
		goto error_free;
	}
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto error_free;

	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
		rc = -EIO;
		goto error_free;
	}

	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
		goto error_free;
	}
	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
		goto error_free;
	}

	rc = bnxt_alloc_stats_mem(bp);
	if (rc)
		goto error_free;

	if (bp->max_tx_rings == 0) {
		PMD_DRV_LOG(ERR, "No TX rings available!\n");
		rc = -EBUSY;
		goto error_free;
	}

	rc = bnxt_setup_mac_addr(eth_dev);
	if (rc)
		goto error_free;

	/* THOR does not support ring groups.
	 * But we will use the array to save RSS context IDs.
	 */
	if (BNXT_CHIP_THOR(bp)) {
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
	} else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
		/* 1 ring is for default completion ring */
		PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
		rc = -ENOSPC;
		goto error_free;
	}

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info = rte_zmalloc("bnxt_grp_info",
					   sizeof(*bp->grp_info) *
					   bp->max_ring_grps, 0);
		if (!bp->grp_info) {
			PMD_DRV_LOG(ERR,
				    "Failed to alloc %zu bytes for grp info tbl.\n",
				    sizeof(*bp->grp_info) * bp->max_ring_grps);
			rc = -ENOMEM;
			goto error_free;
		}
	}
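
	/*
	 * Added descriptive comment: bnxt_hwrm_ver_get() packs the firmware
	 * version into bp->fw_ver as roughly
	 * (major << 24) | (minor << 16) | (build << 8), so the condition
	 * below selects firmware from 20.6.100 up to (but not including)
	 * 20.7, or firmware at 20.8 and newer.
	 */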
	/* Forward all requests if firmware is new enough */
	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
	     (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
	} else {
		PMD_DRV_LOG(WARNING,
			    "Firmware too old for VF mailbox functionality\n");
		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
	}

	/*
	 * The following are used for driver cleanup. If we disallow these,
	 * VF drivers can't clean up cleanly.
	 */
	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
	ALLOW_FUNC(HWRM_VNIC_FREE);
	ALLOW_FUNC(HWRM_RING_FREE);
	ALLOW_FUNC(HWRM_RING_GRP_FREE);
	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
	rc = bnxt_hwrm_func_driver_register(bp);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Failed to register driver");
		rc = -EBUSY;
		goto error_free;
	}

	PMD_DRV_LOG(INFO,
		    DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
		    pci_dev->mem_resource[0].phys_addr,
		    pci_dev->mem_resource[0].addr);

	rc = bnxt_hwrm_func_qcfg(bp, &mtu);
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
		goto error_free;
	}

	if (mtu >= RTE_ETHER_MIN_MTU && mtu <= BNXT_MAX_MTU &&
	    mtu != eth_dev->data->mtu)
		eth_dev->data->mtu = mtu;

	if (BNXT_PF(bp)) {
		//if (bp->pf.active_vfs) {
		//	TODO: Deallocate VF resources?
		//}
		if (bp->pdev->max_vfs) {
			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
			if (rc) {
				PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
				goto error_free;
			}
		} else {
			rc = bnxt_hwrm_allocate_pf_only(bp);
			if (rc) {
				PMD_DRV_LOG(ERR,
					    "Failed to allocate PF resources\n");
				goto error_free;
			}
		}
	}

	bnxt_hwrm_port_led_qcaps(bp);

	rc = bnxt_setup_int(bp);
	if (rc)
		goto error_free;

	rc = bnxt_alloc_mem(bp);
	if (rc)
		goto error_free_int;

	rc = bnxt_request_int(bp);
	if (rc)
		goto error_free_int;

	bnxt_init_nic(bp);

	return 0;

error_free_int:
	bnxt_disable_int(bp);
	bnxt_hwrm_func_buf_unrgtr(bp);
	bnxt_free_int(bp);
	bnxt_free_mem(bp);
error_free:
	bnxt_dev_uninit(eth_dev);
error:
	return rc;
}

static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
	bnxt_disable_int(bp);
	bnxt_free_int(bp);
	bnxt_free_mem(bp);
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}
	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
	bnxt_free_hwrm_resources(bp);

	if (bp->tx_mem_zone) {
		rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
		bp->tx_mem_zone = NULL;
	}

	if (bp->rx_mem_zone) {
		rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
		bp->rx_mem_zone = NULL;
	}

	if (bp->dev_stopped == 0)
		bnxt_dev_close_op(eth_dev);
	if (bp->pf.vf_info)
		rte_free(bp->pf.vf_info);
	bnxt_free_ctx_mem(bp);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return rc;
}

static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
					     bnxt_dev_init);
}

static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return rte_eth_dev_pci_generic_remove(pci_dev,
						      bnxt_dev_uninit);
	else
		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

RTE_INIT(bnxt_init_log)
{
	bnxt_logtype_driver = rte_log_register("pmd.net.bnxt.driver");
	if (bnxt_logtype_driver >= 0)
		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
}

RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
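
/*
 * Added usage note: the dynamic log type registered above defaults to the
 * NOTICE level.  A more verbose level can typically be requested at run time
 * with an EAL option such as "--log-level=pmd.net.bnxt.driver:debug"; the
 * exact option syntax depends on the DPDK release in use.
 */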