/*-
 * BSD LICENSE
 *
 * Copyright(c) Broadcom Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Broadcom Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"

#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
	"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";

#define PCI_VENDOR_ID_BROADCOM 0x14E4

#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
#define BROADCOM_DEV_ID_57414_VF 0x16c1
#define BROADCOM_DEV_ID_57301 0x16c8
#define BROADCOM_DEV_ID_57302 0x16c9
#define BROADCOM_DEV_ID_57304_PF 0x16ca
#define BROADCOM_DEV_ID_57304_VF 0x16cb
#define BROADCOM_DEV_ID_57417_MF 0x16cc
#define BROADCOM_DEV_ID_NS2 0x16cd
#define BROADCOM_DEV_ID_57311 0x16ce
#define BROADCOM_DEV_ID_57312 0x16cf
#define BROADCOM_DEV_ID_57402 0x16d0
#define BROADCOM_DEV_ID_57404 0x16d1
#define BROADCOM_DEV_ID_57406_PF 0x16d2
#define BROADCOM_DEV_ID_57406_VF 0x16d3
#define BROADCOM_DEV_ID_57402_MF 0x16d4
#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
#define BROADCOM_DEV_ID_57412 0x16d6
#define BROADCOM_DEV_ID_57414 0x16d7
#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
#define BROADCOM_DEV_ID_5741X_VF 0x16dc
#define BROADCOM_DEV_ID_57412_MF 0x16de
#define BROADCOM_DEV_ID_57314 0x16df
#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
#define BROADCOM_DEV_ID_5731X_VF 0x16e1
#define BROADCOM_DEV_ID_57417_SFP 0x16e2
#define BROADCOM_DEV_ID_57416_SFP 0x16e3
#define BROADCOM_DEV_ID_57317_SFP 0x16e4
#define BROADCOM_DEV_ID_57404_MF 0x16e7
#define BROADCOM_DEV_ID_57406_MF 0x16e8
#define BROADCOM_DEV_ID_57407_SFP 0x16e9
#define BROADCOM_DEV_ID_57407_MF 0x16ea
#define BROADCOM_DEV_ID_57414_MF 0x16ec
#define BROADCOM_DEV_ID_57416_MF 0x16ee

static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_ETH_RSS_SUPPORT (	\
	ETH_RSS_IPV4 |		\
	ETH_RSS_NONFRAG_IPV4_TCP |	\
	ETH_RSS_NONFRAG_IPV4_UDP |	\
	ETH_RSS_IPV6 |		\
	ETH_RSS_NONFRAG_IPV6_TCP |	\
	ETH_RSS_NONFRAG_IPV6_UDP)

static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);

/***********************/

/*
 * High level utility functions
 */

static void bnxt_free_mem(struct bnxt *bp)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	bnxt_free_stats(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_def_cp_ring(bp);
}

static int bnxt_alloc_mem(struct bnxt *bp)
{
	int rc;

	/* Default completion ring */
	rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
			      bp->def_cp_ring, "def_cp");
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp);
	return rc;
}

static int bnxt_init_chip(struct bnxt *bp)
{
	unsigned int i, rss_idx, fw_idx;
	struct rte_eth_link new;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
	uint32_t vec = BNXT_MISC_VEC_ID;
	int rc;

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	if (bp->eth_dev->data->mtu > ETHER_MTU) {
		bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
		bp->flags |= BNXT_FLAG_JUMBO;
	} else {
		bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM vnic %d ctx alloc failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM vnic %d filter failure rc: %x\n",
				i, rc);
			goto err_out;
		}
		if (vnic->rss_table && vnic->hash_type) {
			/*
			 * Fill the RSS hash & redirection table with
			 * ring group ids for all VNICs
			 */
			for (rss_idx = 0, fw_idx = 0;
			     rss_idx < HW_HASH_INDEX_SIZE;
			     rss_idx++, fw_idx++) {
				if (vnic->fw_grp_ids[fw_idx] ==
				    INVALID_HW_RING_ID)
					fw_idx = 0;
				vnic->rss_table[rss_idx] =
					vnic->fw_grp_ids[fw_idx];
			}
			rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
			if (rc) {
				RTE_LOG(ERR, PMD,
					"HWRM vnic %d set RSS failure rc: %x\n",
					i, rc);
				goto err_out;
			}
		}

		bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

		if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
		else
			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
	}
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = bp->eth_dev->data->nb_rx_queues;
		RTE_LOG(INFO, PMD, "%s(): intr_vector = %d\n", __func__,
			intr_vector);
		if (intr_vector > bp->rx_cp_nr_rings) {
			RTE_LOG(ERR, PMD, "At most %d intr queues supported",
				bp->rx_cp_nr_rings);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    bp->eth_dev->data->nb_rx_queues *
				    sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues"
				" intr_vec", bp->eth_dev->data->nb_rx_queues);
			return -ENOMEM;
		}
		RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p "
			"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
			__func__, intr_handle->intr_vec, intr_handle->nb_efd,
			intr_handle->max_intr);
	}

	for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
	     queue_id++) {
		intr_handle->intr_vec[queue_id] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
		goto err_out;
	}

	if (!bp->link_info.link_up) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM link config failure rc: %x\n", rc);
			goto err_out;
		}
	}

	return 0;

err_out:
	bnxt_free_all_hwrm_resources(bp);

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}

static int bnxt_init_nic(struct bnxt *bp)
{
	int rc;

	bnxt_init_ring_grps(bp);
	bnxt_init_vnics(bp);
	bnxt_init_filters(bp);

	rc = bnxt_init_chip(bp);
	if (rc)
		return rc;

	return 0;
}

/*
 * Device configuration and status function
 */

static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				 struct rte_eth_dev_info *dev_info)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;
	unsigned int max_rx_rings;

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* MAC Specifics */
	dev_info->max_mac_addrs = bp->max_l2_ctx;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp))
		dev_info->max_vfs = bp->pdev->max_vfs;
	max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
						RTE_MIN(bp->max_rsscos_ctx,
							bp->max_stat_ctx)));
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bp->max_rsscos_ctx;
	dev_info->hash_key_size = 40;
	max_vnics = bp->max_vnics;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
				  + VLAN_TAG_SIZE;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
					DEV_RX_OFFLOAD_IPV4_CKSUM |
					DEV_RX_OFFLOAD_UDP_CKSUM |
					DEV_RX_OFFLOAD_TCP_CKSUM |
					DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
					DEV_TX_OFFLOAD_IPV4_CKSUM |
					DEV_TX_OFFLOAD_TCP_CKSUM |
					DEV_TX_OFFLOAD_UDP_CKSUM |
					DEV_TX_OFFLOAD_TCP_TSO |
					DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
					DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
					DEV_TX_OFFLOAD_GRE_TNL_TSO |
					DEV_TX_OFFLOAD_IPIP_TNL_TSO |
					DEV_TX_OFFLOAD_GENEVE_TNL_TSO;

	/* *INDENT-OFF* */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	eth_dev->data->dev_conf.intr_conf.rxq = 1;

	/* *INDENT-ON* */

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 * need further investigation.
	 */

	/* VMDq resources */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;

	/* Inherit new configurations */
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
	return 0;
}

static inline int
rte_bnxt_atomic_write_link_status(struct rte_eth_dev *eth_dev,
				  struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &eth_dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return 1;

	return 0;
}

static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
			eth_dev->data->port_id,
			(uint32_t)link->link_speed,
			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
			("full-duplex") : ("half-duplex"));
	else
		RTE_LOG(INFO, PMD, "Port %d Link Down\n",
			eth_dev->data->port_id);
}

static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
{
	bnxt_print_link_info(eth_dev);
	return 0;
}

static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	int vlan_mask = 0;
	int rc;

	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		RTE_LOG(ERR, PMD,
			"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	}
	bp->dev_stopped = 0;

	rc = bnxt_init_nic(bp);
	if (rc)
		goto error;

	bnxt_link_update_op(eth_dev, 0);

	if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
		vlan_mask |= ETH_VLAN_FILTER_MASK;
	if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
		vlan_mask |= ETH_VLAN_STRIP_MASK;
	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
	if (rc)
		goto error;

	return 0;

error:
	bnxt_shutdown_nic(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 1;
	bnxt_set_hwrm_link_config(bp, true);
	return 0;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	return 0;
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->eth_dev->data->dev_started) {
		/* TBD: STOP HW queues DMA */
		eth_dev->data->dev_link.link_status = 0;
	}
	bnxt_set_hwrm_link_config(bp, false);
	bnxt_hwrm_port_clr_stats(bp);
	bnxt_shutdown_nic(bp);
	bp->dev_stopped = 1;
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->dev_stopped == 0)
		bnxt_dev_stop_op(eth_dev);

	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_free_mem(bp);
	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS);
	uint32_t i;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < pool; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				if (filter->mac_index == index) {
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_l2_filter(bp, filter);
					filter->mac_index = INVALID_MAC_INDEX;
					memset(&filter->l2_addr, 0,
					       ETHER_ADDR_LEN);
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);
				}
				filter = temp_filter;
			}
		}
	}
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
	struct bnxt_filter_info *filter;

	if (BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}
	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			RTE_LOG(ERR, PMD,
				"MAC addr already exists for pool %d\n", pool);
			return -EINVAL;
		}
	}
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
		return -ENODEV;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	filter->mac_index = index;
	memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
	return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
}

int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_link new;
	unsigned int cnt = BNXT_LINK_WAIT_CNT;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			RTE_LOG(ERR, PMD,
				"Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}
		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);

		if (!wait_to_complete)
			break;
	} while (!new.link_status && cnt--);

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		rte_bnxt_atomic_write_link_status(eth_dev, &new);
		bnxt_print_link_info(eth_dev);
	}

	return rc;
}

static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	int i;

	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			memcpy(vnic->rss_table, reta_conf, reta_size);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}

static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct rte_intr_handle *intr_handle
		= &bp->pdev->intr_handle;

	/* Retrieve from the default VNIC */
	if (!vnic)
		return -EINVAL;
	if (!vnic->rss_table)
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* EW - need to revisit here copying from u64 to u16 */
	memcpy(reta_conf, vnic->rss_table, reta_size);

	if (rte_intr_allow_others(intr_handle)) {
		if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
			bnxt_dev_lsc_intr_setup(eth_dev);
	}

	return 0;
}

static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
				   struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	uint16_t hash_type = 0;
	int i;

	/*
	 * If RSS enablement differs from what was requested in dev_configure,
	 * return -EINVAL
	 */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (!rss_conf->rss_hf)
			RTE_LOG(ERR, PMD, "Hash type NONE\n");
	} else {
		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
			return -EINVAL;
	}

	bp->flags |= BNXT_FLAG_UPDATE_HASH;
	memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));

	if (rss_conf->rss_hf & ETH_RSS_IPV4)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_IPV6)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			vnic->hash_type = hash_type;

			/*
			 * Use the supplied key if the key length is
			 * acceptable and the rss_key is not NULL
			 */
			if (rss_conf->rss_key &&
			    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
				memcpy(vnic->rss_hash_key, rss_conf->rss_key,
				       rss_conf->rss_key_len);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}

static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int len;
	uint32_t hash_types;

	/* RSS configuration is the same for all VNICs */
	if (vnic && vnic->rss_hash_key) {
		if (rss_conf->rss_key) {
			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
		}

		hash_types = vnic->hash_type;
		rss_conf->rss_hf = 0;
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_IPV4;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_IPV6;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
		}
		if (hash_types) {
			RTE_LOG(ERR, PMD,
				"Unknown RSS config from firmware (%08x), RSS disabled",
				vnic->hash_type);
			return -ENOTSUP;
		}
	} else {
		rss_conf->rss_hf = 0;
	}
	return 0;
}

static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
				 struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_link link_info;
	int rc;

	rc = bnxt_get_hwrm_link_config(bp, &link_info);
	if (rc)
		return rc;

	memset(fc_conf, 0, sizeof(*fc_conf));
	if (bp->link_info.auto_pause)
		fc_conf->autoneg = 1;
	switch (bp->link_info.pause) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
	      HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
		fc_conf->mode = RTE_FC_FULL;
		break;
	}
	return 0;
}

static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
				 struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
		return -ENOTSUP;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		bp->link_info.auto_pause = 0;
		bp->link_info.force_pause = 0;
		break;
	case RTE_FC_RX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
		}
		break;
	case RTE_FC_TX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
		}
		break;
	case RTE_FC_FULL:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
		}
		break;
	}
	return bnxt_set_hwrm_link_config(bp, true);
}

/* Add UDP tunneling port */
static int
bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t tunnel_type = 0;
	int rc = 0;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (bp->vxlan_port_cnt) {
			RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
				udp_tunnel->udp_port);
			if (bp->vxlan_port != udp_tunnel->udp_port) {
				RTE_LOG(ERR, PMD, "Only one port allowed\n");
				return -ENOSPC;
			}
			bp->vxlan_port_cnt++;
			return 0;
		}
		tunnel_type =
			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
		bp->vxlan_port_cnt++;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (bp->geneve_port_cnt) {
			RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
				udp_tunnel->udp_port);
			if (bp->geneve_port != udp_tunnel->udp_port) {
				RTE_LOG(ERR, PMD, "Only one port allowed\n");
				return -ENOSPC;
			}
			bp->geneve_port_cnt++;
			return 0;
		}
		tunnel_type =
			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
		bp->geneve_port_cnt++;
		break;
	default:
		RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
		return -ENOTSUP;
	}
	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
					     tunnel_type);
	return rc;
}

static int
bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t tunnel_type = 0;
	uint16_t port = 0;
	int rc = 0;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (!bp->vxlan_port_cnt) {
			RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
			return -EINVAL;
		}
		if (bp->vxlan_port != udp_tunnel->udp_port) {
			RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
				udp_tunnel->udp_port, bp->vxlan_port);
			return -EINVAL;
		}
		if (--bp->vxlan_port_cnt)
			return 0;

		tunnel_type =
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
		port = bp->vxlan_fw_dst_port_id;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (!bp->geneve_port_cnt) {
			RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
			return -EINVAL;
		}
		if (bp->geneve_port != udp_tunnel->udp_port) {
			RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
				udp_tunnel->udp_port, bp->geneve_port);
			return -EINVAL;
		}
		if (--bp->geneve_port_cnt)
			return 0;

		tunnel_type =
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
		port = bp->geneve_fw_dst_port_id;
		break;
	default:
		RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
		return -ENOTSUP;
	}

	rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
	if (!rc) {
		if (tunnel_type ==
		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
			bp->vxlan_port = 0;
		if (tunnel_type ==
		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
			bp->geneve_port = 0;
	}
	return rc;
}

static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
{
	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
	struct bnxt_vnic_info *vnic;
	unsigned int i;
	int rc = 0;
	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;

	/* Cycle through all VNICs */
	for (i = 0; i < bp->nr_vnics; i++) {
		/*
		 * For each VNIC and each associated filter(s)
		 * if VLAN exists && VLAN matches vlan_id
		 *      remove the MAC+VLAN filter
		 *      add a new MAC only filter
		 * else
		 *      VLAN filter doesn't exist, just skip and continue
		 */
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);

				if (filter->enables & chk &&
				    filter->l2_ovlan == vlan_id) {
					/* Must delete the filter */
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_l2_filter(bp, filter);
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);

					/*
					 * Need to examine to see if the MAC
					 * filter already existed or not before
					 * allocating a new one
					 */

					new_filter = bnxt_alloc_filter(bp);
					if (!new_filter) {
						RTE_LOG(ERR, PMD,
							"MAC/VLAN filter alloc failed\n");
						rc = -ENOMEM;
						goto exit;
					}
					STAILQ_INSERT_TAIL(&vnic->filter,
							   new_filter, next);
					/* Inherit MAC from previous filter */
					new_filter->mac_index =
							filter->mac_index;
					memcpy(new_filter->l2_addr,
					       filter->l2_addr, ETHER_ADDR_LEN);
					/* MAC only filter */
					rc = bnxt_hwrm_set_l2_filter(bp,
							vnic->fw_vnic_id,
							new_filter);
					if (rc)
						goto exit;
					RTE_LOG(INFO, PMD,
						"Del Vlan filter for %d\n",
						vlan_id);
				}
				filter = temp_filter;
			}
		}
	}
exit:
	return rc;
}

static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
{
	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
	struct bnxt_vnic_info *vnic;
	unsigned int i;
	int rc = 0;
	uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;

	/* Cycle through all VNICs */
	for (i = 0; i < bp->nr_vnics; i++) {
		/*
		 * For each VNIC and each associated filter(s)
		 * if VLAN exists:
		 *   if VLAN matches vlan_id
		 *      VLAN filter already exists, just skip and continue
		 *   else
		 *      add a new MAC+VLAN filter
		 * else
		 *   Remove the old MAC only filter
		 *   Add a new MAC+VLAN filter
		 */
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);

				if (filter->enables & chk) {
					if (filter->l2_ovlan == vlan_id)
						goto cont;
				} else {
					/* Must delete the MAC filter */
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_l2_filter(bp, filter);
					filter->l2_ovlan = 0;
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);
				}
				new_filter = bnxt_alloc_filter(bp);
				if (!new_filter) {
					RTE_LOG(ERR, PMD,
						"MAC/VLAN filter alloc failed\n");
					rc = -ENOMEM;
					goto exit;
				}
				STAILQ_INSERT_TAIL(&vnic->filter, new_filter,
						   next);
				/* Inherit MAC from the previous filter */
				new_filter->mac_index = filter->mac_index;
				memcpy(new_filter->l2_addr, filter->l2_addr,
				       ETHER_ADDR_LEN);
				/* MAC + VLAN ID filter */
				new_filter->l2_ovlan = vlan_id;
				new_filter->l2_ovlan_mask = 0xF000;
				new_filter->enables |= en;
				rc = bnxt_hwrm_set_l2_filter(bp,
							     vnic->fw_vnic_id,
							     new_filter);
				if (rc)
					goto exit;
				RTE_LOG(INFO, PMD,
					"Added Vlan filter for %d\n", vlan_id);
cont:
				filter = temp_filter;
			}
		}
	}
exit:
	return rc;
}

static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
				   uint16_t vlan_id, int on)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	/* These operations apply to ALL existing MAC/VLAN filters */
	if (on)
		return bnxt_add_vlan_filter(bp, vlan_id);
	else
		return bnxt_del_vlan_filter(bp, vlan_id);
}

static int
bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	unsigned int i;

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (!dev->data->dev_conf.rxmode.hw_vlan_filter) {
			/* Remove any VLAN filters programmed */
			for (i = 0; i < 4095; i++)
				bnxt_del_vlan_filter(bp, i);
		}
		RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n",
			dev->data->dev_conf.rxmode.hw_vlan_filter);
	}

	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		for (i = 0; i < bp->nr_vnics; i++) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
			if (dev->data->dev_conf.rxmode.hw_vlan_strip)
				vnic->vlan_strip = true;
			else
				vnic->vlan_strip = false;
			bnxt_hwrm_vnic_cfg(bp, vnic);
		}
		RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n",
			dev->data->dev_conf.rxmode.hw_vlan_strip);
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");

	return 0;
}

static void
bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	/* Default Filter is tied to VNIC 0 */
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct bnxt_filter_info *filter;
	int rc;

	if (BNXT_VF(bp))
		return;

	memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
	memcpy(&dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		/* Default Filter is at Index 0 */
		if (filter->mac_index != 0)
			continue;
		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
		if (rc)
			break;
		memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
		memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
		rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
		if (rc)
			break;
		filter->mac_index = 0;
		RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
	}
}

static int
bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
			     struct ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	char *mc_addr_list = (char *)mc_addr_set;
	struct bnxt_vnic_info *vnic;
	uint32_t off = 0, i = 0;

	vnic = &bp->vnic_info[0];

	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
		goto allmulti;
	}

	/* TODO Check for Duplicate mcast addresses */
	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	for (i = 0; i < nb_mc_addr; i++) {
		memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN);
		off += ETHER_ADDR_LEN;
	}

	vnic->mc_addr_cnt = i;

allmulti:
	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static int
bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
	uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
	uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
	int ret;

	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
		       fw_major, fw_minor, fw_updt);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void
bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
		     struct rte_eth_rxq_info *qinfo)
{
	struct bnxt_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;

	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = 0;
	qinfo->conf.rx_deferred_start = 0;
}

static void
bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
		     struct rte_eth_txq_info *qinfo)
{
	struct bnxt_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;

	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
	qinfo->conf.tx_thresh.wthresh = txq->wthresh;

	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
	qinfo->conf.tx_rs_thresh = 0;
	qinfo->conf.txq_flags = txq->txq_flags;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}

static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_dev_info dev_info;
	uint32_t max_dev_mtu;
	uint32_t rc = 0;
	uint32_t i;

	bnxt_dev_info_get_op(eth_dev, &dev_info);
	max_dev_mtu = dev_info.max_rx_pktlen -
		      ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;

	if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
		RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n",
			ETHER_MIN_MTU, max_dev_mtu);
		return -EINVAL;
	}


	if (new_mtu > ETHER_MTU) {
		bp->flags |= BNXT_FLAG_JUMBO;
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	} else {
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
		new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;

	eth_dev->data->mtu = new_mtu;
	RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu);

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc)
			break;

		rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
		if (rc)
			return rc;
	}

	return rc;
}

static int
bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	uint16_t vlan = bp->vlan;
	int rc;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD,
			"PVID cannot be modified for this function\n");
		return -ENOTSUP;
	}
	bp->vlan = on ? pvid : 0;

	rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
	if (rc)
		bp->vlan = vlan;
	return rc;
}

static int
bnxt_dev_led_on_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	return bnxt_hwrm_port_led_cfg(bp, true);
}

static int
bnxt_dev_led_off_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	return bnxt_hwrm_port_led_cfg(bp, false);
}

static uint32_t
bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint32_t desc = 0, raw_cons = 0, cons;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_rx_queue *rxq;
	struct rx_pkt_cmpl *rxcmp;
	uint16_t cmp_type;
	uint8_t cmp = 1;
	bool valid;

	rxq = dev->data->rx_queues[rx_queue_id];
	cpr = rxq->cp_ring;
	valid = cpr->valid;

	while (raw_cons < rxq->nb_rx_desc) {
		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

		if (!CMPL_VALID(rxcmp, valid))
			goto nothing_to_do;
		valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
		cmp_type = CMP_TYPE(rxcmp);
		if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
			cmp = (rte_le_to_cpu_32(
					((struct rx_tpa_end_cmpl *)
					 (rxcmp))->agg_bufs_v1) &
				RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
				RX_TPA_END_CMPL_AGG_BUFS_SFT;
			desc++;
		} else if (cmp_type == 0x11) {
			desc++;
			cmp = (rxcmp->agg_bufs_v1 &
				RX_PKT_CMPL_AGG_BUFS_MASK) >>
				RX_PKT_CMPL_AGG_BUFS_SFT;
		} else {
			cmp = 1;
		}
nothing_to_do:
		raw_cons += cmp ? cmp : 2;
	}

	return desc;
}

static int
bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct rx_pkt_cmpl *rxcmp;
	uint32_t cons, cp_cons;

	if (!rxq)
		return -EINVAL;

	cpr = rxq->cp_ring;
	rxr = rxq->rx_ring;

	if (offset >= rxq->nb_rx_desc)
		return -EINVAL;

	cons = RING_CMP(cpr->cp_ring_struct, offset);
	cp_cons = cpr->cp_raw_cons;
	rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

	if (cons > cp_cons) {
		if (CMPL_VALID(rxcmp, cpr->valid))
			return RTE_ETH_RX_DESC_DONE;
	} else {
		if (CMPL_VALID(rxcmp, !cpr->valid))
			return RTE_ETH_RX_DESC_DONE;
	}
	rx_buf = &rxr->rx_buf_ring[cons];
	if (rx_buf->mbuf == NULL)
		return RTE_ETH_RX_DESC_UNAVAIL;


	return RTE_ETH_RX_DESC_AVAIL;
}

static int
bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
{
	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_sw_tx_bd *tx_buf;
	struct tx_pkt_cmpl *txcmp;
	uint32_t cons, cp_cons;

	if (!txq)
		return -EINVAL;

	cpr = txq->cp_ring;
	txr = txq->tx_ring;

	if (offset >= txq->nb_tx_desc)
		return -EINVAL;

	cons = RING_CMP(cpr->cp_ring_struct, offset);
	txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
	cp_cons = cpr->cp_raw_cons;

	if (cons > cp_cons) {
		if (CMPL_VALID(txcmp, cpr->valid))
			return RTE_ETH_TX_DESC_UNAVAIL;
	} else {
		if (CMPL_VALID(txcmp, !cpr->valid))
			return RTE_ETH_TX_DESC_UNAVAIL;
	}
	tx_buf = &txr->tx_buf_ring[cons];
	if (tx_buf->mbuf == NULL)
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}

static struct bnxt_filter_info *
bnxt_match_and_validate_ether_filter(struct bnxt *bp,
			struct rte_eth_ethertype_filter *efilter,
			struct bnxt_vnic_info *vnic0,
			struct bnxt_vnic_info *vnic,
			int *ret)
{
	struct bnxt_filter_info *mfilter = NULL;
	int match = 0;
	*ret = 0;

	if (efilter->ether_type != ETHER_TYPE_IPv4 &&
	    efilter->ether_type != ETHER_TYPE_IPv6) {
		RTE_LOG(ERR, PMD, "unsupported ether_type(0x%04x) in"
			" ethertype filter.", efilter->ether_type);
		*ret = -EINVAL;
		goto exit;
	}
	if (efilter->queue >= bp->rx_nr_rings) {
		RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
		*ret = -EINVAL;
		goto exit;
	}

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
	if (vnic == NULL) {
		RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
		*ret = -EINVAL;
		goto exit;
	}

	if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
			if ((!memcmp(efilter->mac_addr.addr_bytes,
				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
			     mfilter->flags ==
			     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
			     mfilter->ethertype == efilter->ether_type)) {
				match = 1;
				break;
			}
		}
	} else {
		STAILQ_FOREACH(mfilter, &vnic->filter, next)
			if ((!memcmp(efilter->mac_addr.addr_bytes,
				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
			     mfilter->ethertype == efilter->ether_type &&
			     mfilter->flags ==
			     HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
				match = 1;
				break;
			}
	}

	if (match)
		*ret = -EEXIST;

exit:
	return mfilter;
}

static int
bnxt_ethertype_filter(struct rte_eth_dev *dev,
		      enum rte_filter_op filter_op,
		      void *arg)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_ethertype_filter *efilter =
			(struct rte_eth_ethertype_filter *)arg;
	struct bnxt_filter_info *bfilter, *filter1;
	struct bnxt_vnic_info *vnic, *vnic0;
	int ret;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
			filter_op);
		return -EINVAL;
	}

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		bnxt_match_and_validate_ether_filter(bp, efilter,
						     vnic0, vnic, &ret);
		if (ret < 0)
			return ret;

		bfilter = bnxt_get_unused_filter(bp);
		if (bfilter == NULL) {
			RTE_LOG(ERR, PMD,
				"Not enough resources for a new filter.\n");
			return -ENOMEM;
		}
		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
		memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
		       ETHER_ADDR_LEN);
		memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
		       ETHER_ADDR_LEN);
		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
		bfilter->ethertype = efilter->ether_type;
		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;

		filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
		if (filter1 == NULL) {
			ret = -1;
			goto cleanup;
		}
		bfilter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;

		bfilter->dst_id = vnic->fw_vnic_id;

		if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
			bfilter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		}

		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
		if (ret)
			goto cleanup;
		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
		break;
	case RTE_ETH_FILTER_DELETE:
		filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
							       vnic0, vnic, &ret);
		if (ret == -EEXIST) {
			ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);

			STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
				      next);
			bnxt_free_filter(bp, filter1);
		} else if (ret == 0) {
			RTE_LOG(ERR, PMD, "No matching filter found\n");
		}
		break;
	default:
		RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
		ret = -EINVAL;
		goto error;
	}
	return ret;
cleanup:
	bnxt_free_filter(bp, bfilter);
error:
	return ret;
}

static inline int
parse_ntuple_filter(struct bnxt *bp,
		    struct rte_eth_ntuple_filter *nfilter,
		    struct bnxt_filter_info *bfilter)
{
	uint32_t en = 0;

	if (nfilter->queue >= bp->rx_nr_rings) {
		RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue);
		return -EINVAL;
	}

	switch (nfilter->dst_port_mask) {
	case UINT16_MAX:
		bfilter->dst_port_mask = -1;
		bfilter->dst_port = nfilter->dst_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
		break;
	default:
		RTE_LOG(ERR, PMD, "invalid dst_port mask.");
		return -EINVAL;
	}

	bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
	en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;

	switch (nfilter->proto_mask) {
	case UINT8_MAX:
		if (nfilter->proto == 17) /* IPPROTO_UDP */
			bfilter->ip_protocol = 17;
		else if (nfilter->proto == 6) /* IPPROTO_TCP */
			bfilter->ip_protocol = 6;
		else
			return -EINVAL;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		break;
	default:
		RTE_LOG(ERR, PMD, "invalid protocol mask.");
		return -EINVAL;
	}

	switch (nfilter->dst_ip_mask) {
	case UINT32_MAX:
		bfilter->dst_ipaddr_mask[0] = -1;
		bfilter->dst_ipaddr[0] = nfilter->dst_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		break;
	default:
		RTE_LOG(ERR, PMD, "invalid dst_ip mask.");
		return -EINVAL;
	}

	switch (nfilter->src_ip_mask) {
	case UINT32_MAX:
		bfilter->src_ipaddr_mask[0] = -1;
		bfilter->src_ipaddr[0] = nfilter->src_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		break;
	default:
		RTE_LOG(ERR, PMD, "invalid src_ip mask.");
		return -EINVAL;
	}

	switch (nfilter->src_port_mask) {
	case UINT16_MAX:
		bfilter->src_port_mask = -1;
		bfilter->src_port = nfilter->src_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
		break;
	default:
		RTE_LOG(ERR, PMD, "invalid src_port mask.");
		return -EINVAL;
	}

	//TODO Priority
	//nfilter->priority = (uint8_t)filter->priority;

	bfilter->enables = en;
	return 0;
}

static struct bnxt_filter_info*
bnxt_match_ntuple_filter(struct bnxt *bp,
			 struct bnxt_filter_info *bfilter)
{
	struct bnxt_filter_info *mfilter = NULL;
	int i;

	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
		STAILQ_FOREACH(mfilter, &vnic->filter, next) {
			if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
			    bfilter->src_ipaddr_mask[0] ==
			    mfilter->src_ipaddr_mask[0] &&
			    bfilter->src_port == mfilter->src_port &&
			    bfilter->src_port_mask == mfilter->src_port_mask &&
			    bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
			    bfilter->dst_ipaddr_mask[0] ==
			    mfilter->dst_ipaddr_mask[0] &&
			    bfilter->dst_port == mfilter->dst_port &&
			    bfilter->dst_port_mask == mfilter->dst_port_mask &&
			    bfilter->flags == mfilter->flags &&
			    bfilter->enables == mfilter->enables)
				return mfilter;
		}
	}
	return NULL;
}

static int
bnxt_cfg_ntuple_filter(struct bnxt *bp,
		       struct rte_eth_ntuple_filter *nfilter,
		       enum rte_filter_op filter_op)
{
	struct bnxt_filter_info *bfilter, *mfilter, *filter1;
	struct bnxt_vnic_info *vnic, *vnic0;
	int ret;

	if (nfilter->flags != RTE_5TUPLE_FLAGS) {
		RTE_LOG(ERR, PMD, "only 5tuple is supported.");
		return -EINVAL;
	}

	if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n");
		return -EINVAL;
	}

	bfilter = bnxt_get_unused_filter(bp);
	if (bfilter == NULL) {
		RTE_LOG(ERR, PMD,
			"Not enough resources for a new filter.\n");
		return -ENOMEM;
	}
	ret = parse_ntuple_filter(bp, nfilter, bfilter);
	if (ret < 0)
		goto free_filter;

	vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]);
	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	filter1 = STAILQ_FIRST(&vnic0->filter);
	if (filter1 == NULL) {
		ret = -1;
		goto free_filter;
	}

	bfilter->dst_id = vnic->fw_vnic_id;
	bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
	bfilter->enables |=
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
	bfilter->ethertype = 0x800;
	bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;

	mfilter = bnxt_match_ntuple_filter(bp, bfilter);

	if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD) {
		RTE_LOG(ERR, PMD, "filter exists.");
		ret = -EEXIST;
		goto free_filter;
	}
	if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
		RTE_LOG(ERR, PMD, "filter doesn't exist.");
		ret = -ENOENT;
		goto free_filter;
	}

	if (filter_op == RTE_ETH_FILTER_ADD) {
		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
		if (ret)
			goto free_filter;
		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
	} else {
		if (mfilter == NULL) {
			/* This should not happen. But for Coverity! */
			ret = -ENOENT;
			goto free_filter;
		}
		ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);

		STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info,
			      next);
		bnxt_free_filter(bp, mfilter);
		bfilter->fw_l2_filter_id = -1;
		bnxt_free_filter(bp, bfilter);
	}

	return 0;
free_filter:
	bfilter->fw_l2_filter_id = -1;
	bnxt_free_filter(bp, bfilter);
	return ret;
}

static int
bnxt_ntuple_filter(struct rte_eth_dev *dev,
		   enum rte_filter_op filter_op,
		   void *arg)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	int ret;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
			filter_op);
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = bnxt_cfg_ntuple_filter(bp,
			(struct rte_eth_ntuple_filter *)arg,
			filter_op);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = bnxt_cfg_ntuple_filter(bp,
			(struct rte_eth_ntuple_filter *)arg,
			filter_op);
		break;
	default:
		RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int
bnxt_parse_fdir_filter(struct bnxt *bp,
		       struct rte_eth_fdir_filter *fdir,
		       struct bnxt_filter_info *filter)
{
	enum rte_fdir_mode fdir_mode =
		bp->eth_dev->data->dev_conf.fdir_conf.mode;
	struct bnxt_vnic_info *vnic0, *vnic;
	struct bnxt_filter_info *filter1;
	uint32_t en = 0;
	int i;

	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		return -EINVAL;

	filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
	en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;

	switch (fdir->input.flow_type) {
	case RTE_ETH_FLOW_IPV4:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		/* FALLTHROUGH */
		filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
		en |=
			NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
		filter->src_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->dst_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		filter->ethertype = 0x800;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		filter->src_port = fdir->input.flow.tcp4_flow.src_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
		filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
		filter->dst_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
		filter->src_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
		filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		filter->ip_protocol = 6;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
		filter->src_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->dst_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		filter->ethertype = 0x800;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		filter->src_port = fdir->input.flow.udp4_flow.src_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
		filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
		filter->dst_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
		filter->src_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
		filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		filter->ip_protocol = 17;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
		filter->src_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->dst_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		filter->ethertype = 0x800;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_IPV6:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		/* FALLTHROUGH */
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
		filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		rte_memcpy(filter->src_ipaddr,
			   fdir->input.flow.ipv6_flow.src_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		rte_memcpy(filter->dst_ipaddr,
			   fdir->input.flow.ipv6_flow.dst_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		memset(filter->dst_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		memset(filter->src_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->ethertype = 0x86dd;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case
	     RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		filter->src_port = fdir->input.flow.tcp6_flow.src_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
		filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
		filter->dst_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
		filter->src_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
		filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		rte_memcpy(filter->src_ipaddr,
			   fdir->input.flow.tcp6_flow.ip.src_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		rte_memcpy(filter->dst_ipaddr,
			   fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		memset(filter->dst_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		memset(filter->src_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->ethertype = 0x86dd;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		filter->src_port = fdir->input.flow.udp6_flow.src_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
		filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
		filter->dst_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
		filter->src_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
		filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		rte_memcpy(filter->src_ipaddr,
			   fdir->input.flow.udp6_flow.ip.src_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		rte_memcpy(filter->dst_ipaddr,
			   fdir->input.flow.udp6_flow.ip.dst_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		memset(filter->dst_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		memset(filter->src_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->ethertype = 0x86dd;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_L2_PAYLOAD:
		filter->ethertype = fdir->input.flow.l2_flow.ether_type;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_VXLAN:
		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
			return -EINVAL;
		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
		filter->tunnel_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
		break;
	case RTE_ETH_FLOW_NVGRE:
		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
			return -EINVAL;
		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
		filter->tunnel_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
		break;
	case RTE_ETH_FLOW_UNKNOWN:
	case RTE_ETH_FLOW_RAW:
	case RTE_ETH_FLOW_FRAG_IPV4:
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
	case RTE_ETH_FLOW_FRAG_IPV6:
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
	case RTE_ETH_FLOW_IPV6_EX:
	case RTE_ETH_FLOW_IPV6_TCP_EX:
	case RTE_ETH_FLOW_IPV6_UDP_EX:
	case RTE_ETH_FLOW_GENEVE:
		/* FALLTHROUGH */
	default:
		return -EINVAL;
	}

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
	if (vnic == NULL) {
		RTE_LOG(ERR, PMD, "Invalid queue %d\n", fdir->action.rx_queue);
		return -EINVAL;
	}

	if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		rte_memcpy(filter->dst_macaddr,
			fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
	}

	if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		filter1 = STAILQ_FIRST(&vnic0->filter);
		/* filter1 = bnxt_get_l2_filter(bp, filter, vnic0); */
	} else {
		filter->dst_id = vnic->fw_vnic_id;
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			if (filter->dst_macaddr[i] == 0x00)
				filter1 = STAILQ_FIRST(&vnic0->filter);
			else
				filter1 = bnxt_get_l2_filter(bp, filter, vnic);
	}

	if (filter1 == NULL)
		return -EINVAL;

	en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;

	filter->enables = en;

	return 0;
}

static struct bnxt_filter_info *
bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf = NULL;
	int i;

	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		STAILQ_FOREACH(mf, &vnic->filter, next) {
			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask)))
				return mf;
		}
	}
	return NULL;
}

static int
bnxt_fdir_filter(struct rte_eth_dev *dev,
		 enum rte_filter_op filter_op,
		 void *arg)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg;
	struct bnxt_filter_info *filter, *match;
	struct bnxt_vnic_info *vnic;
	int ret = 0, i;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_DELETE:
		/* FALLTHROUGH */
		filter = bnxt_get_unused_filter(bp);
		if (filter == NULL) {
			RTE_LOG(ERR, PMD,
				"Not enough resources for a new flow.\n");
			return -ENOMEM;
		}

		ret = bnxt_parse_fdir_filter(bp, fdir, filter);
		if (ret != 0)
			goto free_filter;
		filter->filter_type = HWRM_CFA_NTUPLE_FILTER;

		match = bnxt_match_fdir(bp, filter);
		if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
			RTE_LOG(ERR, PMD, "Flow already exists.\n");
			ret = -EEXIST;
			goto free_filter;
		}
		if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
			RTE_LOG(ERR, PMD, "Flow does not exist.\n");
			ret = -ENOENT;
			goto free_filter;
		}

		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
			vnic = STAILQ_FIRST(&bp->ff_pool[0]);
		else
			vnic =
			STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);

		if (filter_op == RTE_ETH_FILTER_ADD) {
			ret = bnxt_hwrm_set_ntuple_filter(bp,
							  filter->dst_id,
							  filter);
			if (ret)
				goto free_filter;
			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		} else {
			ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
			STAILQ_REMOVE(&vnic->filter, match,
				      bnxt_filter_info, next);
			bnxt_free_filter(bp, match);
			filter->fw_l2_filter_id = -1;
			bnxt_free_filter(bp, filter);
		}
		break;
	case RTE_ETH_FILTER_FLUSH:
		for (i = bp->nr_vnics - 1; i >= 0; i--) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

			STAILQ_FOREACH(filter, &vnic->filter, next) {
				if (filter->filter_type ==
				    HWRM_CFA_NTUPLE_FILTER) {
					ret =
					bnxt_hwrm_clear_ntuple_filter(bp,
								      filter);
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
				}
			}
		}
		return ret;
	case RTE_ETH_FILTER_UPDATE:
	case RTE_ETH_FILTER_STATS:
	case RTE_ETH_FILTER_INFO:
		/* FALLTHROUGH */
		RTE_LOG(ERR, PMD, "operation %u not implemented", filter_op);
		break;
	default:
		RTE_LOG(ERR, PMD, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;

free_filter:
	filter->fw_l2_filter_id = -1;
	bnxt_free_filter(bp, filter);
	return ret;
}

static int
bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
		    enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op, void *arg)
{
	int ret = 0;

	switch (filter_type) {
	case RTE_ETH_FILTER_TUNNEL:
		RTE_LOG(ERR, PMD,
			"filter type: %d: To be implemented\n", filter_type);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = bnxt_fdir_filter(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_NTUPLE:
		ret = bnxt_ntuple_filter(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = bnxt_ethertype_filter(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &bnxt_flow_ops;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Filter type (%d) not supported", filter_type);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static const uint32_t *
bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_ICMP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == bnxt_recv_pkts)
		return ptypes;
	return NULL;
}

static int
bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	int rc;
	uint32_t dir_entries;
	uint32_t entry_length;

	RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n",
		__func__, bp->pdev->addr.domain, bp->pdev->addr.bus,
		bp->pdev->addr.devid, bp->pdev->addr.function);

	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	return dir_entries * entry_length;
}

static int
bnxt_get_eeprom_op(struct rte_eth_dev *dev,
		   struct rte_dev_eeprom_info *in_eeprom)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	uint32_t index;
	uint32_t offset;

	RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
		"len = %d\n", __func__, bp->pdev->addr.domain,
		bp->pdev->addr.bus, bp->pdev->addr.devid,
		bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);

	if (in_eeprom->offset == 0) /* special offset value to get directory */
		return bnxt_get_nvram_directory(bp, in_eeprom->length,
						in_eeprom->data);

	index = in_eeprom->offset >> 24;
	offset = in_eeprom->offset & 0xffffff;

	if (index != 0)
		return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
				in_eeprom->length, in_eeprom->data);

	return 0;
}

static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_AVS:
	case BNX_DIR_TYPE_EXP_ROM_MBA:
	case BNX_DIR_TYPE_PCIE:
	case BNX_DIR_TYPE_TSCF_UCODE:
	case BNX_DIR_TYPE_EXT_PHY:
	case BNX_DIR_TYPE_CCM:
	case BNX_DIR_TYPE_ISCSI_BOOT:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_executable(uint16_t dir_type)
{
	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
	       bnxt_dir_type_is_other_exec_format(dir_type);
}

static int
bnxt_set_eeprom_op(struct rte_eth_dev *dev,
		   struct rte_dev_eeprom_info *in_eeprom)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	uint8_t index, dir_op;
	uint16_t type, ext, ordinal, attr;

	RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
		"len = %d\n", __func__, bp->pdev->addr.domain,
		bp->pdev->addr.bus, bp->pdev->addr.devid,
		bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n");
		return -EINVAL;
	}

	type = in_eeprom->magic >> 16;

	if (type == 0xffff) { /* special value for directory operations */
		index = in_eeprom->magic & 0xff;
		dir_op = in_eeprom->magic >>
			 8;
		if (index == 0)
			return -EINVAL;
		switch (dir_op) {
		case 0x0e: /* erase */
			if (in_eeprom->offset != ~in_eeprom->magic)
				return -EINVAL;
			return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
		default:
			return -EINVAL;
		}
	}

	/* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type) == true)
		return -EOPNOTSUPP;
	ext = in_eeprom->magic & 0xffff;
	ordinal = in_eeprom->offset >> 16;
	attr = in_eeprom->offset & 0xffff;

	return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
				     in_eeprom->data, in_eeprom->length);
}

/*
 * Initialization
 */

static const struct eth_dev_ops bnxt_dev_ops = {
	.dev_infos_get = bnxt_dev_info_get_op,
	.dev_close = bnxt_dev_close_op,
	.dev_configure = bnxt_dev_configure_op,
	.dev_start = bnxt_dev_start_op,
	.dev_stop = bnxt_dev_stop_op,
	.dev_set_link_up = bnxt_dev_set_link_up_op,
	.dev_set_link_down = bnxt_dev_set_link_down_op,
	.stats_get = bnxt_stats_get_op,
	.stats_reset = bnxt_stats_reset_op,
	.rx_queue_setup = bnxt_rx_queue_setup_op,
	.rx_queue_release = bnxt_rx_queue_release_op,
	.tx_queue_setup = bnxt_tx_queue_setup_op,
	.tx_queue_release = bnxt_tx_queue_release_op,
	.rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
	.rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
	.reta_update = bnxt_reta_update_op,
	.reta_query = bnxt_reta_query_op,
	.rss_hash_update = bnxt_rss_hash_update_op,
	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
	.link_update = bnxt_link_update_op,
	.promiscuous_enable = bnxt_promiscuous_enable_op,
	.promiscuous_disable = bnxt_promiscuous_disable_op,
	.allmulticast_enable = bnxt_allmulticast_enable_op,
	.allmulticast_disable = bnxt_allmulticast_disable_op,
	.mac_addr_add = bnxt_mac_addr_add_op,
	.mac_addr_remove = bnxt_mac_addr_remove_op,
	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
	.udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op,
	.udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op,
	.vlan_filter_set = bnxt_vlan_filter_set_op,
	.vlan_offload_set = bnxt_vlan_offload_set_op,
	.vlan_pvid_set = bnxt_vlan_pvid_set_op,
	.mtu_set = bnxt_mtu_set_op,
	.mac_addr_set = bnxt_set_default_mac_addr_op,
	.xstats_get = bnxt_dev_xstats_get_op,
	.xstats_get_names = bnxt_dev_xstats_get_names_op,
	.xstats_reset = bnxt_dev_xstats_reset_op,
	.fw_version_get = bnxt_fw_version_get,
	.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
	.rxq_info_get = bnxt_rxq_info_get_op,
	.txq_info_get = bnxt_txq_info_get_op,
	.dev_led_on = bnxt_dev_led_on_op,
	.dev_led_off = bnxt_dev_led_off_op,
	.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
	.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
	.rx_queue_count = bnxt_rx_queue_count_op,
	.rx_descriptor_status = bnxt_rx_descriptor_status_op,
	.tx_descriptor_status = bnxt_tx_descriptor_status_op,
	.filter_ctrl = bnxt_filter_ctrl_op,
	.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
	.get_eeprom_length = bnxt_get_eeprom_length_op,
	.get_eeprom = bnxt_get_eeprom_op,
	.set_eeprom = bnxt_set_eeprom_op,
};

static bool bnxt_vf_pciid(uint16_t id)
{
	if (id == BROADCOM_DEV_ID_57304_VF ||
	    id == BROADCOM_DEV_ID_57406_VF ||
	    id == BROADCOM_DEV_ID_5731X_VF ||
	    id ==
		BROADCOM_DEV_ID_5741X_VF ||
	    id == BROADCOM_DEV_ID_57414_VF ||
	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
		return true;
	return false;
}

static int bnxt_init_board(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int rc;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	if (!pci_dev->mem_resource[0].addr) {
		RTE_LOG(ERR, PMD,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto init_err_disable;
	}

	bp->eth_dev = eth_dev;
	bp->pdev = pci_dev;

	bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
	if (!bp->bar0) {
		RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}
	return 0;

init_err_release:
	if (bp->bar0)
		bp->bar0 = NULL;

init_err_disable:

	return rc;
}

static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);

#define ALLOW_FUNC(x)	\
	{ \
		typeof(x) arg = (x); \
		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
	}
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz = NULL;
	static int version_printed;
	uint32_t total_alloc_len;
	rte_iova_t mz_phys_addr;
	struct bnxt *bp;
	int rc;

	if (version_printed++ == 0)
		RTE_LOG(INFO, PMD, "%s\n", bnxt_version);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	bp = eth_dev->data->dev_private;

	rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
	bp->dev_stopped = 1;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto skip_init;

	if (bnxt_vf_pciid(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_VF;

	rc = bnxt_init_board(eth_dev);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Board initialization failed rc: %x\n", rc);
		goto error;
	}
skip_init:
	eth_dev->dev_ops = &bnxt_dev_ops;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;

	if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
			 pci_dev->addr.bus, pci_dev->addr.devid,
			 pci_dev->addr.function, "rx_port_stats");
		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
		mz = rte_memzone_lookup(mz_name);
		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
				sizeof(struct rx_port_stats) + 512);
		if (!mz) {
			mz = rte_memzone_reserve(mz_name, total_alloc_len,
						 SOCKET_ID_ANY,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL)
				return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		mz_phys_addr = mz->iova;
		if ((unsigned long)mz->addr == mz_phys_addr) {
			RTE_LOG(WARNING, PMD,
				"Memzone physical address same as virtual.\n");
			RTE_LOG(WARNING, PMD,
				"Using rte_mem_virt2iova()\n");
			mz_phys_addr = rte_mem_virt2iova(mz->addr);
			if (mz_phys_addr == 0) {
				RTE_LOG(ERR, PMD,
					"unable to map address to physical memory\n");
				return -ENOMEM;
			}
		}

		bp->rx_mem_zone = (const void *)mz;
		bp->hw_rx_port_stats =
			mz->addr;
		bp->hw_rx_port_stats_map = mz_phys_addr;

		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
			 pci_dev->addr.bus, pci_dev->addr.devid,
			 pci_dev->addr.function, "tx_port_stats");
		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
		mz = rte_memzone_lookup(mz_name);
		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
				sizeof(struct tx_port_stats) + 512);
		if (!mz) {
			mz = rte_memzone_reserve(mz_name, total_alloc_len,
						 SOCKET_ID_ANY,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL)
				return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		mz_phys_addr = mz->iova;
		if ((unsigned long)mz->addr == mz_phys_addr) {
			RTE_LOG(WARNING, PMD,
				"Memzone physical address same as virtual.\n");
			RTE_LOG(WARNING, PMD,
				"Using rte_mem_virt2iova()\n");
			mz_phys_addr = rte_mem_virt2iova(mz->addr);
			if (mz_phys_addr == 0) {
				RTE_LOG(ERR, PMD,
					"unable to map address to physical memory\n");
				return -ENOMEM;
			}
		}

		bp->tx_mem_zone = (const void *)mz;
		bp->hw_tx_port_stats = mz->addr;
		bp->hw_tx_port_stats_map = mz_phys_addr;

		bp->flags |= BNXT_FLAG_PORT_STATS;
	}

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"hwrm resource allocation failure rc: %x\n", rc);
		goto error_free;
	}
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto error_free;
	bnxt_hwrm_queue_qportcfg(bp);

	bnxt_hwrm_func_qcfg(bp);

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
		goto error_free;
	}
	if (bp->max_tx_rings == 0) {
		RTE_LOG(ERR, PMD, "No TX rings available!\n");
		rc = -EBUSY;
		goto error_free;
	}
	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
					       ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %u bytes needed to store MAC addr tbl",
			ETHER_ADDR_LEN * bp->max_l2_ctx);
		rc = -ENOMEM;
		goto error_free;
	}
	/* Copy the permanent MAC from the qcap response address now. */
	memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
	bp->grp_info = rte_zmalloc("bnxt_grp_info",
				   sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
	if (!bp->grp_info) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %zu bytes needed to store group info table\n",
			sizeof(*bp->grp_info) * bp->max_ring_grps);
		rc = -ENOMEM;
		goto error_free;
	}

	/* Forward all requests if firmware is new enough */
	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
	     (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
	} else {
		RTE_LOG(WARNING, PMD,
			"Firmware too old for VF mailbox functionality\n");
		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
	}

	/*
	 * The following are used for driver cleanup. If we disallow these,
	 * VF drivers can't clean up cleanly.
	 */
	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
	ALLOW_FUNC(HWRM_VNIC_FREE);
	ALLOW_FUNC(HWRM_RING_FREE);
	ALLOW_FUNC(HWRM_RING_GRP_FREE);
	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
	rc = bnxt_hwrm_func_driver_register(bp);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Failed to register driver");
		rc = -EBUSY;
		goto error_free;
	}

	RTE_LOG(INFO, PMD,
		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
		pci_dev->mem_resource[0].phys_addr,
		pci_dev->mem_resource[0].addr);

	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
		rc = -1;
		goto error_free;
	}

	if (BNXT_PF(bp)) {
		/*
		 * if (bp->pf.active_vfs) {
		 *	TODO: Deallocate VF resources?
		 * }
		 */
		if (bp->pdev->max_vfs) {
			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
			if (rc) {
				RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
				goto error_free;
			}
		} else {
			rc = bnxt_hwrm_allocate_pf_only(bp);
			if (rc) {
				RTE_LOG(ERR, PMD,
					"Failed to allocate PF resources\n");
				goto error_free;
			}
		}
	}

	bnxt_hwrm_port_led_qcaps(bp);

	rc = bnxt_setup_int(bp);
	if (rc)
		goto error_free;

	rc = bnxt_alloc_mem(bp);
	if (rc)
		goto error_free_int;

	rc = bnxt_request_int(bp);
	if (rc)
		goto error_free_int;

	rc = bnxt_alloc_def_cp_ring(bp);
	if (rc)
		goto error_free_int;

	bnxt_enable_int(bp);

	return 0;

error_free_int:
	bnxt_disable_int(bp);
	bnxt_free_def_cp_ring(bp);
	bnxt_hwrm_func_buf_unrgtr(bp);
	bnxt_free_int(bp);
	bnxt_free_mem(bp);
error_free:
	bnxt_dev_uninit(eth_dev);
error:
	return rc;
}

static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	bnxt_disable_int(bp);
	bnxt_free_int(bp);
	bnxt_free_mem(bp);
	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}
	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
	bnxt_free_hwrm_resources(bp);
	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
	if (bp->dev_stopped == 0)
		bnxt_dev_close_op(eth_dev);
	if (bp->pf.vf_info)
		rte_free(bp->pf.vf_info);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return rc;
}

static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
					     bnxt_dev_init);
}

static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		RTE_PCI_DRV_INTR_LSC,
	.probe = bnxt_pci_probe,
	.remove =
		  bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
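
/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * reaches the ntuple path implemented by bnxt_cfg_ntuple_filter() above
 * through the generic rte_eth_dev_filter_ctrl() entry point, which lands in
 * bnxt_filter_ctrl_op(). The port id, queue and header values below are
 * hypothetical placeholders; parse_ntuple_filter() requires full masks on
 * every field it supports, and proto 6 mirrors the TCP literal the driver
 * itself checks.
 *
 *	uint16_t port_id = 0;
 *	struct rte_eth_ntuple_filter nf = {
 *		.flags = RTE_5TUPLE_FLAGS,
 *		.dst_ip = rte_cpu_to_be_32(0xc0a80001),   (192.168.0.1)
 *		.dst_ip_mask = UINT32_MAX,
 *		.src_ip = rte_cpu_to_be_32(0xc0a80002),   (192.168.0.2)
 *		.src_ip_mask = UINT32_MAX,
 *		.dst_port = rte_cpu_to_be_16(80),
 *		.dst_port_mask = UINT16_MAX,
 *		.src_port = rte_cpu_to_be_16(1024),
 *		.src_port_mask = UINT16_MAX,
 *		.proto = 6,
 *		.proto_mask = UINT8_MAX,
 *		.queue = 1,
 *	};
 *
 *	int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *					  RTE_ETH_FILTER_ADD, &nf);
 *
 * A non-zero ret maps to the errno-style codes returned above, e.g. -EEXIST
 * when an identical filter is already installed on the target vnic.
 */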