1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) Broadcom Limited. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Broadcom Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <inttypes.h> 35 #include <stdbool.h> 36 37 #include <rte_dev.h> 38 #include <rte_ethdev.h> 39 #include <rte_ethdev_pci.h> 40 #include <rte_malloc.h> 41 #include <rte_cycles.h> 42 43 #include "bnxt.h" 44 #include "bnxt_cpr.h" 45 #include "bnxt_filter.h" 46 #include "bnxt_hwrm.h" 47 #include "bnxt_irq.h" 48 #include "bnxt_ring.h" 49 #include "bnxt_rxq.h" 50 #include "bnxt_rxr.h" 51 #include "bnxt_stats.h" 52 #include "bnxt_txq.h" 53 #include "bnxt_txr.h" 54 #include "bnxt_vnic.h" 55 #include "hsi_struct_def_dpdk.h" 56 #include "bnxt_nvm_defs.h" 57 58 #define DRV_MODULE_NAME "bnxt" 59 static const char bnxt_version[] = 60 "Broadcom Cumulus driver " DRV_MODULE_NAME "\n"; 61 62 #define PCI_VENDOR_ID_BROADCOM 0x14E4 63 64 #define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609 65 #define BROADCOM_DEV_ID_STRATUS_NIC 0x1614 66 #define BROADCOM_DEV_ID_57414_VF 0x16c1 67 #define BROADCOM_DEV_ID_57301 0x16c8 68 #define BROADCOM_DEV_ID_57302 0x16c9 69 #define BROADCOM_DEV_ID_57304_PF 0x16ca 70 #define BROADCOM_DEV_ID_57304_VF 0x16cb 71 #define BROADCOM_DEV_ID_57417_MF 0x16cc 72 #define BROADCOM_DEV_ID_NS2 0x16cd 73 #define BROADCOM_DEV_ID_57311 0x16ce 74 #define BROADCOM_DEV_ID_57312 0x16cf 75 #define BROADCOM_DEV_ID_57402 0x16d0 76 #define BROADCOM_DEV_ID_57404 0x16d1 77 #define BROADCOM_DEV_ID_57406_PF 0x16d2 78 #define BROADCOM_DEV_ID_57406_VF 0x16d3 79 #define BROADCOM_DEV_ID_57402_MF 0x16d4 80 #define BROADCOM_DEV_ID_57407_RJ45 0x16d5 81 #define BROADCOM_DEV_ID_57412 0x16d6 82 #define BROADCOM_DEV_ID_57414 0x16d7 83 #define BROADCOM_DEV_ID_57416_RJ45 0x16d8 84 #define BROADCOM_DEV_ID_57417_RJ45 0x16d9 85 #define BROADCOM_DEV_ID_5741X_VF 0x16dc 86 #define BROADCOM_DEV_ID_57412_MF 0x16de 87 #define BROADCOM_DEV_ID_57314 0x16df 88 #define BROADCOM_DEV_ID_57317_RJ45 0x16e0 89 #define BROADCOM_DEV_ID_5731X_VF 0x16e1 90 #define BROADCOM_DEV_ID_57417_SFP 0x16e2 91 #define 
BROADCOM_DEV_ID_57416_SFP 0x16e3 92 #define BROADCOM_DEV_ID_57317_SFP 0x16e4 93 #define BROADCOM_DEV_ID_57404_MF 0x16e7 94 #define BROADCOM_DEV_ID_57406_MF 0x16e8 95 #define BROADCOM_DEV_ID_57407_SFP 0x16e9 96 #define BROADCOM_DEV_ID_57407_MF 0x16ea 97 #define BROADCOM_DEV_ID_57414_MF 0x16ec 98 #define BROADCOM_DEV_ID_57416_MF 0x16ee 99 100 static const struct rte_pci_id bnxt_pci_id_map[] = { 101 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 102 BROADCOM_DEV_ID_STRATUS_NIC_VF) }, 103 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) }, 104 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) }, 105 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) }, 106 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) }, 107 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) }, 108 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) }, 109 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) }, 110 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) }, 111 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) }, 112 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) }, 113 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) }, 114 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) }, 115 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) }, 116 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) }, 117 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) }, 118 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) }, 119 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) }, 120 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) }, 121 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) }, 122 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) }, 123 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) }, 124 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) }, 125 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) }, 126 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) }, 127 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) }, 128 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) }, 129 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) }, 130 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) }, 131 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) }, 132 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) }, 133 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) }, 134 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) }, 135 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) }, 136 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) }, 137 { .vendor_id = 0, /* sentinel */ }, 138 }; 139 140 #define BNXT_ETH_RSS_SUPPORT ( \ 141 ETH_RSS_IPV4 | \ 142 ETH_RSS_NONFRAG_IPV4_TCP | \ 143 ETH_RSS_NONFRAG_IPV4_UDP | \ 144 ETH_RSS_IPV6 | \ 145 ETH_RSS_NONFRAG_IPV6_TCP | \ 146 ETH_RSS_NONFRAG_IPV6_UDP) 147 148 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); 149 150 /***********************/ 151 152 /* 153 * High level utility functions 154 */ 155 156 static void bnxt_free_mem(struct bnxt *bp) 157 { 158 bnxt_free_filter_mem(bp); 159 bnxt_free_vnic_attributes(bp); 160 bnxt_free_vnic_mem(bp); 
161 162 bnxt_free_stats(bp); 163 bnxt_free_tx_rings(bp); 164 bnxt_free_rx_rings(bp); 165 bnxt_free_def_cp_ring(bp); 166 } 167 168 static int bnxt_alloc_mem(struct bnxt *bp) 169 { 170 int rc; 171 172 /* Default completion ring */ 173 rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY); 174 if (rc) 175 goto alloc_mem_err; 176 177 rc = bnxt_alloc_rings(bp, 0, NULL, NULL, 178 bp->def_cp_ring, "def_cp"); 179 if (rc) 180 goto alloc_mem_err; 181 182 rc = bnxt_alloc_vnic_mem(bp); 183 if (rc) 184 goto alloc_mem_err; 185 186 rc = bnxt_alloc_vnic_attributes(bp); 187 if (rc) 188 goto alloc_mem_err; 189 190 rc = bnxt_alloc_filter_mem(bp); 191 if (rc) 192 goto alloc_mem_err; 193 194 return 0; 195 196 alloc_mem_err: 197 bnxt_free_mem(bp); 198 return rc; 199 } 200 201 static int bnxt_init_chip(struct bnxt *bp) 202 { 203 unsigned int i, rss_idx, fw_idx; 204 struct rte_eth_link new; 205 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); 206 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 207 uint32_t intr_vector = 0; 208 uint32_t queue_id, base = BNXT_MISC_VEC_ID; 209 uint32_t vec = BNXT_MISC_VEC_ID; 210 int rc; 211 212 /* disable uio/vfio intr/eventfd mapping */ 213 rte_intr_disable(intr_handle); 214 215 if (bp->eth_dev->data->mtu > ETHER_MTU) { 216 bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; 217 bp->flags |= BNXT_FLAG_JUMBO; 218 } else { 219 bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; 220 bp->flags &= ~BNXT_FLAG_JUMBO; 221 } 222 223 rc = bnxt_alloc_all_hwrm_stat_ctxs(bp); 224 if (rc) { 225 RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc); 226 goto err_out; 227 } 228 229 rc = bnxt_alloc_hwrm_rings(bp); 230 if (rc) { 231 RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc); 232 goto err_out; 233 } 234 235 rc = bnxt_alloc_all_hwrm_ring_grps(bp); 236 if (rc) { 237 RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc); 238 goto err_out; 239 } 240 241 rc = bnxt_mq_rx_configure(bp); 242 if (rc) { 243 RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc); 244 goto err_out; 245 } 246 247 /* VNIC configuration */ 248 for (i = 0; i < bp->nr_vnics; i++) { 249 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 250 251 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 252 if (rc) { 253 RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n", 254 i, rc); 255 goto err_out; 256 } 257 258 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic); 259 if (rc) { 260 RTE_LOG(ERR, PMD, 261 "HWRM vnic %d ctx alloc failure rc: %x\n", 262 i, rc); 263 goto err_out; 264 } 265 266 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 267 if (rc) { 268 RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n", 269 i, rc); 270 goto err_out; 271 } 272 273 rc = bnxt_set_hwrm_vnic_filters(bp, vnic); 274 if (rc) { 275 RTE_LOG(ERR, PMD, 276 "HWRM vnic %d filter failure rc: %x\n", 277 i, rc); 278 goto err_out; 279 } 280 if (vnic->rss_table && vnic->hash_type) { 281 /* 282 * Fill the RSS hash & redirection table with 283 * ring group ids for all VNICs 284 */ 285 for (rss_idx = 0, fw_idx = 0; 286 rss_idx < HW_HASH_INDEX_SIZE; 287 rss_idx++, fw_idx++) { 288 if (vnic->fw_grp_ids[fw_idx] == 289 INVALID_HW_RING_ID) 290 fw_idx = 0; 291 vnic->rss_table[rss_idx] = 292 vnic->fw_grp_ids[fw_idx]; 293 } 294 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 295 if (rc) { 296 RTE_LOG(ERR, PMD, 297 "HWRM vnic %d set RSS failure rc: %x\n", 298 i, rc); 299 goto err_out; 300 } 301 } 302 303 bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 304 305 if (bp->eth_dev->data->dev_conf.rxmode.enable_lro) 306 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1); 307 else 308 
bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0); 309 } 310 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); 311 if (rc) { 312 RTE_LOG(ERR, PMD, 313 "HWRM cfa l2 rx mask failure rc: %x\n", rc); 314 goto err_out; 315 } 316 317 /* check and configure queue intr-vector mapping */ 318 if ((rte_intr_cap_multiple(intr_handle) || 319 !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && 320 bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { 321 intr_vector = bp->eth_dev->data->nb_rx_queues; 322 RTE_LOG(INFO, PMD, "%s(): intr_vector = %d\n", __func__, 323 intr_vector); 324 if (intr_vector > bp->rx_cp_nr_rings) { 325 RTE_LOG(ERR, PMD, "At most %d intr queues supported", 326 bp->rx_cp_nr_rings); 327 return -ENOTSUP; 328 } 329 if (rte_intr_efd_enable(intr_handle, intr_vector)) 330 return -1; 331 } 332 333 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 334 intr_handle->intr_vec = 335 rte_zmalloc("intr_vec", 336 bp->eth_dev->data->nb_rx_queues * 337 sizeof(int), 0); 338 if (intr_handle->intr_vec == NULL) { 339 RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues" 340 " intr_vec", bp->eth_dev->data->nb_rx_queues); 341 return -ENOMEM; 342 } 343 RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p " 344 "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n", 345 __func__, intr_handle->intr_vec, intr_handle->nb_efd, 346 intr_handle->max_intr); 347 } 348 349 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; 350 queue_id++) { 351 intr_handle->intr_vec[queue_id] = vec; 352 if (vec < base + intr_handle->nb_efd - 1) 353 vec++; 354 } 355 356 /* enable uio/vfio intr/eventfd mapping */ 357 rte_intr_enable(intr_handle); 358 359 rc = bnxt_get_hwrm_link_config(bp, &new); 360 if (rc) { 361 RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc); 362 goto err_out; 363 } 364 365 if (!bp->link_info.link_up) { 366 rc = bnxt_set_hwrm_link_config(bp, true); 367 if (rc) { 368 RTE_LOG(ERR, PMD, 369 "HWRM link config failure rc: %x\n", rc); 370 goto err_out; 371 } 372 } 373 374 return 0; 375 376 err_out: 377 bnxt_free_all_hwrm_resources(bp); 378 379 return rc; 380 } 381 382 static int bnxt_shutdown_nic(struct bnxt *bp) 383 { 384 bnxt_free_all_hwrm_resources(bp); 385 bnxt_free_all_filters(bp); 386 bnxt_free_all_vnics(bp); 387 return 0; 388 } 389 390 static int bnxt_init_nic(struct bnxt *bp) 391 { 392 int rc; 393 394 bnxt_init_ring_grps(bp); 395 bnxt_init_vnics(bp); 396 bnxt_init_filters(bp); 397 398 rc = bnxt_init_chip(bp); 399 if (rc) 400 return rc; 401 402 return 0; 403 } 404 405 /* 406 * Device configuration and status function 407 */ 408 409 static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, 410 struct rte_eth_dev_info *dev_info) 411 { 412 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 413 uint16_t max_vnics, i, j, vpool, vrxq; 414 unsigned int max_rx_rings; 415 416 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 417 418 /* MAC Specifics */ 419 dev_info->max_mac_addrs = bp->max_l2_ctx; 420 dev_info->max_hash_mac_addrs = 0; 421 422 /* PF/VF specifics */ 423 if (BNXT_PF(bp)) 424 dev_info->max_vfs = bp->pdev->max_vfs; 425 max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx, 426 RTE_MIN(bp->max_rsscos_ctx, 427 bp->max_stat_ctx))); 428 /* For the sake of symmetry, max_rx_queues = max_tx_queues */ 429 dev_info->max_rx_queues = max_rx_rings; 430 dev_info->max_tx_queues = max_rx_rings; 431 dev_info->reta_size = bp->max_rsscos_ctx; 432 dev_info->hash_key_size = 40; 433 max_vnics = bp->max_vnics; 434 435 /* Fast path specifics */ 436 dev_info->min_rx_bufsize = 1; 437 
	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
				  + VLAN_TAG_SIZE;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
					DEV_RX_OFFLOAD_IPV4_CKSUM |
					DEV_RX_OFFLOAD_UDP_CKSUM |
					DEV_RX_OFFLOAD_TCP_CKSUM |
					DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
					DEV_TX_OFFLOAD_IPV4_CKSUM |
					DEV_TX_OFFLOAD_TCP_CKSUM |
					DEV_TX_OFFLOAD_UDP_CKSUM |
					DEV_TX_OFFLOAD_TCP_TSO |
					DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
					DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
					DEV_TX_OFFLOAD_GRE_TNL_TSO |
					DEV_TX_OFFLOAD_IPIP_TNL_TSO |
					DEV_TX_OFFLOAD_GENEVE_TNL_TSO;

	/* *INDENT-OFF* */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	eth_dev->data->dev_conf.intr_conf.rxq = 1;

	/* *INDENT-ON* */

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 * need further investigation.
	 */

	/* VMDq resources */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;

	/* Inherit new configurations */
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
	return 0;
}

static inline int
rte_bnxt_atomic_write_link_status(struct rte_eth_dev *eth_dev,
				  struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &eth_dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return 1;

	return 0;
}

static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
			eth_dev->data->port_id,
			(uint32_t)link->link_speed,
			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
			("full-duplex") : ("half-duplex"));
	else
		RTE_LOG(INFO, PMD, "Port %d Link Down\n",
			eth_dev->data->port_id);
}

static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
{
	bnxt_print_link_info(eth_dev);
	return 0;
}
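
/*
 * Bring the port up: bnxt_init_nic() programs ring groups, VNICs and
 * filters through HWRM, the link state is refreshed, and the VLAN
 * filter/strip configuration requested at configure time is applied.
 * On any failure the HWRM resources and queued mbufs are released again.
 */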
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	int vlan_mask = 0;
	int rc;

	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		RTE_LOG(ERR, PMD,
			"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	}
	bp->dev_stopped = 0;

	rc = bnxt_init_nic(bp);
	if (rc)
		goto error;

	bnxt_link_update_op(eth_dev, 0);

	if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
		vlan_mask |= ETH_VLAN_FILTER_MASK;
	if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
		vlan_mask |= ETH_VLAN_STRIP_MASK;
	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
	if (rc)
		goto error;

	return 0;

error:
	bnxt_shutdown_nic(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 1;
	bnxt_set_hwrm_link_config(bp, true);
	return 0;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	return 0;
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->eth_dev->data->dev_started) {
		/* TBD: STOP HW queues DMA */
		eth_dev->data->dev_link.link_status = 0;
	}
	bnxt_set_hwrm_link_config(bp, false);
	bnxt_hwrm_port_clr_stats(bp);
	bnxt_shutdown_nic(bp);
	bp->dev_stopped = 1;
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->dev_stopped == 0)
		bnxt_dev_stop_op(eth_dev);

	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_free_mem(bp);
	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS);
	uint32_t i;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < pool; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				if (filter->mac_index == index) {
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_l2_filter(bp, filter);
					filter->mac_index = INVALID_MAC_INDEX;
					memset(&filter->l2_addr, 0,
					       ETHER_ADDR_LEN);
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);
				}
				filter = temp_filter;
			}
		}
	}
}
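
/*
 * Add a MAC address to the first VNIC of the given filter-flow pool.
 * Not supported on VFs; the request is also rejected if a filter with the
 * same MAC index already exists.  Otherwise a new L2 filter is allocated,
 * queued on the VNIC and programmed via HWRM.
 */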
static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
	struct bnxt_filter_info *filter;

	if (BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}
	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			RTE_LOG(ERR, PMD,
				"MAC address already exists for pool %d\n",
				pool);
			return -EINVAL;
		}
	}
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
		return -ENODEV;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	filter->mac_index = index;
	memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
	return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
}

int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_link new;
	unsigned int cnt = BNXT_LINK_WAIT_CNT;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			RTE_LOG(ERR, PMD,
				"Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}
		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);

		if (!wait_to_complete)
			break;
	} while (!new.link_status && cnt--);

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		rte_bnxt_atomic_write_link_status(eth_dev, &new);
		bnxt_print_link_info(eth_dev);
	}

	return rc;
}

static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
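
/*
 * Replace the RSS redirection table.  reta_size must match the table size
 * supported by the hardware (HW_HASH_INDEX_SIZE); the new table is copied
 * into every VNIC in the filter-flow pools and pushed via HWRM.
 */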
static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	int i;

	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			memcpy(vnic->rss_table, reta_conf, reta_size);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}

static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct rte_intr_handle *intr_handle = &bp->pdev->intr_handle;

	/* Retrieve from the default VNIC */
	if (!vnic)
		return -EINVAL;
	if (!vnic->rss_table)
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* EW - need to revisit here copying from u64 to u16 */
	memcpy(reta_conf, vnic->rss_table, reta_size);

	if (rte_intr_allow_others(intr_handle)) {
		if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
			bnxt_dev_lsc_intr_setup(eth_dev);
	}

	return 0;
}

static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
				   struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	uint16_t hash_type = 0;
	int i;

	/*
	 * If the requested RSS enablement disagrees with the RSS mode chosen
	 * at dev_configure time, return -EINVAL.
	 */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (!rss_conf->rss_hf)
			RTE_LOG(ERR, PMD, "Hash type NONE\n");
	} else {
		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
			return -EINVAL;
	}

	bp->flags |= BNXT_FLAG_UPDATE_HASH;
	memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));

	if (rss_conf->rss_hf & ETH_RSS_IPV4)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_IPV6)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			vnic->hash_type = hash_type;

			/*
			 * Use the supplied key if the key length is
			 * acceptable and the rss_key is not NULL
			 */
			if (rss_conf->rss_key &&
			    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
				memcpy(vnic->rss_hash_key, rss_conf->rss_key,
				       rss_conf->rss_key_len);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}
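
/*
 * Report the current RSS configuration.  The hash key and hash type are
 * read back from VNIC 0 (the configuration is identical for all VNICs);
 * the HWRM hash-type bits are translated back into ETH_RSS_* flags, and
 * any bit that cannot be translated fails the query with -ENOTSUP.
 */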
static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int len;
	uint32_t hash_types;

	/* RSS configuration is the same for all VNICs */
	if (vnic && vnic->rss_hash_key) {
		if (rss_conf->rss_key) {
			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
		}

		hash_types = vnic->hash_type;
		rss_conf->rss_hf = 0;
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_IPV4;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_IPV6;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
		}
		if (hash_types) {
			RTE_LOG(ERR, PMD,
				"Unknown RSS config from firmware (%08x), RSS disabled",
				vnic->hash_type);
			return -ENOTSUP;
		}
	} else {
		rss_conf->rss_hf = 0;
	}
	return 0;
}

static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
				 struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_link link_info;
	int rc;

	rc = bnxt_get_hwrm_link_config(bp, &link_info);
	if (rc)
		return rc;

	memset(fc_conf, 0, sizeof(*fc_conf));
	if (bp->link_info.auto_pause)
		fc_conf->autoneg = 1;
	switch (bp->link_info.pause) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
	      HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
		fc_conf->mode = RTE_FC_FULL;
		break;
	}
	return 0;
}

static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
				 struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD,
"Flow Control Settings cannot be modified\n"); 1041 return -ENOTSUP; 1042 } 1043 1044 switch (fc_conf->mode) { 1045 case RTE_FC_NONE: 1046 bp->link_info.auto_pause = 0; 1047 bp->link_info.force_pause = 0; 1048 break; 1049 case RTE_FC_RX_PAUSE: 1050 if (fc_conf->autoneg) { 1051 bp->link_info.auto_pause = 1052 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 1053 bp->link_info.force_pause = 0; 1054 } else { 1055 bp->link_info.auto_pause = 0; 1056 bp->link_info.force_pause = 1057 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 1058 } 1059 break; 1060 case RTE_FC_TX_PAUSE: 1061 if (fc_conf->autoneg) { 1062 bp->link_info.auto_pause = 1063 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 1064 bp->link_info.force_pause = 0; 1065 } else { 1066 bp->link_info.auto_pause = 0; 1067 bp->link_info.force_pause = 1068 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 1069 } 1070 break; 1071 case RTE_FC_FULL: 1072 if (fc_conf->autoneg) { 1073 bp->link_info.auto_pause = 1074 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 1075 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 1076 bp->link_info.force_pause = 0; 1077 } else { 1078 bp->link_info.auto_pause = 0; 1079 bp->link_info.force_pause = 1080 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 1081 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 1082 } 1083 break; 1084 } 1085 return bnxt_set_hwrm_link_config(bp, true); 1086 } 1087 1088 /* Add UDP tunneling port */ 1089 static int 1090 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 1091 struct rte_eth_udp_tunnel *udp_tunnel) 1092 { 1093 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 1094 uint16_t tunnel_type = 0; 1095 int rc = 0; 1096 1097 switch (udp_tunnel->prot_type) { 1098 case RTE_TUNNEL_TYPE_VXLAN: 1099 if (bp->vxlan_port_cnt) { 1100 RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n", 1101 udp_tunnel->udp_port); 1102 if (bp->vxlan_port != udp_tunnel->udp_port) { 1103 RTE_LOG(ERR, PMD, "Only one port allowed\n"); 1104 return -ENOSPC; 1105 } 1106 bp->vxlan_port_cnt++; 1107 return 0; 1108 } 1109 tunnel_type = 1110 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 1111 bp->vxlan_port_cnt++; 1112 break; 1113 case RTE_TUNNEL_TYPE_GENEVE: 1114 if (bp->geneve_port_cnt) { 1115 RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n", 1116 udp_tunnel->udp_port); 1117 if (bp->geneve_port != udp_tunnel->udp_port) { 1118 RTE_LOG(ERR, PMD, "Only one port allowed\n"); 1119 return -ENOSPC; 1120 } 1121 bp->geneve_port_cnt++; 1122 return 0; 1123 } 1124 tunnel_type = 1125 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 1126 bp->geneve_port_cnt++; 1127 break; 1128 default: 1129 RTE_LOG(ERR, PMD, "Tunnel type is not supported\n"); 1130 return -ENOTSUP; 1131 } 1132 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 1133 tunnel_type); 1134 return rc; 1135 } 1136 1137 static int 1138 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 1139 struct rte_eth_udp_tunnel *udp_tunnel) 1140 { 1141 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 1142 uint16_t tunnel_type = 0; 1143 uint16_t port = 0; 1144 int rc = 0; 1145 1146 switch (udp_tunnel->prot_type) { 1147 case RTE_TUNNEL_TYPE_VXLAN: 1148 if (!bp->vxlan_port_cnt) { 1149 RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n"); 1150 return -EINVAL; 1151 } 1152 if (bp->vxlan_port != udp_tunnel->udp_port) { 1153 RTE_LOG(ERR, PMD, "Req Port: %d. 
Configured port: %d\n", 1154 udp_tunnel->udp_port, bp->vxlan_port); 1155 return -EINVAL; 1156 } 1157 if (--bp->vxlan_port_cnt) 1158 return 0; 1159 1160 tunnel_type = 1161 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 1162 port = bp->vxlan_fw_dst_port_id; 1163 break; 1164 case RTE_TUNNEL_TYPE_GENEVE: 1165 if (!bp->geneve_port_cnt) { 1166 RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n"); 1167 return -EINVAL; 1168 } 1169 if (bp->geneve_port != udp_tunnel->udp_port) { 1170 RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n", 1171 udp_tunnel->udp_port, bp->geneve_port); 1172 return -EINVAL; 1173 } 1174 if (--bp->geneve_port_cnt) 1175 return 0; 1176 1177 tunnel_type = 1178 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 1179 port = bp->geneve_fw_dst_port_id; 1180 break; 1181 default: 1182 RTE_LOG(ERR, PMD, "Tunnel type is not supported\n"); 1183 return -ENOTSUP; 1184 } 1185 1186 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 1187 if (!rc) { 1188 if (tunnel_type == 1189 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) 1190 bp->vxlan_port = 0; 1191 if (tunnel_type == 1192 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) 1193 bp->geneve_port = 0; 1194 } 1195 return rc; 1196 } 1197 1198 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 1199 { 1200 struct bnxt_filter_info *filter, *temp_filter, *new_filter; 1201 struct bnxt_vnic_info *vnic; 1202 unsigned int i; 1203 int rc = 0; 1204 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN; 1205 1206 /* Cycle through all VNICs */ 1207 for (i = 0; i < bp->nr_vnics; i++) { 1208 /* 1209 * For each VNIC and each associated filter(s) 1210 * if VLAN exists && VLAN matches vlan_id 1211 * remove the MAC+VLAN filter 1212 * add a new MAC only filter 1213 * else 1214 * VLAN filter doesn't exist, just skip and continue 1215 */ 1216 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { 1217 filter = STAILQ_FIRST(&vnic->filter); 1218 while (filter) { 1219 temp_filter = STAILQ_NEXT(filter, next); 1220 1221 if (filter->enables & chk && 1222 filter->l2_ovlan == vlan_id) { 1223 /* Must delete the filter */ 1224 STAILQ_REMOVE(&vnic->filter, filter, 1225 bnxt_filter_info, next); 1226 bnxt_hwrm_clear_l2_filter(bp, filter); 1227 STAILQ_INSERT_TAIL( 1228 &bp->free_filter_list, 1229 filter, next); 1230 1231 /* 1232 * Need to examine to see if the MAC 1233 * filter already existed or not before 1234 * allocating a new one 1235 */ 1236 1237 new_filter = bnxt_alloc_filter(bp); 1238 if (!new_filter) { 1239 RTE_LOG(ERR, PMD, 1240 "MAC/VLAN filter alloc failed\n"); 1241 rc = -ENOMEM; 1242 goto exit; 1243 } 1244 STAILQ_INSERT_TAIL(&vnic->filter, 1245 new_filter, next); 1246 /* Inherit MAC from previous filter */ 1247 new_filter->mac_index = 1248 filter->mac_index; 1249 memcpy(new_filter->l2_addr, 1250 filter->l2_addr, ETHER_ADDR_LEN); 1251 /* MAC only filter */ 1252 rc = bnxt_hwrm_set_l2_filter(bp, 1253 vnic->fw_vnic_id, 1254 new_filter); 1255 if (rc) 1256 goto exit; 1257 RTE_LOG(INFO, PMD, 1258 "Del Vlan filter for %d\n", 1259 vlan_id); 1260 } 1261 filter = temp_filter; 1262 } 1263 } 1264 } 1265 exit: 1266 return rc; 1267 } 1268 1269 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 1270 { 1271 struct bnxt_filter_info *filter, *temp_filter, *new_filter; 1272 struct bnxt_vnic_info *vnic; 1273 unsigned int i; 1274 int rc = 0; 1275 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN | 1276 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK; 1277 uint32_t chk = 
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN; 1278 1279 /* Cycle through all VNICs */ 1280 for (i = 0; i < bp->nr_vnics; i++) { 1281 /* 1282 * For each VNIC and each associated filter(s) 1283 * if VLAN exists: 1284 * if VLAN matches vlan_id 1285 * VLAN filter already exists, just skip and continue 1286 * else 1287 * add a new MAC+VLAN filter 1288 * else 1289 * Remove the old MAC only filter 1290 * Add a new MAC+VLAN filter 1291 */ 1292 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { 1293 filter = STAILQ_FIRST(&vnic->filter); 1294 while (filter) { 1295 temp_filter = STAILQ_NEXT(filter, next); 1296 1297 if (filter->enables & chk) { 1298 if (filter->l2_ovlan == vlan_id) 1299 goto cont; 1300 } else { 1301 /* Must delete the MAC filter */ 1302 STAILQ_REMOVE(&vnic->filter, filter, 1303 bnxt_filter_info, next); 1304 bnxt_hwrm_clear_l2_filter(bp, filter); 1305 filter->l2_ovlan = 0; 1306 STAILQ_INSERT_TAIL( 1307 &bp->free_filter_list, 1308 filter, next); 1309 } 1310 new_filter = bnxt_alloc_filter(bp); 1311 if (!new_filter) { 1312 RTE_LOG(ERR, PMD, 1313 "MAC/VLAN filter alloc failed\n"); 1314 rc = -ENOMEM; 1315 goto exit; 1316 } 1317 STAILQ_INSERT_TAIL(&vnic->filter, new_filter, 1318 next); 1319 /* Inherit MAC from the previous filter */ 1320 new_filter->mac_index = filter->mac_index; 1321 memcpy(new_filter->l2_addr, filter->l2_addr, 1322 ETHER_ADDR_LEN); 1323 /* MAC + VLAN ID filter */ 1324 new_filter->l2_ovlan = vlan_id; 1325 new_filter->l2_ovlan_mask = 0xF000; 1326 new_filter->enables |= en; 1327 rc = bnxt_hwrm_set_l2_filter(bp, 1328 vnic->fw_vnic_id, 1329 new_filter); 1330 if (rc) 1331 goto exit; 1332 RTE_LOG(INFO, PMD, 1333 "Added Vlan filter for %d\n", vlan_id); 1334 cont: 1335 filter = temp_filter; 1336 } 1337 } 1338 } 1339 exit: 1340 return rc; 1341 } 1342 1343 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 1344 uint16_t vlan_id, int on) 1345 { 1346 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 1347 1348 /* These operations apply to ALL existing MAC/VLAN filters */ 1349 if (on) 1350 return bnxt_add_vlan_filter(bp, vlan_id); 1351 else 1352 return bnxt_del_vlan_filter(bp, vlan_id); 1353 } 1354 1355 static int 1356 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 1357 { 1358 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1359 unsigned int i; 1360 1361 if (mask & ETH_VLAN_FILTER_MASK) { 1362 if (!dev->data->dev_conf.rxmode.hw_vlan_filter) { 1363 /* Remove any VLAN filters programmed */ 1364 for (i = 0; i < 4095; i++) 1365 bnxt_del_vlan_filter(bp, i); 1366 } 1367 RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n", 1368 dev->data->dev_conf.rxmode.hw_vlan_filter); 1369 } 1370 1371 if (mask & ETH_VLAN_STRIP_MASK) { 1372 /* Enable or disable VLAN stripping */ 1373 for (i = 0; i < bp->nr_vnics; i++) { 1374 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 1375 if (dev->data->dev_conf.rxmode.hw_vlan_strip) 1376 vnic->vlan_strip = true; 1377 else 1378 vnic->vlan_strip = false; 1379 bnxt_hwrm_vnic_cfg(bp, vnic); 1380 } 1381 RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n", 1382 dev->data->dev_conf.rxmode.hw_vlan_strip); 1383 } 1384 1385 if (mask & ETH_VLAN_EXTEND_MASK) 1386 RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n"); 1387 1388 return 0; 1389 } 1390 1391 static void 1392 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr) 1393 { 1394 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1395 /* Default Filter is tied to VNIC 0 */ 1396 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 1397 struct bnxt_filter_info *filter; 1398 int 
rc; 1399 1400 if (BNXT_VF(bp)) 1401 return; 1402 1403 memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr)); 1404 memcpy(&dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN); 1405 1406 STAILQ_FOREACH(filter, &vnic->filter, next) { 1407 /* Default Filter is at Index 0 */ 1408 if (filter->mac_index != 0) 1409 continue; 1410 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 1411 if (rc) 1412 break; 1413 memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN); 1414 memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN); 1415 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; 1416 filter->enables |= 1417 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | 1418 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK; 1419 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 1420 if (rc) 1421 break; 1422 filter->mac_index = 0; 1423 RTE_LOG(DEBUG, PMD, "Set MAC addr\n"); 1424 } 1425 } 1426 1427 static int 1428 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 1429 struct ether_addr *mc_addr_set, 1430 uint32_t nb_mc_addr) 1431 { 1432 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 1433 char *mc_addr_list = (char *)mc_addr_set; 1434 struct bnxt_vnic_info *vnic; 1435 uint32_t off = 0, i = 0; 1436 1437 vnic = &bp->vnic_info[0]; 1438 1439 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 1440 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 1441 goto allmulti; 1442 } 1443 1444 /* TODO Check for Duplicate mcast addresses */ 1445 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 1446 for (i = 0; i < nb_mc_addr; i++) { 1447 memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN); 1448 off += ETHER_ADDR_LEN; 1449 } 1450 1451 vnic->mc_addr_cnt = i; 1452 1453 allmulti: 1454 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1455 } 1456 1457 static int 1458 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 1459 { 1460 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1461 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 1462 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 1463 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 1464 int ret; 1465 1466 ret = snprintf(fw_version, fw_size, "%d.%d.%d", 1467 fw_major, fw_minor, fw_updt); 1468 1469 ret += 1; /* add the size of '\0' */ 1470 if (fw_size < (uint32_t)ret) 1471 return ret; 1472 else 1473 return 0; 1474 } 1475 1476 static void 1477 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 1478 struct rte_eth_rxq_info *qinfo) 1479 { 1480 struct bnxt_rx_queue *rxq; 1481 1482 rxq = dev->data->rx_queues[queue_id]; 1483 1484 qinfo->mp = rxq->mb_pool; 1485 qinfo->scattered_rx = dev->data->scattered_rx; 1486 qinfo->nb_desc = rxq->nb_rx_desc; 1487 1488 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 1489 qinfo->conf.rx_drop_en = 0; 1490 qinfo->conf.rx_deferred_start = 0; 1491 } 1492 1493 static void 1494 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 1495 struct rte_eth_txq_info *qinfo) 1496 { 1497 struct bnxt_tx_queue *txq; 1498 1499 txq = dev->data->tx_queues[queue_id]; 1500 1501 qinfo->nb_desc = txq->nb_tx_desc; 1502 1503 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 1504 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 1505 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 1506 1507 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 1508 qinfo->conf.tx_rs_thresh = 0; 1509 qinfo->conf.txq_flags = txq->txq_flags; 1510 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 1511 } 1512 1513 static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 1514 { 1515 struct bnxt *bp = eth_dev->data->dev_private; 
1516 struct rte_eth_dev_info dev_info; 1517 uint32_t max_dev_mtu; 1518 uint32_t rc = 0; 1519 uint32_t i; 1520 1521 bnxt_dev_info_get_op(eth_dev, &dev_info); 1522 max_dev_mtu = dev_info.max_rx_pktlen - 1523 ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2; 1524 1525 if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) { 1526 RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n", 1527 ETHER_MIN_MTU, max_dev_mtu); 1528 return -EINVAL; 1529 } 1530 1531 1532 if (new_mtu > ETHER_MTU) { 1533 bp->flags |= BNXT_FLAG_JUMBO; 1534 eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; 1535 } else { 1536 eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; 1537 bp->flags &= ~BNXT_FLAG_JUMBO; 1538 } 1539 1540 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = 1541 new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2; 1542 1543 eth_dev->data->mtu = new_mtu; 1544 RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu); 1545 1546 for (i = 0; i < bp->nr_vnics; i++) { 1547 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 1548 1549 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN + 1550 ETHER_CRC_LEN + VLAN_TAG_SIZE * 2; 1551 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 1552 if (rc) 1553 break; 1554 1555 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 1556 if (rc) 1557 return rc; 1558 } 1559 1560 return rc; 1561 } 1562 1563 static int 1564 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 1565 { 1566 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1567 uint16_t vlan = bp->vlan; 1568 int rc; 1569 1570 if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) { 1571 RTE_LOG(ERR, PMD, 1572 "PVID cannot be modified for this function\n"); 1573 return -ENOTSUP; 1574 } 1575 bp->vlan = on ? pvid : 0; 1576 1577 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 1578 if (rc) 1579 bp->vlan = vlan; 1580 return rc; 1581 } 1582 1583 static int 1584 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 1585 { 1586 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1587 1588 return bnxt_hwrm_port_led_cfg(bp, true); 1589 } 1590 1591 static int 1592 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 1593 { 1594 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1595 1596 return bnxt_hwrm_port_led_cfg(bp, false); 1597 } 1598 1599 static uint32_t 1600 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1601 { 1602 uint32_t desc = 0, raw_cons = 0, cons; 1603 struct bnxt_cp_ring_info *cpr; 1604 struct bnxt_rx_queue *rxq; 1605 struct rx_pkt_cmpl *rxcmp; 1606 uint16_t cmp_type; 1607 uint8_t cmp = 1; 1608 bool valid; 1609 1610 rxq = dev->data->rx_queues[rx_queue_id]; 1611 cpr = rxq->cp_ring; 1612 valid = cpr->valid; 1613 1614 while (raw_cons < rxq->nb_rx_desc) { 1615 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 1616 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 1617 1618 if (!CMPL_VALID(rxcmp, valid)) 1619 goto nothing_to_do; 1620 valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid); 1621 cmp_type = CMP_TYPE(rxcmp); 1622 if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) { 1623 cmp = (rte_le_to_cpu_32( 1624 ((struct rx_tpa_end_cmpl *) 1625 (rxcmp))->agg_bufs_v1) & 1626 RX_TPA_END_CMPL_AGG_BUFS_MASK) >> 1627 RX_TPA_END_CMPL_AGG_BUFS_SFT; 1628 desc++; 1629 } else if (cmp_type == 0x11) { 1630 desc++; 1631 cmp = (rxcmp->agg_bufs_v1 & 1632 RX_PKT_CMPL_AGG_BUFS_MASK) >> 1633 RX_PKT_CMPL_AGG_BUFS_SFT; 1634 } else { 1635 cmp = 1; 1636 } 1637 nothing_to_do: 1638 raw_cons += cmp ? 
cmp : 2; 1639 } 1640 1641 return desc; 1642 } 1643 1644 static int 1645 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 1646 { 1647 struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue; 1648 struct bnxt_rx_ring_info *rxr; 1649 struct bnxt_cp_ring_info *cpr; 1650 struct bnxt_sw_rx_bd *rx_buf; 1651 struct rx_pkt_cmpl *rxcmp; 1652 uint32_t cons, cp_cons; 1653 1654 if (!rxq) 1655 return -EINVAL; 1656 1657 cpr = rxq->cp_ring; 1658 rxr = rxq->rx_ring; 1659 1660 if (offset >= rxq->nb_rx_desc) 1661 return -EINVAL; 1662 1663 cons = RING_CMP(cpr->cp_ring_struct, offset); 1664 cp_cons = cpr->cp_raw_cons; 1665 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 1666 1667 if (cons > cp_cons) { 1668 if (CMPL_VALID(rxcmp, cpr->valid)) 1669 return RTE_ETH_RX_DESC_DONE; 1670 } else { 1671 if (CMPL_VALID(rxcmp, !cpr->valid)) 1672 return RTE_ETH_RX_DESC_DONE; 1673 } 1674 rx_buf = &rxr->rx_buf_ring[cons]; 1675 if (rx_buf->mbuf == NULL) 1676 return RTE_ETH_RX_DESC_UNAVAIL; 1677 1678 1679 return RTE_ETH_RX_DESC_AVAIL; 1680 } 1681 1682 static int 1683 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 1684 { 1685 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 1686 struct bnxt_tx_ring_info *txr; 1687 struct bnxt_cp_ring_info *cpr; 1688 struct bnxt_sw_tx_bd *tx_buf; 1689 struct tx_pkt_cmpl *txcmp; 1690 uint32_t cons, cp_cons; 1691 1692 if (!txq) 1693 return -EINVAL; 1694 1695 cpr = txq->cp_ring; 1696 txr = txq->tx_ring; 1697 1698 if (offset >= txq->nb_tx_desc) 1699 return -EINVAL; 1700 1701 cons = RING_CMP(cpr->cp_ring_struct, offset); 1702 txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 1703 cp_cons = cpr->cp_raw_cons; 1704 1705 if (cons > cp_cons) { 1706 if (CMPL_VALID(txcmp, cpr->valid)) 1707 return RTE_ETH_TX_DESC_UNAVAIL; 1708 } else { 1709 if (CMPL_VALID(txcmp, !cpr->valid)) 1710 return RTE_ETH_TX_DESC_UNAVAIL; 1711 } 1712 tx_buf = &txr->tx_buf_ring[cons]; 1713 if (tx_buf->mbuf == NULL) 1714 return RTE_ETH_TX_DESC_DONE; 1715 1716 return RTE_ETH_TX_DESC_FULL; 1717 } 1718 1719 static struct bnxt_filter_info * 1720 bnxt_match_and_validate_ether_filter(struct bnxt *bp, 1721 struct rte_eth_ethertype_filter *efilter, 1722 struct bnxt_vnic_info *vnic0, 1723 struct bnxt_vnic_info *vnic, 1724 int *ret) 1725 { 1726 struct bnxt_filter_info *mfilter = NULL; 1727 int match = 0; 1728 *ret = 0; 1729 1730 if (efilter->ether_type != ETHER_TYPE_IPv4 && 1731 efilter->ether_type != ETHER_TYPE_IPv6) { 1732 RTE_LOG(ERR, PMD, "unsupported ether_type(0x%04x) in" 1733 " ethertype filter.", efilter->ether_type); 1734 *ret = -EINVAL; 1735 goto exit; 1736 } 1737 if (efilter->queue >= bp->rx_nr_rings) { 1738 RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue); 1739 *ret = -EINVAL; 1740 goto exit; 1741 } 1742 1743 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); 1744 vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]); 1745 if (vnic == NULL) { 1746 RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue); 1747 *ret = -EINVAL; 1748 goto exit; 1749 } 1750 1751 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { 1752 STAILQ_FOREACH(mfilter, &vnic0->filter, next) { 1753 if ((!memcmp(efilter->mac_addr.addr_bytes, 1754 mfilter->l2_addr, ETHER_ADDR_LEN) && 1755 mfilter->flags == 1756 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP && 1757 mfilter->ethertype == efilter->ether_type)) { 1758 match = 1; 1759 break; 1760 } 1761 } 1762 } else { 1763 STAILQ_FOREACH(mfilter, &vnic->filter, next) 1764 if ((!memcmp(efilter->mac_addr.addr_bytes, 1765 mfilter->l2_addr, ETHER_ADDR_LEN) && 1766 mfilter->ethertype 
== efilter->ether_type && 1767 mfilter->flags == 1768 HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) { 1769 match = 1; 1770 break; 1771 } 1772 } 1773 1774 if (match) 1775 *ret = -EEXIST; 1776 1777 exit: 1778 return mfilter; 1779 } 1780 1781 static int 1782 bnxt_ethertype_filter(struct rte_eth_dev *dev, 1783 enum rte_filter_op filter_op, 1784 void *arg) 1785 { 1786 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1787 struct rte_eth_ethertype_filter *efilter = 1788 (struct rte_eth_ethertype_filter *)arg; 1789 struct bnxt_filter_info *bfilter, *filter1; 1790 struct bnxt_vnic_info *vnic, *vnic0; 1791 int ret; 1792 1793 if (filter_op == RTE_ETH_FILTER_NOP) 1794 return 0; 1795 1796 if (arg == NULL) { 1797 RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.", 1798 filter_op); 1799 return -EINVAL; 1800 } 1801 1802 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); 1803 vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]); 1804 1805 switch (filter_op) { 1806 case RTE_ETH_FILTER_ADD: 1807 bnxt_match_and_validate_ether_filter(bp, efilter, 1808 vnic0, vnic, &ret); 1809 if (ret < 0) 1810 return ret; 1811 1812 bfilter = bnxt_get_unused_filter(bp); 1813 if (bfilter == NULL) { 1814 RTE_LOG(ERR, PMD, 1815 "Not enough resources for a new filter.\n"); 1816 return -ENOMEM; 1817 } 1818 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; 1819 memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes, 1820 ETHER_ADDR_LEN); 1821 memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes, 1822 ETHER_ADDR_LEN); 1823 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; 1824 bfilter->ethertype = efilter->ether_type; 1825 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 1826 1827 filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0); 1828 if (filter1 == NULL) { 1829 ret = -1; 1830 goto cleanup; 1831 } 1832 bfilter->enables |= 1833 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 1834 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; 1835 1836 bfilter->dst_id = vnic->fw_vnic_id; 1837 1838 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { 1839 bfilter->flags = 1840 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 1841 } 1842 1843 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); 1844 if (ret) 1845 goto cleanup; 1846 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); 1847 break; 1848 case RTE_ETH_FILTER_DELETE: 1849 filter1 = bnxt_match_and_validate_ether_filter(bp, efilter, 1850 vnic0, vnic, &ret); 1851 if (ret == -EEXIST) { 1852 ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1); 1853 1854 STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info, 1855 next); 1856 bnxt_free_filter(bp, filter1); 1857 } else if (ret == 0) { 1858 RTE_LOG(ERR, PMD, "No matching filter found\n"); 1859 } 1860 break; 1861 default: 1862 RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op); 1863 ret = -EINVAL; 1864 goto error; 1865 } 1866 return ret; 1867 cleanup: 1868 bnxt_free_filter(bp, bfilter); 1869 error: 1870 return ret; 1871 } 1872 1873 static inline int 1874 parse_ntuple_filter(struct bnxt *bp, 1875 struct rte_eth_ntuple_filter *nfilter, 1876 struct bnxt_filter_info *bfilter) 1877 { 1878 uint32_t en = 0; 1879 1880 if (nfilter->queue >= bp->rx_nr_rings) { 1881 RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue); 1882 return -EINVAL; 1883 } 1884 1885 switch (nfilter->dst_port_mask) { 1886 case UINT16_MAX: 1887 bfilter->dst_port_mask = -1; 1888 bfilter->dst_port = nfilter->dst_port; 1889 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT | 1890 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 1891 break; 1892 default: 1893 
RTE_LOG(ERR, PMD, "invalid dst_port mask."); 1894 return -EINVAL; 1895 } 1896 1897 bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 1898 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 1899 1900 switch (nfilter->proto_mask) { 1901 case UINT8_MAX: 1902 if (nfilter->proto == 17) /* IPPROTO_UDP */ 1903 bfilter->ip_protocol = 17; 1904 else if (nfilter->proto == 6) /* IPPROTO_TCP */ 1905 bfilter->ip_protocol = 6; 1906 else 1907 return -EINVAL; 1908 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 1909 break; 1910 default: 1911 RTE_LOG(ERR, PMD, "invalid protocol mask."); 1912 return -EINVAL; 1913 } 1914 1915 switch (nfilter->dst_ip_mask) { 1916 case UINT32_MAX: 1917 bfilter->dst_ipaddr_mask[0] = -1; 1918 bfilter->dst_ipaddr[0] = nfilter->dst_ip; 1919 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR | 1920 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 1921 break; 1922 default: 1923 RTE_LOG(ERR, PMD, "invalid dst_ip mask."); 1924 return -EINVAL; 1925 } 1926 1927 switch (nfilter->src_ip_mask) { 1928 case UINT32_MAX: 1929 bfilter->src_ipaddr_mask[0] = -1; 1930 bfilter->src_ipaddr[0] = nfilter->src_ip; 1931 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | 1932 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 1933 break; 1934 default: 1935 RTE_LOG(ERR, PMD, "invalid src_ip mask."); 1936 return -EINVAL; 1937 } 1938 1939 switch (nfilter->src_port_mask) { 1940 case UINT16_MAX: 1941 bfilter->src_port_mask = -1; 1942 bfilter->src_port = nfilter->src_port; 1943 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | 1944 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 1945 break; 1946 default: 1947 RTE_LOG(ERR, PMD, "invalid src_port mask."); 1948 return -EINVAL; 1949 } 1950 1951 //TODO Priority 1952 //nfilter->priority = (uint8_t)filter->priority; 1953 1954 bfilter->enables = en; 1955 return 0; 1956 } 1957 1958 static struct bnxt_filter_info* 1959 bnxt_match_ntuple_filter(struct bnxt_vnic_info *vnic, 1960 struct bnxt_filter_info *bfilter) 1961 { 1962 struct bnxt_filter_info *mfilter = NULL; 1963 1964 STAILQ_FOREACH(mfilter, &vnic->filter, next) { 1965 if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] && 1966 bfilter->src_ipaddr_mask[0] == 1967 mfilter->src_ipaddr_mask[0] && 1968 bfilter->src_port == mfilter->src_port && 1969 bfilter->src_port_mask == mfilter->src_port_mask && 1970 bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] && 1971 bfilter->dst_ipaddr_mask[0] == 1972 mfilter->dst_ipaddr_mask[0] && 1973 bfilter->dst_port == mfilter->dst_port && 1974 bfilter->dst_port_mask == mfilter->dst_port_mask && 1975 bfilter->flags == mfilter->flags && 1976 bfilter->enables == mfilter->enables) 1977 return mfilter; 1978 } 1979 return NULL; 1980 } 1981 1982 static int 1983 bnxt_cfg_ntuple_filter(struct bnxt *bp, 1984 struct rte_eth_ntuple_filter *nfilter, 1985 enum rte_filter_op filter_op) 1986 { 1987 struct bnxt_filter_info *bfilter, *mfilter, *filter1; 1988 struct bnxt_vnic_info *vnic, *vnic0; 1989 int ret; 1990 1991 if (nfilter->flags != RTE_5TUPLE_FLAGS) { 1992 RTE_LOG(ERR, PMD, "only 5tuple is supported."); 1993 return -EINVAL; 1994 } 1995 1996 if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { 1997 RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n"); 1998 return -EINVAL; 1999 } 2000 2001 bfilter = bnxt_get_unused_filter(bp); 2002 if (bfilter == NULL) { 2003 RTE_LOG(ERR, PMD, 2004 "Not enough resources for a new filter.\n"); 2005 return -ENOMEM; 2006 } 2007 ret = parse_ntuple_filter(bp, nfilter, bfilter); 2008 if (ret < 0) 2009 goto free_filter; 2010 2011 vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]); 2012 vnic0 = 
STAILQ_FIRST(&bp->ff_pool[0]); 2013 filter1 = STAILQ_FIRST(&vnic0->filter); 2014 if (filter1 == NULL) { 2015 ret = -1; 2016 goto free_filter; 2017 } 2018 2019 bfilter->dst_id = vnic->fw_vnic_id; 2020 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; 2021 bfilter->enables |= 2022 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2023 bfilter->ethertype = 0x800; 2024 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2025 2026 mfilter = bnxt_match_ntuple_filter(vnic, bfilter); 2027 2028 if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD) { 2029 RTE_LOG(ERR, PMD, "filter exists."); 2030 ret = -EEXIST; 2031 goto free_filter; 2032 } 2033 if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) { 2034 RTE_LOG(ERR, PMD, "filter doesn't exist."); 2035 ret = -ENOENT; 2036 goto free_filter; 2037 } 2038 2039 if (filter_op == RTE_ETH_FILTER_ADD) { 2040 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; 2041 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); 2042 if (ret) 2043 goto free_filter; 2044 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); 2045 } else { 2046 if (mfilter == NULL) { 2047 /* This should not happen. But for Coverity! */ 2048 ret = -ENOENT; 2049 goto free_filter; 2050 } 2051 ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter); 2052 2053 STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, 2054 next); 2055 bnxt_free_filter(bp, mfilter); 2056 bfilter->fw_l2_filter_id = -1; 2057 bnxt_free_filter(bp, bfilter); 2058 } 2059 2060 return 0; 2061 free_filter: 2062 bfilter->fw_l2_filter_id = -1; 2063 bnxt_free_filter(bp, bfilter); 2064 return ret; 2065 } 2066 2067 static int 2068 bnxt_ntuple_filter(struct rte_eth_dev *dev, 2069 enum rte_filter_op filter_op, 2070 void *arg) 2071 { 2072 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2073 int ret; 2074 2075 if (filter_op == RTE_ETH_FILTER_NOP) 2076 return 0; 2077 2078 if (arg == NULL) { 2079 RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.", 2080 filter_op); 2081 return -EINVAL; 2082 } 2083 2084 switch (filter_op) { 2085 case RTE_ETH_FILTER_ADD: 2086 ret = bnxt_cfg_ntuple_filter(bp, 2087 (struct rte_eth_ntuple_filter *)arg, 2088 filter_op); 2089 break; 2090 case RTE_ETH_FILTER_DELETE: 2091 ret = bnxt_cfg_ntuple_filter(bp, 2092 (struct rte_eth_ntuple_filter *)arg, 2093 filter_op); 2094 break; 2095 default: 2096 RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op); 2097 ret = -EINVAL; 2098 break; 2099 } 2100 return ret; 2101 } 2102 2103 static int 2104 bnxt_parse_fdir_filter(struct bnxt *bp, 2105 struct rte_eth_fdir_filter *fdir, 2106 struct bnxt_filter_info *filter) 2107 { 2108 enum rte_fdir_mode fdir_mode = 2109 bp->eth_dev->data->dev_conf.fdir_conf.mode; 2110 struct bnxt_vnic_info *vnic0, *vnic; 2111 struct bnxt_filter_info *filter1; 2112 uint32_t en = 0; 2113 int i; 2114 2115 if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2116 return -EINVAL; 2117 2118 filter->l2_ovlan = fdir->input.flow_ext.vlan_tci; 2119 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; 2120 2121 switch (fdir->input.flow_type) { 2122 case RTE_ETH_FLOW_IPV4: 2123 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 2124 /* FALLTHROUGH */ 2125 filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip; 2126 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2127 filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip; 2128 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2129 filter->ip_protocol = fdir->input.flow.ip4_flow.proto; 2130 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2131 filter->ip_addr_type = 2132 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2133 
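/*
 * Flow director rules are translated into HWRM n-tuple filters, so
 * exact-match /32 masks are applied to the IPv4 source and destination
 * addresses captured above.
 */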
filter->src_ipaddr_mask[0] = 0xffffffff; 2134 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2135 filter->dst_ipaddr_mask[0] = 0xffffffff; 2136 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2137 filter->ethertype = 0x800; 2138 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2139 break; 2140 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 2141 filter->src_port = fdir->input.flow.tcp4_flow.src_port; 2142 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2143 filter->dst_port = fdir->input.flow.tcp4_flow.dst_port; 2144 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2145 filter->dst_port_mask = 0xffff; 2146 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2147 filter->src_port_mask = 0xffff; 2148 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2149 filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip; 2150 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2151 filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip; 2152 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2153 filter->ip_protocol = 6; 2154 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2155 filter->ip_addr_type = 2156 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2157 filter->src_ipaddr_mask[0] = 0xffffffff; 2158 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2159 filter->dst_ipaddr_mask[0] = 0xffffffff; 2160 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2161 filter->ethertype = 0x800; 2162 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2163 break; 2164 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 2165 filter->src_port = fdir->input.flow.udp4_flow.src_port; 2166 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2167 filter->dst_port = fdir->input.flow.udp4_flow.dst_port; 2168 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2169 filter->dst_port_mask = 0xffff; 2170 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2171 filter->src_port_mask = 0xffff; 2172 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2173 filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip; 2174 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2175 filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip; 2176 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2177 filter->ip_protocol = 17; 2178 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2179 filter->ip_addr_type = 2180 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2181 filter->src_ipaddr_mask[0] = 0xffffffff; 2182 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2183 filter->dst_ipaddr_mask[0] = 0xffffffff; 2184 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2185 filter->ethertype = 0x800; 2186 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2187 break; 2188 case RTE_ETH_FLOW_IPV6: 2189 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 2190 /* FALLTHROUGH */ 2191 filter->ip_addr_type = 2192 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 2193 filter->ip_protocol = fdir->input.flow.ipv6_flow.proto; 2194 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2195 rte_memcpy(filter->src_ipaddr, 2196 fdir->input.flow.ipv6_flow.src_ip, 16); 2197 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2198 rte_memcpy(filter->dst_ipaddr, 2199 fdir->input.flow.ipv6_flow.dst_ip, 16); 2200 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2201 memset(filter->dst_ipaddr_mask, 0xff, 16); 2202 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2203 memset(filter->src_ipaddr_mask, 0xff, 16); 2204 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2205 filter->ethertype = 0x86dd; 2206 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2207 break; 2208 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 2209 filter->src_port = fdir->input.flow.tcp6_flow.src_port; 2210 en |= 
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2211 filter->dst_port = fdir->input.flow.tcp6_flow.dst_port; 2212 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2213 filter->dst_port_mask = 0xffff; 2214 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2215 filter->src_port_mask = 0xffff; 2216 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2217 filter->ip_addr_type = 2218 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 2219 filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto; 2220 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2221 rte_memcpy(filter->src_ipaddr, 2222 fdir->input.flow.tcp6_flow.ip.src_ip, 16); 2223 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2224 rte_memcpy(filter->dst_ipaddr, 2225 fdir->input.flow.tcp6_flow.ip.dst_ip, 16); 2226 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2227 memset(filter->dst_ipaddr_mask, 0xff, 16); 2228 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2229 memset(filter->src_ipaddr_mask, 0xff, 16); 2230 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2231 filter->ethertype = 0x86dd; 2232 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2233 break; 2234 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 2235 filter->src_port = fdir->input.flow.udp6_flow.src_port; 2236 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2237 filter->dst_port = fdir->input.flow.udp6_flow.dst_port; 2238 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2239 filter->dst_port_mask = 0xffff; 2240 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2241 filter->src_port_mask = 0xffff; 2242 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2243 filter->ip_addr_type = 2244 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 2245 filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto; 2246 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2247 rte_memcpy(filter->src_ipaddr, 2248 fdir->input.flow.udp6_flow.ip.src_ip, 16); 2249 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2250 rte_memcpy(filter->dst_ipaddr, 2251 fdir->input.flow.udp6_flow.ip.dst_ip, 16); 2252 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2253 memset(filter->dst_ipaddr_mask, 0xff, 16); 2254 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2255 memset(filter->src_ipaddr_mask, 0xff, 16); 2256 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2257 filter->ethertype = 0x86dd; 2258 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2259 break; 2260 case RTE_ETH_FLOW_L2_PAYLOAD: 2261 filter->ethertype = fdir->input.flow.l2_flow.ether_type; 2262 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2263 break; 2264 case RTE_ETH_FLOW_VXLAN: 2265 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 2266 return -EINVAL; 2267 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 2268 filter->tunnel_type = 2269 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 2270 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 2271 break; 2272 case RTE_ETH_FLOW_NVGRE: 2273 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 2274 return -EINVAL; 2275 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 2276 filter->tunnel_type = 2277 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 2278 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 2279 break; 2280 case RTE_ETH_FLOW_UNKNOWN: 2281 case RTE_ETH_FLOW_RAW: 2282 case RTE_ETH_FLOW_FRAG_IPV4: 2283 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: 2284 case RTE_ETH_FLOW_FRAG_IPV6: 2285 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: 2286 case RTE_ETH_FLOW_IPV6_EX: 2287 case RTE_ETH_FLOW_IPV6_TCP_EX: 2288 case RTE_ETH_FLOW_IPV6_UDP_EX: 2289 case RTE_ETH_FLOW_GENEVE: 2290 /* FALLTHROUGH */ 2291 default: 2292 return -EINVAL; 2293 } 2294 2295 vnic0 = 
STAILQ_FIRST(&bp->ff_pool[0]); 2296 vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]); 2297 if (vnic == NULL) { 2298 RTE_LOG(ERR, PMD, "Invalid queue %d\n", fdir->action.rx_queue); 2299 return -EINVAL; 2300 } 2301 2302 2303 if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 2304 rte_memcpy(filter->dst_macaddr, 2305 fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6); 2306 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; 2307 } 2308 2309 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) { 2310 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 2311 filter1 = STAILQ_FIRST(&vnic0->filter); 2312 //filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 2313 } else { 2314 filter->dst_id = vnic->fw_vnic_id; 2315 for (i = 0; i < ETHER_ADDR_LEN; i++) 2316 if (filter->dst_macaddr[i] == 0x00) 2317 filter1 = STAILQ_FIRST(&vnic0->filter); 2318 else 2319 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 2320 } 2321 2322 if (filter1 == NULL) 2323 return -EINVAL; 2324 2325 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2326 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 2327 2328 filter->enables = en; 2329 2330 return 0; 2331 } 2332 2333 static struct bnxt_filter_info * 2334 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf) 2335 { 2336 struct bnxt_filter_info *mf = NULL; 2337 int i; 2338 2339 for (i = bp->nr_vnics - 1; i >= 0; i--) { 2340 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2341 2342 STAILQ_FOREACH(mf, &vnic->filter, next) { 2343 if (mf->filter_type == nf->filter_type && 2344 mf->flags == nf->flags && 2345 mf->src_port == nf->src_port && 2346 mf->src_port_mask == nf->src_port_mask && 2347 mf->dst_port == nf->dst_port && 2348 mf->dst_port_mask == nf->dst_port_mask && 2349 mf->ip_protocol == nf->ip_protocol && 2350 mf->ip_addr_type == nf->ip_addr_type && 2351 mf->ethertype == nf->ethertype && 2352 mf->vni == nf->vni && 2353 mf->tunnel_type == nf->tunnel_type && 2354 mf->l2_ovlan == nf->l2_ovlan && 2355 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 2356 mf->l2_ivlan == nf->l2_ivlan && 2357 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 2358 !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) && 2359 !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, 2360 ETHER_ADDR_LEN) && 2361 !memcmp(mf->src_macaddr, nf->src_macaddr, 2362 ETHER_ADDR_LEN) && 2363 !memcmp(mf->dst_macaddr, nf->dst_macaddr, 2364 ETHER_ADDR_LEN) && 2365 !memcmp(mf->src_ipaddr, nf->src_ipaddr, 2366 sizeof(nf->src_ipaddr)) && 2367 !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, 2368 sizeof(nf->src_ipaddr_mask)) && 2369 !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, 2370 sizeof(nf->dst_ipaddr)) && 2371 !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, 2372 sizeof(nf->dst_ipaddr_mask))) 2373 return mf; 2374 } 2375 } 2376 return NULL; 2377 } 2378 2379 static int 2380 bnxt_fdir_filter(struct rte_eth_dev *dev, 2381 enum rte_filter_op filter_op, 2382 void *arg) 2383 { 2384 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2385 struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg; 2386 struct bnxt_filter_info *filter, *match; 2387 struct bnxt_vnic_info *vnic; 2388 int ret = 0, i; 2389 2390 if (filter_op == RTE_ETH_FILTER_NOP) 2391 return 0; 2392 2393 if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) 2394 return -EINVAL; 2395 2396 switch (filter_op) { 2397 case RTE_ETH_FILTER_ADD: 2398 case RTE_ETH_FILTER_DELETE: 2399 /* FALLTHROUGH */ 2400 filter = bnxt_get_unused_filter(bp); 2401 if (filter == NULL) { 2402 RTE_LOG(ERR, PMD, 2403 "Not enough resources for a new flow.\n"); 2404 return -ENOMEM; 
2405 } 2406 2407 ret = bnxt_parse_fdir_filter(bp, fdir, filter); 2408 if (ret != 0) 2409 goto free_filter; 2410 filter->filter_type = HWRM_CFA_NTUPLE_FILTER; 2411 2412 match = bnxt_match_fdir(bp, filter); 2413 if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) { 2414 RTE_LOG(ERR, PMD, "Flow already exists.\n"); 2415 ret = -EEXIST; 2416 goto free_filter; 2417 } 2418 if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) { 2419 RTE_LOG(ERR, PMD, "Flow does not exist.\n"); 2420 ret = -ENOENT; 2421 goto free_filter; 2422 } 2423 2424 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 2425 vnic = STAILQ_FIRST(&bp->ff_pool[0]); 2426 else 2427 vnic = 2428 STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]); 2429 2430 if (filter_op == RTE_ETH_FILTER_ADD) { 2431 ret = bnxt_hwrm_set_ntuple_filter(bp, 2432 filter->dst_id, 2433 filter); 2434 if (ret) 2435 goto free_filter; 2436 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2437 } else { 2438 ret = bnxt_hwrm_clear_ntuple_filter(bp, match); 2439 STAILQ_REMOVE(&vnic->filter, match, 2440 bnxt_filter_info, next); 2441 bnxt_free_filter(bp, match); 2442 filter->fw_l2_filter_id = -1; 2443 bnxt_free_filter(bp, filter); 2444 } 2445 break; 2446 case RTE_ETH_FILTER_FLUSH: 2447 for (i = bp->nr_vnics - 1; i >= 0; i--) { 2448 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2449 2450 STAILQ_FOREACH(filter, &vnic->filter, next) { 2451 if (filter->filter_type == 2452 HWRM_CFA_NTUPLE_FILTER) { 2453 ret = 2454 bnxt_hwrm_clear_ntuple_filter(bp, 2455 filter); 2456 STAILQ_REMOVE(&vnic->filter, filter, 2457 bnxt_filter_info, next); 2458 } 2459 } 2460 } 2461 return ret; 2462 case RTE_ETH_FILTER_UPDATE: 2463 case RTE_ETH_FILTER_STATS: 2464 case RTE_ETH_FILTER_INFO: 2465 /* FALLTHROUGH */ 2466 RTE_LOG(ERR, PMD, "operation %u not implemented", filter_op); 2467 break; 2468 default: 2469 RTE_LOG(ERR, PMD, "unknown operation %u", filter_op); 2470 ret = -EINVAL; 2471 break; 2472 } 2473 return ret; 2474 2475 free_filter: 2476 filter->fw_l2_filter_id = -1; 2477 bnxt_free_filter(bp, filter); 2478 return ret; 2479 } 2480 2481 static int 2482 bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused, 2483 enum rte_filter_type filter_type, 2484 enum rte_filter_op filter_op, void *arg) 2485 { 2486 int ret = 0; 2487 2488 switch (filter_type) { 2489 case RTE_ETH_FILTER_TUNNEL: 2490 RTE_LOG(ERR, PMD, 2491 "filter type: %d: To be implemented\n", filter_type); 2492 break; 2493 case RTE_ETH_FILTER_FDIR: 2494 ret = bnxt_fdir_filter(dev, filter_op, arg); 2495 break; 2496 case RTE_ETH_FILTER_NTUPLE: 2497 ret = bnxt_ntuple_filter(dev, filter_op, arg); 2498 break; 2499 case RTE_ETH_FILTER_ETHERTYPE: 2500 ret = bnxt_ethertype_filter(dev, filter_op, arg); 2501 break; 2502 case RTE_ETH_FILTER_GENERIC: 2503 if (filter_op != RTE_ETH_FILTER_GET) 2504 return -EINVAL; 2505 *(const void **)arg = &bnxt_flow_ops; 2506 break; 2507 default: 2508 RTE_LOG(ERR, PMD, 2509 "Filter type (%d) not supported", filter_type); 2510 ret = -EINVAL; 2511 break; 2512 } 2513 return ret; 2514 } 2515 2516 static const uint32_t * 2517 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 2518 { 2519 static const uint32_t ptypes[] = { 2520 RTE_PTYPE_L2_ETHER_VLAN, 2521 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 2522 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 2523 RTE_PTYPE_L4_ICMP, 2524 RTE_PTYPE_L4_TCP, 2525 RTE_PTYPE_L4_UDP, 2526 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 2527 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 2528 RTE_PTYPE_INNER_L4_ICMP, 2529 RTE_PTYPE_INNER_L4_TCP, 2530 RTE_PTYPE_INNER_L4_UDP, 2531 RTE_PTYPE_UNKNOWN 2532 }; 2533 2534 if 
(dev->rx_pkt_burst == bnxt_recv_pkts) 2535 return ptypes; 2536 return NULL; 2537 } 2538 2539 2540 2541 static int 2542 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 2543 { 2544 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2545 int rc; 2546 uint32_t dir_entries; 2547 uint32_t entry_length; 2548 2549 RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n", 2550 __func__, bp->pdev->addr.domain, bp->pdev->addr.bus, 2551 bp->pdev->addr.devid, bp->pdev->addr.function); 2552 2553 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 2554 if (rc != 0) 2555 return rc; 2556 2557 return dir_entries * entry_length; 2558 } 2559 2560 static int 2561 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 2562 struct rte_dev_eeprom_info *in_eeprom) 2563 { 2564 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2565 uint32_t index; 2566 uint32_t offset; 2567 2568 RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d " 2569 "len = %d\n", __func__, bp->pdev->addr.domain, 2570 bp->pdev->addr.bus, bp->pdev->addr.devid, 2571 bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); 2572 2573 if (in_eeprom->offset == 0) /* special offset value to get directory */ 2574 return bnxt_get_nvram_directory(bp, in_eeprom->length, 2575 in_eeprom->data); 2576 2577 index = in_eeprom->offset >> 24; 2578 offset = in_eeprom->offset & 0xffffff; 2579 2580 if (index != 0) 2581 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 2582 in_eeprom->length, in_eeprom->data); 2583 2584 return 0; 2585 } 2586 2587 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 2588 { 2589 switch (dir_type) { 2590 case BNX_DIR_TYPE_CHIMP_PATCH: 2591 case BNX_DIR_TYPE_BOOTCODE: 2592 case BNX_DIR_TYPE_BOOTCODE_2: 2593 case BNX_DIR_TYPE_APE_FW: 2594 case BNX_DIR_TYPE_APE_PATCH: 2595 case BNX_DIR_TYPE_KONG_FW: 2596 case BNX_DIR_TYPE_KONG_PATCH: 2597 case BNX_DIR_TYPE_BONO_FW: 2598 case BNX_DIR_TYPE_BONO_PATCH: 2599 return true; 2600 } 2601 2602 return false; 2603 } 2604 2605 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 2606 { 2607 switch (dir_type) { 2608 case BNX_DIR_TYPE_AVS: 2609 case BNX_DIR_TYPE_EXP_ROM_MBA: 2610 case BNX_DIR_TYPE_PCIE: 2611 case BNX_DIR_TYPE_TSCF_UCODE: 2612 case BNX_DIR_TYPE_EXT_PHY: 2613 case BNX_DIR_TYPE_CCM: 2614 case BNX_DIR_TYPE_ISCSI_BOOT: 2615 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 2616 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 2617 return true; 2618 } 2619 2620 return false; 2621 } 2622 2623 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 2624 { 2625 return bnxt_dir_type_is_ape_bin_format(dir_type) || 2626 bnxt_dir_type_is_other_exec_format(dir_type); 2627 } 2628 2629 static int 2630 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 2631 struct rte_dev_eeprom_info *in_eeprom) 2632 { 2633 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2634 uint8_t index, dir_op; 2635 uint16_t type, ext, ordinal, attr; 2636 2637 RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d " 2638 "len = %d\n", __func__, bp->pdev->addr.domain, 2639 bp->pdev->addr.bus, bp->pdev->addr.devid, 2640 bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); 2641 2642 if (!BNXT_PF(bp)) { 2643 RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n"); 2644 return -EINVAL; 2645 } 2646 2647 type = in_eeprom->magic >> 16; 2648 2649 if (type == 0xffff) { /* special value for directory operations */ 2650 index = in_eeprom->magic & 0xff; 2651 dir_op = in_eeprom->magic >> 8; 2652 if (index == 0) 2653 return -EINVAL; 2654 switch (dir_op) { 2655 case 0x0e: /* erase */ 
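/*
 * For directory operations the 'magic' word is an encoded argument:
 * bits 31:16 carry the 0xffff marker checked above, bits 15:8 the
 * operation code (0x0e = erase) and bits 7:0 the 1-based directory
 * index. The erase below additionally requires offset == ~magic as a
 * simple interlock against accidental calls.
 */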
2656 if (in_eeprom->offset != ~in_eeprom->magic) 2657 return -EINVAL; 2658 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 2659 default: 2660 return -EINVAL; 2661 } 2662 } 2663 2664 /* Create or re-write an NVM item: */ 2665 if (bnxt_dir_type_is_executable(type) == true) 2666 return -EOPNOTSUPP; 2667 ext = in_eeprom->magic & 0xffff; 2668 ordinal = in_eeprom->offset >> 16; 2669 attr = in_eeprom->offset & 0xffff; 2670 2671 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 2672 in_eeprom->data, in_eeprom->length); 2673 return 0; 2674 } 2675 2676 /* 2677 * Initialization 2678 */ 2679 2680 static const struct eth_dev_ops bnxt_dev_ops = { 2681 .dev_infos_get = bnxt_dev_info_get_op, 2682 .dev_close = bnxt_dev_close_op, 2683 .dev_configure = bnxt_dev_configure_op, 2684 .dev_start = bnxt_dev_start_op, 2685 .dev_stop = bnxt_dev_stop_op, 2686 .dev_set_link_up = bnxt_dev_set_link_up_op, 2687 .dev_set_link_down = bnxt_dev_set_link_down_op, 2688 .stats_get = bnxt_stats_get_op, 2689 .stats_reset = bnxt_stats_reset_op, 2690 .rx_queue_setup = bnxt_rx_queue_setup_op, 2691 .rx_queue_release = bnxt_rx_queue_release_op, 2692 .tx_queue_setup = bnxt_tx_queue_setup_op, 2693 .tx_queue_release = bnxt_tx_queue_release_op, 2694 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 2695 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 2696 .reta_update = bnxt_reta_update_op, 2697 .reta_query = bnxt_reta_query_op, 2698 .rss_hash_update = bnxt_rss_hash_update_op, 2699 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 2700 .link_update = bnxt_link_update_op, 2701 .promiscuous_enable = bnxt_promiscuous_enable_op, 2702 .promiscuous_disable = bnxt_promiscuous_disable_op, 2703 .allmulticast_enable = bnxt_allmulticast_enable_op, 2704 .allmulticast_disable = bnxt_allmulticast_disable_op, 2705 .mac_addr_add = bnxt_mac_addr_add_op, 2706 .mac_addr_remove = bnxt_mac_addr_remove_op, 2707 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 2708 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 2709 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 2710 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 2711 .vlan_filter_set = bnxt_vlan_filter_set_op, 2712 .vlan_offload_set = bnxt_vlan_offload_set_op, 2713 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 2714 .mtu_set = bnxt_mtu_set_op, 2715 .mac_addr_set = bnxt_set_default_mac_addr_op, 2716 .xstats_get = bnxt_dev_xstats_get_op, 2717 .xstats_get_names = bnxt_dev_xstats_get_names_op, 2718 .xstats_reset = bnxt_dev_xstats_reset_op, 2719 .fw_version_get = bnxt_fw_version_get, 2720 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 2721 .rxq_info_get = bnxt_rxq_info_get_op, 2722 .txq_info_get = bnxt_txq_info_get_op, 2723 .dev_led_on = bnxt_dev_led_on_op, 2724 .dev_led_off = bnxt_dev_led_off_op, 2725 .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op, 2726 .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op, 2727 .rx_queue_count = bnxt_rx_queue_count_op, 2728 .rx_descriptor_status = bnxt_rx_descriptor_status_op, 2729 .tx_descriptor_status = bnxt_tx_descriptor_status_op, 2730 .filter_ctrl = bnxt_filter_ctrl_op, 2731 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 2732 .get_eeprom_length = bnxt_get_eeprom_length_op, 2733 .get_eeprom = bnxt_get_eeprom_op, 2734 .set_eeprom = bnxt_set_eeprom_op, 2735 }; 2736 2737 static bool bnxt_vf_pciid(uint16_t id) 2738 { 2739 if (id == BROADCOM_DEV_ID_57304_VF || 2740 id == BROADCOM_DEV_ID_57406_VF || 2741 id == BROADCOM_DEV_ID_5731X_VF || 2742 id == BROADCOM_DEV_ID_5741X_VF || 2743 id == BROADCOM_DEV_ID_57414_VF || 2744 id == 
BROADCOM_DEV_ID_STRATUS_NIC_VF) 2745 return true; 2746 return false; 2747 } 2748 2749 static int bnxt_init_board(struct rte_eth_dev *eth_dev) 2750 { 2751 struct bnxt *bp = eth_dev->data->dev_private; 2752 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 2753 int rc; 2754 2755 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 2756 if (!pci_dev->mem_resource[0].addr) { 2757 RTE_LOG(ERR, PMD, 2758 "Cannot find PCI device base address, aborting\n"); 2759 rc = -ENODEV; 2760 goto init_err_disable; 2761 } 2762 2763 bp->eth_dev = eth_dev; 2764 bp->pdev = pci_dev; 2765 2766 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 2767 if (!bp->bar0) { 2768 RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n"); 2769 rc = -ENOMEM; 2770 goto init_err_release; 2771 } 2772 return 0; 2773 2774 init_err_release: 2775 if (bp->bar0) 2776 bp->bar0 = NULL; 2777 2778 init_err_disable: 2779 2780 return rc; 2781 } 2782 2783 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); 2784 2785 #define ALLOW_FUNC(x) \ 2786 { \ 2787 typeof(x) arg = (x); \ 2788 bp->pf.vf_req_fwd[((arg) >> 5)] &= \ 2789 ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \ 2790 } 2791 static int 2792 bnxt_dev_init(struct rte_eth_dev *eth_dev) 2793 { 2794 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 2795 char mz_name[RTE_MEMZONE_NAMESIZE]; 2796 const struct rte_memzone *mz = NULL; 2797 static int version_printed; 2798 uint32_t total_alloc_len; 2799 rte_iova_t mz_phys_addr; 2800 struct bnxt *bp; 2801 int rc; 2802 2803 if (version_printed++ == 0) 2804 RTE_LOG(INFO, PMD, "%s\n", bnxt_version); 2805 2806 rte_eth_copy_pci_info(eth_dev, pci_dev); 2807 2808 bp = eth_dev->data->dev_private; 2809 2810 rte_atomic64_init(&bp->rx_mbuf_alloc_fail); 2811 bp->dev_stopped = 1; 2812 2813 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2814 goto skip_init; 2815 2816 if (bnxt_vf_pciid(pci_dev->id.device_id)) 2817 bp->flags |= BNXT_FLAG_VF; 2818 2819 rc = bnxt_init_board(eth_dev); 2820 if (rc) { 2821 RTE_LOG(ERR, PMD, 2822 "Board initialization failed rc: %x\n", rc); 2823 goto error; 2824 } 2825 skip_init: 2826 eth_dev->dev_ops = &bnxt_dev_ops; 2827 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2828 return 0; 2829 eth_dev->rx_pkt_burst = &bnxt_recv_pkts; 2830 eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; 2831 2832 if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) { 2833 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 2834 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain, 2835 pci_dev->addr.bus, pci_dev->addr.devid, 2836 pci_dev->addr.function, "rx_port_stats"); 2837 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 2838 mz = rte_memzone_lookup(mz_name); 2839 total_alloc_len = RTE_CACHE_LINE_ROUNDUP( 2840 sizeof(struct rx_port_stats) + 512); 2841 if (!mz) { 2842 mz = rte_memzone_reserve(mz_name, total_alloc_len, 2843 SOCKET_ID_ANY, 2844 RTE_MEMZONE_2MB | 2845 RTE_MEMZONE_SIZE_HINT_ONLY); 2846 if (mz == NULL) 2847 return -ENOMEM; 2848 } 2849 memset(mz->addr, 0, mz->len); 2850 mz_phys_addr = mz->iova; 2851 if ((unsigned long)mz->addr == mz_phys_addr) { 2852 RTE_LOG(WARNING, PMD, 2853 "Memzone physical address same as virtual.\n"); 2854 RTE_LOG(WARNING, PMD, 2855 "Using rte_mem_virt2iova()\n"); 2856 mz_phys_addr = rte_mem_virt2iova(mz->addr); 2857 if (mz_phys_addr == 0) { 2858 RTE_LOG(ERR, PMD, 2859 "unable to map address to physical memory\n"); 2860 return -ENOMEM; 2861 } 2862 } 2863 2864 bp->rx_mem_zone = (const void *)mz; 2865 bp->hw_rx_port_stats = mz->addr; 2866 bp->hw_rx_port_stats_map = mz_phys_addr; 2867 2868 snprintf(mz_name, 
RTE_MEMZONE_NAMESIZE, 2869 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain, 2870 pci_dev->addr.bus, pci_dev->addr.devid, 2871 pci_dev->addr.function, "tx_port_stats"); 2872 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 2873 mz = rte_memzone_lookup(mz_name); 2874 total_alloc_len = RTE_CACHE_LINE_ROUNDUP( 2875 sizeof(struct tx_port_stats) + 512); 2876 if (!mz) { 2877 mz = rte_memzone_reserve(mz_name, total_alloc_len, 2878 SOCKET_ID_ANY, 2879 RTE_MEMZONE_2MB | 2880 RTE_MEMZONE_SIZE_HINT_ONLY); 2881 if (mz == NULL) 2882 return -ENOMEM; 2883 } 2884 memset(mz->addr, 0, mz->len); 2885 mz_phys_addr = mz->iova; 2886 if ((unsigned long)mz->addr == mz_phys_addr) { 2887 RTE_LOG(WARNING, PMD, 2888 "Memzone physical address same as virtual.\n"); 2889 RTE_LOG(WARNING, PMD, 2890 "Using rte_mem_virt2iova()\n"); 2891 mz_phys_addr = rte_mem_virt2iova(mz->addr); 2892 if (mz_phys_addr == 0) { 2893 RTE_LOG(ERR, PMD, 2894 "unable to map address to physical memory\n"); 2895 return -ENOMEM; 2896 } 2897 } 2898 2899 bp->tx_mem_zone = (const void *)mz; 2900 bp->hw_tx_port_stats = mz->addr; 2901 bp->hw_tx_port_stats_map = mz_phys_addr; 2902 2903 bp->flags |= BNXT_FLAG_PORT_STATS; 2904 } 2905 2906 rc = bnxt_alloc_hwrm_resources(bp); 2907 if (rc) { 2908 RTE_LOG(ERR, PMD, 2909 "hwrm resource allocation failure rc: %x\n", rc); 2910 goto error_free; 2911 } 2912 rc = bnxt_hwrm_ver_get(bp); 2913 if (rc) 2914 goto error_free; 2915 bnxt_hwrm_queue_qportcfg(bp); 2916 2917 bnxt_hwrm_func_qcfg(bp); 2918 2919 /* Get the MAX capabilities for this function */ 2920 rc = bnxt_hwrm_func_qcaps(bp); 2921 if (rc) { 2922 RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc); 2923 goto error_free; 2924 } 2925 if (bp->max_tx_rings == 0) { 2926 RTE_LOG(ERR, PMD, "No TX rings available!\n"); 2927 rc = -EBUSY; 2928 goto error_free; 2929 } 2930 eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl", 2931 ETHER_ADDR_LEN * bp->max_l2_ctx, 0); 2932 if (eth_dev->data->mac_addrs == NULL) { 2933 RTE_LOG(ERR, PMD, 2934 "Failed to alloc %u bytes needed to store MAC addr tbl", 2935 ETHER_ADDR_LEN * bp->max_l2_ctx); 2936 rc = -ENOMEM; 2937 goto error_free; 2938 } 2939 /* Copy the permanent MAC from the qcap response address now. */ 2940 memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr)); 2941 memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN); 2942 bp->grp_info = rte_zmalloc("bnxt_grp_info", 2943 sizeof(*bp->grp_info) * bp->max_ring_grps, 0); 2944 if (!bp->grp_info) { 2945 RTE_LOG(ERR, PMD, 2946 "Failed to alloc %zu bytes needed to store group info table\n", 2947 sizeof(*bp->grp_info) * bp->max_ring_grps); 2948 rc = -ENOMEM; 2949 goto error_free; 2950 } 2951 2952 /* Forward all requests if firmware is new enough */ 2953 if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) && 2954 (bp->fw_ver < ((20 << 24) | (7 << 16)))) || 2955 ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) { 2956 memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd)); 2957 } else { 2958 RTE_LOG(WARNING, PMD, 2959 "Firmware too old for VF mailbox functionality\n"); 2960 memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd)); 2961 } 2962 2963 /* 2964 * The following are used for driver cleanup. If we disallow these, 2965 * VF drivers can't clean up cleanly.
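 *
 * Each ALLOW_FUNC() invocation clears one bit in pf.vf_req_fwd (bit
 * (cmd & 0x1f) of 32-bit word (cmd >> 5)), so the named HWRM command
 * issued by a VF is let through instead of being forwarded to the PF
 * driver for screening.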
2966 */ 2967 ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR); 2968 ALLOW_FUNC(HWRM_VNIC_FREE); 2969 ALLOW_FUNC(HWRM_RING_FREE); 2970 ALLOW_FUNC(HWRM_RING_GRP_FREE); 2971 ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE); 2972 ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE); 2973 ALLOW_FUNC(HWRM_STAT_CTX_FREE); 2974 ALLOW_FUNC(HWRM_PORT_PHY_QCFG); 2975 ALLOW_FUNC(HWRM_VNIC_TPA_CFG); 2976 rc = bnxt_hwrm_func_driver_register(bp); 2977 if (rc) { 2978 RTE_LOG(ERR, PMD, 2979 "Failed to register driver"); 2980 rc = -EBUSY; 2981 goto error_free; 2982 } 2983 2984 RTE_LOG(INFO, PMD, 2985 DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n", 2986 pci_dev->mem_resource[0].phys_addr, 2987 pci_dev->mem_resource[0].addr); 2988 2989 rc = bnxt_hwrm_func_reset(bp); 2990 if (rc) { 2991 RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc); 2992 rc = -1; 2993 goto error_free; 2994 } 2995 2996 if (BNXT_PF(bp)) { 2997 //if (bp->pf.active_vfs) { 2998 // TODO: Deallocate VF resources? 2999 //} 3000 if (bp->pdev->max_vfs) { 3001 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 3002 if (rc) { 3003 RTE_LOG(ERR, PMD, "Failed to allocate VFs\n"); 3004 goto error_free; 3005 } 3006 } else { 3007 rc = bnxt_hwrm_allocate_pf_only(bp); 3008 if (rc) { 3009 RTE_LOG(ERR, PMD, 3010 "Failed to allocate PF resources\n"); 3011 goto error_free; 3012 } 3013 } 3014 } 3015 3016 bnxt_hwrm_port_led_qcaps(bp); 3017 3018 rc = bnxt_setup_int(bp); 3019 if (rc) 3020 goto error_free; 3021 3022 rc = bnxt_alloc_mem(bp); 3023 if (rc) 3024 goto error_free_int; 3025 3026 rc = bnxt_request_int(bp); 3027 if (rc) 3028 goto error_free_int; 3029 3030 rc = bnxt_alloc_def_cp_ring(bp); 3031 if (rc) 3032 goto error_free_int; 3033 3034 bnxt_enable_int(bp); 3035 3036 return 0; 3037 3038 error_free_int: 3039 bnxt_disable_int(bp); 3040 bnxt_free_def_cp_ring(bp); 3041 bnxt_hwrm_func_buf_unrgtr(bp); 3042 bnxt_free_int(bp); 3043 bnxt_free_mem(bp); 3044 error_free: 3045 bnxt_dev_uninit(eth_dev); 3046 error: 3047 return rc; 3048 } 3049 3050 static int 3051 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) { 3052 struct bnxt *bp = eth_dev->data->dev_private; 3053 int rc; 3054 3055 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 3056 return -EPERM; 3057 3058 bnxt_disable_int(bp); 3059 bnxt_free_int(bp); 3060 bnxt_free_mem(bp); 3061 if (eth_dev->data->mac_addrs != NULL) { 3062 rte_free(eth_dev->data->mac_addrs); 3063 eth_dev->data->mac_addrs = NULL; 3064 } 3065 if (bp->grp_info != NULL) { 3066 rte_free(bp->grp_info); 3067 bp->grp_info = NULL; 3068 } 3069 rc = bnxt_hwrm_func_driver_unregister(bp, 0); 3070 bnxt_free_hwrm_resources(bp); 3071 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); 3072 rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); 3073 if (bp->dev_stopped == 0) 3074 bnxt_dev_close_op(eth_dev); 3075 if (bp->pf.vf_info) 3076 rte_free(bp->pf.vf_info); 3077 eth_dev->dev_ops = NULL; 3078 eth_dev->rx_pkt_burst = NULL; 3079 eth_dev->tx_pkt_burst = NULL; 3080 3081 return rc; 3082 } 3083 3084 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 3085 struct rte_pci_device *pci_dev) 3086 { 3087 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt), 3088 bnxt_dev_init); 3089 } 3090 3091 static int bnxt_pci_remove(struct rte_pci_device *pci_dev) 3092 { 3093 return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit); 3094 } 3095 3096 static struct rte_pci_driver bnxt_rte_pmd = { 3097 .id_table = bnxt_pci_id_map, 3098 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | 3099 RTE_PCI_DRV_INTR_LSC, 3100 .probe = bnxt_pci_probe, 3101 .remove = 
bnxt_pci_remove, 3102 }; 3103 3104 static bool 3105 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 3106 { 3107 if (strcmp(dev->device->driver->name, drv->driver.name)) 3108 return false; 3109 3110 return true; 3111 } 3112 3113 bool is_bnxt_supported(struct rte_eth_dev *dev) 3114 { 3115 return is_device_supported(dev, &bnxt_rte_pmd); 3116 } 3117 3118 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd); 3119 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map); 3120 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci"); 3121
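/*
 * Illustrative note, not compiled into the driver: the firmware version
 * gates in bnxt_dev_init() compare bp->fw_ver against constants packed one
 * byte per version component, e.g. (20 << 24) | (6 << 16) | (100 << 8).
 * Assuming a major.minor.build byte layout, a hypothetical helper with the
 * same packing would be:
 *
 *	static uint32_t bnxt_pack_fw_ver(uint8_t maj, uint8_t min, uint8_t bld)
 *	{
 *		return ((uint32_t)maj << 24) | ((uint32_t)min << 16) |
 *		       ((uint32_t)bld << 8);
 *	}
 *
 * which makes the VF mailbox check read:
 *
 *	(fw_ver >= bnxt_pack_fw_ver(20, 6, 100) &&
 *	 fw_ver < bnxt_pack_fw_ver(20, 7, 0)) ||
 *	fw_ver >= bnxt_pack_fw_ver(20, 8, 0)
 */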