/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"

#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";

#define PCI_VENDOR_ID_BROADCOM 0x14E4

#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
#define BROADCOM_DEV_ID_57414_VF 0x16c1
#define BROADCOM_DEV_ID_57301 0x16c8
#define BROADCOM_DEV_ID_57302 0x16c9
#define BROADCOM_DEV_ID_57304_PF 0x16ca
#define BROADCOM_DEV_ID_57304_VF 0x16cb
#define BROADCOM_DEV_ID_57417_MF 0x16cc
#define BROADCOM_DEV_ID_NS2 0x16cd
#define BROADCOM_DEV_ID_57311 0x16ce
#define BROADCOM_DEV_ID_57312 0x16cf
#define BROADCOM_DEV_ID_57402 0x16d0
#define BROADCOM_DEV_ID_57404 0x16d1
#define BROADCOM_DEV_ID_57406_PF 0x16d2
#define BROADCOM_DEV_ID_57406_VF 0x16d3
#define BROADCOM_DEV_ID_57402_MF 0x16d4
#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
#define BROADCOM_DEV_ID_57412 0x16d6
#define BROADCOM_DEV_ID_57414 0x16d7
#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
#define BROADCOM_DEV_ID_5741X_VF 0x16dc
#define BROADCOM_DEV_ID_57412_MF 0x16de
#define BROADCOM_DEV_ID_57314 0x16df
#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
#define BROADCOM_DEV_ID_5731X_VF 0x16e1
#define BROADCOM_DEV_ID_57417_SFP 0x16e2
#define BROADCOM_DEV_ID_57416_SFP 0x16e3
#define BROADCOM_DEV_ID_57317_SFP 0x16e4
#define BROADCOM_DEV_ID_57404_MF 0x16e7
#define BROADCOM_DEV_ID_57406_MF 0x16e8
#define BROADCOM_DEV_ID_57407_SFP 0x16e9
#define BROADCOM_DEV_ID_57407_MF 0x16ea
#define BROADCOM_DEV_ID_57414_MF 0x16ec
#define BROADCOM_DEV_ID_57416_MF 0x16ee

static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_ETH_RSS_SUPPORT (	\
	ETH_RSS_IPV4 |		\
	ETH_RSS_NONFRAG_IPV4_TCP |	\
	ETH_RSS_NONFRAG_IPV4_UDP |	\
	ETH_RSS_IPV6 |		\
	ETH_RSS_NONFRAG_IPV6_TCP |	\
	ETH_RSS_NONFRAG_IPV6_UDP)

static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);

/***********************/

/*
 * High level utility functions
 */

static void bnxt_free_mem(struct bnxt *bp)
{
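	/*
	 * Descriptive note (not in the original source): this releases the
	 * resources in roughly the reverse order of bnxt_alloc_mem() below --
	 * filters first, then VNIC attributes/memory, then stats and rings.
	 */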
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	bnxt_free_stats(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_def_cp_ring(bp);
}

static int bnxt_alloc_mem(struct bnxt *bp)
{
	int rc;

	/* Default completion ring */
	rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
			      bp->def_cp_ring, "def_cp");
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp);
	return rc;
}

static int bnxt_init_chip(struct bnxt *bp)
{
	unsigned int i, rss_idx, fw_idx;
	struct rte_eth_link new;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
	uint32_t vec = BNXT_MISC_VEC_ID;
	int rc;

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	if (bp->eth_dev->data->mtu > ETHER_MTU) {
		bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
		bp->flags |= BNXT_FLAG_JUMBO;
	} else {
		bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM vnic %d ctx alloc failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM vnic %d filter failure rc: %x\n",
				i, rc);
			goto err_out;
		}
		if (vnic->rss_table && vnic->hash_type) {
			/*
			 * Fill the RSS hash & redirection table with
			 * ring group ids for all VNICs
			 */
			for (rss_idx = 0, fw_idx = 0;
			     rss_idx < HW_HASH_INDEX_SIZE;
			     rss_idx++, fw_idx++) {
				if (vnic->fw_grp_ids[fw_idx] ==
				    INVALID_HW_RING_ID)
					fw_idx = 0;
				vnic->rss_table[rss_idx] =
						vnic->fw_grp_ids[fw_idx];
			}
			rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
			if (rc) {
				RTE_LOG(ERR, PMD,
					"HWRM vnic %d set RSS failure rc: %x\n",
					i, rc);
				goto err_out;
			}
		}

		bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

		if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
		else
			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
	}
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = bp->eth_dev->data->nb_rx_queues;
		RTE_LOG(INFO, PMD, "%s(): intr_vector = %d\n", __func__,
			intr_vector);
		if (intr_vector > bp->rx_cp_nr_rings) {
			RTE_LOG(ERR, PMD, "At most %d intr queues supported",
				bp->rx_cp_nr_rings);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    bp->eth_dev->data->nb_rx_queues *
				    sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues"
				" intr_vec", bp->eth_dev->data->nb_rx_queues);
			return -ENOMEM;
		}
		RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p "
			"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
			__func__, intr_handle->intr_vec, intr_handle->nb_efd,
			intr_handle->max_intr);
	}

	for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
	     queue_id++) {
		intr_handle->intr_vec[queue_id] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
		goto err_out;
	}

	if (!bp->link_info.link_up) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM link config failure rc: %x\n", rc);
			goto err_out;
		}
	}
	bnxt_print_link_info(bp->eth_dev);

	return 0;

err_out:
	bnxt_free_all_hwrm_resources(bp);

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}

static int bnxt_init_nic(struct bnxt *bp)
{
	int rc;

	bnxt_init_ring_grps(bp);
	bnxt_init_vnics(bp);
	bnxt_init_filters(bp);

	rc = bnxt_init_chip(bp);
	if (rc)
		return rc;

	return 0;
}

/*
 * Device configuration and status function
 */

static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				  struct rte_eth_dev_info *dev_info)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;
	unsigned int max_rx_rings;

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* MAC Specifics */
	dev_info->max_mac_addrs = bp->max_l2_ctx;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp))
		dev_info->max_vfs = bp->pdev->max_vfs;
	max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
						RTE_MIN(bp->max_rsscos_ctx,
						bp->max_stat_ctx)));
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bp->max_rsscos_ctx;
	dev_info->hash_key_size = 40;
	max_vnics = bp->max_vnics;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
				  + VLAN_TAG_SIZE;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
					DEV_RX_OFFLOAD_IPV4_CKSUM |
					DEV_RX_OFFLOAD_UDP_CKSUM |
					DEV_RX_OFFLOAD_TCP_CKSUM |
					DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
					DEV_TX_OFFLOAD_IPV4_CKSUM |
					DEV_TX_OFFLOAD_TCP_CKSUM |
					DEV_TX_OFFLOAD_UDP_CKSUM |
					DEV_TX_OFFLOAD_TCP_TSO |
					DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
					DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
					DEV_TX_OFFLOAD_GRE_TNL_TSO |
					DEV_TX_OFFLOAD_IPIP_TNL_TSO |
					DEV_TX_OFFLOAD_GENEVE_TNL_TSO;

	/* *INDENT-OFF* */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	eth_dev->data->dev_conf.intr_conf.rxq = 1;

	/* *INDENT-ON* */

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 *       need further investigation.
	 */

	/* VMDq resources */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;

	/* Inherit new configurations */
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
		eth_dev->data->mtu =
				eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
				ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
	return 0;
}

static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
547 ("full-duplex") : ("half-duplex\n")); 548 else 549 RTE_LOG(INFO, PMD, "Port %d Link Down\n", 550 eth_dev->data->port_id); 551 } 552 553 static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev) 554 { 555 bnxt_print_link_info(eth_dev); 556 return 0; 557 } 558 559 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) 560 { 561 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 562 int vlan_mask = 0; 563 int rc; 564 565 if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) { 566 RTE_LOG(ERR, PMD, 567 "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", 568 bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); 569 } 570 bp->dev_stopped = 0; 571 572 rc = bnxt_init_nic(bp); 573 if (rc) 574 goto error; 575 576 bnxt_link_update_op(eth_dev, 1); 577 578 if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter) 579 vlan_mask |= ETH_VLAN_FILTER_MASK; 580 if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip) 581 vlan_mask |= ETH_VLAN_STRIP_MASK; 582 rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); 583 if (rc) 584 goto error; 585 586 return 0; 587 588 error: 589 bnxt_shutdown_nic(bp); 590 bnxt_free_tx_mbufs(bp); 591 bnxt_free_rx_mbufs(bp); 592 return rc; 593 } 594 595 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) 596 { 597 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 598 int rc = 0; 599 600 if (!bp->link_info.link_up) 601 rc = bnxt_set_hwrm_link_config(bp, true); 602 if (!rc) 603 eth_dev->data->dev_link.link_status = 1; 604 605 bnxt_print_link_info(eth_dev); 606 return 0; 607 } 608 609 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) 610 { 611 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 612 613 eth_dev->data->dev_link.link_status = 0; 614 bnxt_set_hwrm_link_config(bp, false); 615 bp->link_info.link_up = 0; 616 617 return 0; 618 } 619 620 /* Unload the driver, release resources */ 621 static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) 622 { 623 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 624 625 if (bp->eth_dev->data->dev_started) { 626 /* TBD: STOP HW queues DMA */ 627 eth_dev->data->dev_link.link_status = 0; 628 } 629 bnxt_set_hwrm_link_config(bp, false); 630 bnxt_hwrm_port_clr_stats(bp); 631 bnxt_shutdown_nic(bp); 632 bp->dev_stopped = 1; 633 } 634 635 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) 636 { 637 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 638 639 if (bp->dev_stopped == 0) 640 bnxt_dev_stop_op(eth_dev); 641 642 bnxt_free_tx_mbufs(bp); 643 bnxt_free_rx_mbufs(bp); 644 bnxt_free_mem(bp); 645 if (eth_dev->data->mac_addrs != NULL) { 646 rte_free(eth_dev->data->mac_addrs); 647 eth_dev->data->mac_addrs = NULL; 648 } 649 if (bp->grp_info != NULL) { 650 rte_free(bp->grp_info); 651 bp->grp_info = NULL; 652 } 653 } 654 655 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, 656 uint32_t index) 657 { 658 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 659 uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; 660 struct bnxt_vnic_info *vnic; 661 struct bnxt_filter_info *filter, *temp_filter; 662 uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS); 663 uint32_t i; 664 665 /* 666 * Loop through all VNICs from the specified filter flow pools to 667 * remove the corresponding MAC addr filter 668 */ 669 for (i = 0; i < pool; i++) { 670 if (!(pool_mask & (1ULL << i))) 671 continue; 672 673 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { 674 filter = STAILQ_FIRST(&vnic->filter); 675 while (filter) { 676 temp_filter = STAILQ_NEXT(filter, next); 677 if 
				if (filter->mac_index == index) {
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_l2_filter(bp, filter);
					filter->mac_index = INVALID_MAC_INDEX;
					memset(&filter->l2_addr, 0,
					       ETHER_ADDR_LEN);
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);
				}
				filter = temp_filter;
			}
		}
	}
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
	struct bnxt_filter_info *filter;

	if (BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}
	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			RTE_LOG(ERR, PMD,
				"MAC addr already existed for pool %d\n", pool);
			return -EINVAL;
		}
	}
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
		return -ENODEV;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	filter->mac_index = index;
	memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
	return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
}

int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_link new;
	unsigned int cnt = BNXT_LINK_WAIT_CNT;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			RTE_LOG(ERR, PMD,
				"Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}
		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);

		if (!wait_to_complete)
			break;
	} while (!new.link_status && cnt--);

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		memcpy(&eth_dev->data->dev_link, &new,
		       sizeof(struct rte_eth_link));
		bnxt_print_link_info(eth_dev);
	}

	return rc;
}

static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	int i;

	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			memcpy(vnic->rss_table, reta_conf, reta_size);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}

static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct rte_intr_handle *intr_handle
		= &bp->pdev->intr_handle;

	/* Retrieve from the default VNIC */
	if (!vnic)
		return -EINVAL;
	if (!vnic->rss_table)
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* EW - need to revisit here copying from uint64_t to uint16_t */
	memcpy(reta_conf, vnic->rss_table, reta_size);

	if (rte_intr_allow_others(intr_handle)) {
		if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
			bnxt_dev_lsc_intr_setup(eth_dev);
	}

	return 0;
}

static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
				   struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	uint16_t hash_type = 0;
	int i;

	/*
	 * If RSS enablement were different than dev_configure,
	 * then return -EINVAL
	 */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (!rss_conf->rss_hf)
			RTE_LOG(ERR, PMD, "Hash type NONE\n");
	} else {
		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
			return -EINVAL;
	}

	bp->flags |= BNXT_FLAG_UPDATE_HASH;
	memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));

	if (rss_conf->rss_hf & ETH_RSS_IPV4)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_IPV6)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			vnic->hash_type = hash_type;

			/*
			 * Use the supplied key if the key length is
			 * acceptable and the rss_key is not NULL
			 */
			if (rss_conf->rss_key &&
			    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
				memcpy(vnic->rss_hash_key, rss_conf->rss_key,
				       rss_conf->rss_key_len);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}

static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int len;
	uint32_t hash_types;

	/* RSS configuration is the same for all VNICs */
	if (vnic && vnic->rss_hash_key) {
		if (rss_conf->rss_key) {
			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
		}

		hash_types = vnic->hash_type;
		rss_conf->rss_hf = 0;
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_IPV4;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_IPV6;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
		}
		if (hash_types) {
			RTE_LOG(ERR, PMD,
				"Unknown RSS config from firmware (%08x), RSS disabled",
				vnic->hash_type);
			return -ENOTSUP;
		}
	} else {
		rss_conf->rss_hf = 0;
	}
	return 0;
}

static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_link link_info;
	int rc;

	rc = bnxt_get_hwrm_link_config(bp, &link_info);
	if (rc)
		return rc;

	memset(fc_conf, 0, sizeof(*fc_conf));
	if (bp->link_info.auto_pause)
		fc_conf->autoneg = 1;
	switch (bp->link_info.pause) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
		fc_conf->mode = RTE_FC_FULL;
		break;
	}
	return 0;
}

static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
		return -ENOTSUP;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		bp->link_info.auto_pause = 0;
		bp->link_info.force_pause = 0;
		break;
	case RTE_FC_RX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
		}
		break;
	case RTE_FC_TX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
		}
		break;
	case RTE_FC_FULL:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
		}
		break;
	}
	return bnxt_set_hwrm_link_config(bp, true);
}

/* Add UDP tunneling port */
static int
bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t tunnel_type = 0;
	int rc = 0;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (bp->vxlan_port_cnt) {
			RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
				udp_tunnel->udp_port);
			if (bp->vxlan_port != udp_tunnel->udp_port) {
				RTE_LOG(ERR, PMD, "Only one port allowed\n");
				return -ENOSPC;
			}
			bp->vxlan_port_cnt++;
			return 0;
		}
		tunnel_type =
			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
		bp->vxlan_port_cnt++;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (bp->geneve_port_cnt) {
			RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
				udp_tunnel->udp_port);
			if (bp->geneve_port != udp_tunnel->udp_port) {
				RTE_LOG(ERR, PMD, "Only one port allowed\n");
				return -ENOSPC;
			}
			bp->geneve_port_cnt++;
			return 0;
		}
		tunnel_type =
			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
		bp->geneve_port_cnt++;
		break;
	default:
		RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
		return -ENOTSUP;
	}
	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
					     tunnel_type);
	return rc;
}

static int
bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t tunnel_type = 0;
	uint16_t port = 0;
	int rc = 0;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (!bp->vxlan_port_cnt) {
			RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
			return -EINVAL;
		}
		if (bp->vxlan_port != udp_tunnel->udp_port) {
			RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
				udp_tunnel->udp_port, bp->vxlan_port);
			return -EINVAL;
		}
		if (--bp->vxlan_port_cnt)
			return 0;

		tunnel_type =
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
		port = bp->vxlan_fw_dst_port_id;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (!bp->geneve_port_cnt) {
			RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
			return -EINVAL;
		}
		if (bp->geneve_port != udp_tunnel->udp_port) {
			RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
				udp_tunnel->udp_port, bp->geneve_port);
			return -EINVAL;
		}
		if (--bp->geneve_port_cnt)
			return 0;

		tunnel_type =
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
		port = bp->geneve_fw_dst_port_id;
		break;
	default:
		RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
		return -ENOTSUP;
	}

	rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
	if (!rc) {
		if (tunnel_type ==
		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
			bp->vxlan_port = 0;
		if (tunnel_type ==
		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
			bp->geneve_port = 0;
	}
	return rc;
}

static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
{
	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
	struct bnxt_vnic_info *vnic;
	unsigned int i;
	int rc = 0;
	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;

	/* Cycle through all VNICs */
	for (i = 0; i < bp->nr_vnics; i++) {
		/*
		 * For each VNIC and each associated filter(s)
		 * if VLAN exists && VLAN matches vlan_id
		 *      remove the MAC+VLAN filter
		 *      add a new MAC only filter
		 * else
		 *      VLAN filter doesn't exist, just skip and continue
		 */
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);

				if (filter->enables & chk &&
				    filter->l2_ovlan == vlan_id) {
					/* Must delete the filter */
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_l2_filter(bp, filter);
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);

					/*
					 * Need to examine to see if the MAC
					 * filter already existed or not before
					 * allocating a new one
					 */

					new_filter = bnxt_alloc_filter(bp);
					if (!new_filter) {
						RTE_LOG(ERR, PMD,
							"MAC/VLAN filter alloc failed\n");
						rc = -ENOMEM;
						goto exit;
					}
					STAILQ_INSERT_TAIL(&vnic->filter,
							   new_filter, next);
					/* Inherit MAC from previous filter */
					new_filter->mac_index =
							filter->mac_index;
					memcpy(new_filter->l2_addr,
					       filter->l2_addr, ETHER_ADDR_LEN);
					/* MAC only filter */
					rc = bnxt_hwrm_set_l2_filter(bp,
							vnic->fw_vnic_id,
							new_filter);
					if (rc)
						goto exit;
					RTE_LOG(INFO, PMD,
						"Del Vlan filter for %d\n",
						vlan_id);
				}
				filter = temp_filter;
			}
		}
	}
exit:
	return rc;
}

static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
{
	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
	struct bnxt_vnic_info *vnic;
	unsigned int i;
	int rc = 0;
	uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;

	/* Cycle through all VNICs */
	for (i = 0; i < bp->nr_vnics; i++) {
		/*
		 * For each VNIC and each associated filter(s)
		 * if VLAN exists:
		 *   if VLAN matches vlan_id
		 *      VLAN filter already exists, just skip and continue
		 *   else
		 *      add a new MAC+VLAN filter
		 * else
		 *   Remove the old MAC only filter
		 *    Add a new MAC+VLAN filter
		 */
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);

				if (filter->enables & chk) {
					if (filter->l2_ovlan == vlan_id)
						goto cont;
				} else {
					/* Must delete the MAC filter */
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_l2_filter(bp, filter);
					filter->l2_ovlan = 0;
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);
				}
				new_filter = bnxt_alloc_filter(bp);
				if (!new_filter) {
					RTE_LOG(ERR, PMD,
						"MAC/VLAN filter alloc failed\n");
					rc = -ENOMEM;
					goto exit;
				}
				STAILQ_INSERT_TAIL(&vnic->filter, new_filter,
						   next);
				/* Inherit MAC from the previous filter */
				new_filter->mac_index = filter->mac_index;
				memcpy(new_filter->l2_addr, filter->l2_addr,
				       ETHER_ADDR_LEN);
				/* MAC + VLAN ID filter */
				new_filter->l2_ovlan = vlan_id;
				new_filter->l2_ovlan_mask = 0xF000;
				new_filter->enables |= en;
				rc = bnxt_hwrm_set_l2_filter(bp,
							     vnic->fw_vnic_id,
							     new_filter);
				if (rc)
					goto exit;
				RTE_LOG(INFO, PMD,
					"Added Vlan filter for %d\n", vlan_id);
cont:
				filter = temp_filter;
			}
		}
	}
exit:
	return rc;
}

static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
				   uint16_t vlan_id, int on)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	/* These operations apply to ALL existing MAC/VLAN filters */
	if (on)
		return bnxt_add_vlan_filter(bp, vlan_id);
	else
		return bnxt_del_vlan_filter(bp, vlan_id);
}

static int
bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	unsigned int i;

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (!dev->data->dev_conf.rxmode.hw_vlan_filter) {
			/* Remove any VLAN filters programmed */
			for (i = 0; i < 4095; i++)
				bnxt_del_vlan_filter(bp, i);
		}
		RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n",
			dev->data->dev_conf.rxmode.hw_vlan_filter);
	}

	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		for (i = 0; i < bp->nr_vnics; i++) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
			if (dev->data->dev_conf.rxmode.hw_vlan_strip)
				vnic->vlan_strip = true;
			else
				vnic->vlan_strip = false;
			bnxt_hwrm_vnic_cfg(bp, vnic);
		}
		RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n",
			dev->data->dev_conf.rxmode.hw_vlan_strip);
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");

	return 0;
}

static void
bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	/* Default Filter is tied to VNIC 0 */
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct bnxt_filter_info *filter;
	int rc;

	if (BNXT_VF(bp))
		return;

	memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
	memcpy(&dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		/* Default Filter is at Index 0 */
		if (filter->mac_index != 0)
			continue;
		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
		if (rc)
			break;
		memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
		memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
		rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
		if (rc)
			break;
		filter->mac_index = 0;
		RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
	}
}

static int
bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
			  struct ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	char *mc_addr_list = (char *)mc_addr_set;
	struct bnxt_vnic_info *vnic;
	uint32_t off = 0, i = 0;

	vnic = &bp->vnic_info[0];

	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
		goto allmulti;
	}

	/* TODO Check for Duplicate mcast addresses */
	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	for (i = 0; i < nb_mc_addr; i++) {
		memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN);
		off += ETHER_ADDR_LEN;
	}

	vnic->mc_addr_cnt = i;

allmulti:
	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static int
bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
	uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
	uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
	int ret;

	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
			fw_major, fw_minor, fw_updt);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void
bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct bnxt_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;

	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = 0;
	qinfo->conf.rx_deferred_start = 0;
}

static void
bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct bnxt_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;

	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
	qinfo->conf.tx_thresh.wthresh = txq->wthresh;

	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
	qinfo->conf.tx_rs_thresh = 0;
	qinfo->conf.txq_flags = txq->txq_flags;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}

static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
	struct bnxt *bp = eth_dev->data->dev_private;
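	/*
	 * Descriptive note (not in the original source): the new MTU is
	 * bounded by what bnxt_dev_info_get_op() reports as the maximum Rx
	 * packet length, minus Ethernet/CRC/double-VLAN overhead.
	 */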
	struct rte_eth_dev_info dev_info;
	uint32_t max_dev_mtu;
	uint32_t rc = 0;
	uint32_t i;

	bnxt_dev_info_get_op(eth_dev, &dev_info);
	max_dev_mtu = dev_info.max_rx_pktlen -
		      ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;

	if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
		RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n",
			ETHER_MIN_MTU, max_dev_mtu);
		return -EINVAL;
	}


	if (new_mtu > ETHER_MTU) {
		bp->flags |= BNXT_FLAG_JUMBO;
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	} else {
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
		new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;

	eth_dev->data->mtu = new_mtu;
	RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu);

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
					ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc)
			break;

		rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
		if (rc)
			return rc;
	}

	return rc;
}

static int
bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	uint16_t vlan = bp->vlan;
	int rc;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD,
			"PVID cannot be modified for this function\n");
		return -ENOTSUP;
	}
	bp->vlan = on ? pvid : 0;

	rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
	if (rc)
		bp->vlan = vlan;
	return rc;
}

static int
bnxt_dev_led_on_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	return bnxt_hwrm_port_led_cfg(bp, true);
}

static int
bnxt_dev_led_off_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	return bnxt_hwrm_port_led_cfg(bp, false);
}

static uint32_t
bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint32_t desc = 0, raw_cons = 0, cons;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_rx_queue *rxq;
	struct rx_pkt_cmpl *rxcmp;
	uint16_t cmp_type;
	uint8_t cmp = 1;
	bool valid;

	rxq = dev->data->rx_queues[rx_queue_id];
	cpr = rxq->cp_ring;
	valid = cpr->valid;

	while (raw_cons < rxq->nb_rx_desc) {
		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

		if (!CMPL_VALID(rxcmp, valid))
			goto nothing_to_do;
		valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
		cmp_type = CMP_TYPE(rxcmp);
		if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
			cmp = (rte_le_to_cpu_32(
					((struct rx_tpa_end_cmpl *)
					 (rxcmp))->agg_bufs_v1) &
				RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
				RX_TPA_END_CMPL_AGG_BUFS_SFT;
			desc++;
		} else if (cmp_type == 0x11) {
			desc++;
			cmp = (rxcmp->agg_bufs_v1 &
				RX_PKT_CMPL_AGG_BUFS_MASK) >>
				RX_PKT_CMPL_AGG_BUFS_SFT;
		} else {
			cmp = 1;
		}
nothing_to_do:
		raw_cons += cmp ? cmp : 2;
	}

	return desc;
}

static int
bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct rx_pkt_cmpl *rxcmp;
	uint32_t cons, cp_cons;

	if (!rxq)
		return -EINVAL;

	cpr = rxq->cp_ring;
	rxr = rxq->rx_ring;

	if (offset >= rxq->nb_rx_desc)
		return -EINVAL;

	cons = RING_CMP(cpr->cp_ring_struct, offset);
	cp_cons = cpr->cp_raw_cons;
	rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

	if (cons > cp_cons) {
		if (CMPL_VALID(rxcmp, cpr->valid))
			return RTE_ETH_RX_DESC_DONE;
	} else {
		if (CMPL_VALID(rxcmp, !cpr->valid))
			return RTE_ETH_RX_DESC_DONE;
	}
	rx_buf = &rxr->rx_buf_ring[cons];
	if (rx_buf->mbuf == NULL)
		return RTE_ETH_RX_DESC_UNAVAIL;


	return RTE_ETH_RX_DESC_AVAIL;
}

static int
bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
{
	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_sw_tx_bd *tx_buf;
	struct tx_pkt_cmpl *txcmp;
	uint32_t cons, cp_cons;

	if (!txq)
		return -EINVAL;

	cpr = txq->cp_ring;
	txr = txq->tx_ring;

	if (offset >= txq->nb_tx_desc)
		return -EINVAL;

	cons = RING_CMP(cpr->cp_ring_struct, offset);
	txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
	cp_cons = cpr->cp_raw_cons;

	if (cons > cp_cons) {
		if (CMPL_VALID(txcmp, cpr->valid))
			return RTE_ETH_TX_DESC_UNAVAIL;
	} else {
		if (CMPL_VALID(txcmp, !cpr->valid))
			return RTE_ETH_TX_DESC_UNAVAIL;
	}
	tx_buf = &txr->tx_buf_ring[cons];
	if (tx_buf->mbuf == NULL)
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}

static struct bnxt_filter_info *
bnxt_match_and_validate_ether_filter(struct bnxt *bp,
				struct rte_eth_ethertype_filter *efilter,
				struct bnxt_vnic_info *vnic0,
				struct bnxt_vnic_info *vnic,
				int *ret)
{
	struct bnxt_filter_info *mfilter = NULL;
	int match = 0;
	*ret = 0;

	if (efilter->ether_type != ETHER_TYPE_IPv4 &&
		efilter->ether_type != ETHER_TYPE_IPv6) {
		RTE_LOG(ERR, PMD, "unsupported ether_type(0x%04x) in"
			" ethertype filter.", efilter->ether_type);
		*ret = -EINVAL;
		goto exit;
	}
	if (efilter->queue >= bp->rx_nr_rings) {
		RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
		*ret = -EINVAL;
		goto exit;
	}

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
	if (vnic == NULL) {
		RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
		*ret = -EINVAL;
		goto exit;
	}

	if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
			if ((!memcmp(efilter->mac_addr.addr_bytes,
				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
			     mfilter->flags ==
			     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
			     mfilter->ethertype == efilter->ether_type)) {
				match = 1;
				break;
			}
		}
	} else {
		STAILQ_FOREACH(mfilter, &vnic->filter, next)
			if ((!memcmp(efilter->mac_addr.addr_bytes,
				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
			     mfilter->ethertype == efilter->ether_type &&
			     mfilter->flags ==
			     HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
				match = 1;
				break;
			}
	}

	if (match)
		*ret = -EEXIST;

exit:
	return mfilter;
}

static int
bnxt_ethertype_filter(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_ethertype_filter *efilter =
			(struct rte_eth_ethertype_filter *)arg;
	struct bnxt_filter_info *bfilter, *filter1;
	struct bnxt_vnic_info *vnic, *vnic0;
	int ret;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
			    filter_op);
		return -EINVAL;
	}

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		bnxt_match_and_validate_ether_filter(bp, efilter,
							vnic0, vnic, &ret);
		if (ret < 0)
			return ret;

		bfilter = bnxt_get_unused_filter(bp);
		if (bfilter == NULL) {
			RTE_LOG(ERR, PMD,
				"Not enough resources for a new filter.\n");
			return -ENOMEM;
		}
		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
		memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
		       ETHER_ADDR_LEN);
		memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
		       ETHER_ADDR_LEN);
		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
		bfilter->ethertype = efilter->ether_type;
		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;

		filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
		if (filter1 == NULL) {
			ret = -1;
			goto cleanup;
		}
		bfilter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;

		bfilter->dst_id = vnic->fw_vnic_id;

		if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
			bfilter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		}

		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
		if (ret)
			goto cleanup;
		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
		break;
	case RTE_ETH_FILTER_DELETE:
		filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
							vnic0, vnic, &ret);
		if (ret == -EEXIST) {
			ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);

			STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
				      next);
			bnxt_free_filter(bp, filter1);
		} else if (ret == 0) {
			RTE_LOG(ERR, PMD, "No matching filter found\n");
		}
		break;
	default:
		RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
		ret = -EINVAL;
		goto error;
	}
	return ret;
cleanup:
	bnxt_free_filter(bp, bfilter);
error:
	return ret;
}

static inline int
parse_ntuple_filter(struct bnxt *bp,
		    struct rte_eth_ntuple_filter *nfilter,
		    struct bnxt_filter_info *bfilter)
{
	uint32_t en = 0;

	if (nfilter->queue >= bp->rx_nr_rings) {
		RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue);
		return -EINVAL;
	}

	switch (nfilter->dst_port_mask) {
	case UINT16_MAX:
		bfilter->dst_port_mask = -1;
		bfilter->dst_port = nfilter->dst_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
		break;
	default:
RTE_LOG(ERR, PMD, "invalid dst_port mask."); 1890 return -EINVAL; 1891 } 1892 1893 bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 1894 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 1895 1896 switch (nfilter->proto_mask) { 1897 case UINT8_MAX: 1898 if (nfilter->proto == 17) /* IPPROTO_UDP */ 1899 bfilter->ip_protocol = 17; 1900 else if (nfilter->proto == 6) /* IPPROTO_TCP */ 1901 bfilter->ip_protocol = 6; 1902 else 1903 return -EINVAL; 1904 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 1905 break; 1906 default: 1907 RTE_LOG(ERR, PMD, "invalid protocol mask."); 1908 return -EINVAL; 1909 } 1910 1911 switch (nfilter->dst_ip_mask) { 1912 case UINT32_MAX: 1913 bfilter->dst_ipaddr_mask[0] = -1; 1914 bfilter->dst_ipaddr[0] = nfilter->dst_ip; 1915 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR | 1916 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 1917 break; 1918 default: 1919 RTE_LOG(ERR, PMD, "invalid dst_ip mask."); 1920 return -EINVAL; 1921 } 1922 1923 switch (nfilter->src_ip_mask) { 1924 case UINT32_MAX: 1925 bfilter->src_ipaddr_mask[0] = -1; 1926 bfilter->src_ipaddr[0] = nfilter->src_ip; 1927 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | 1928 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 1929 break; 1930 default: 1931 RTE_LOG(ERR, PMD, "invalid src_ip mask."); 1932 return -EINVAL; 1933 } 1934 1935 switch (nfilter->src_port_mask) { 1936 case UINT16_MAX: 1937 bfilter->src_port_mask = -1; 1938 bfilter->src_port = nfilter->src_port; 1939 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | 1940 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 1941 break; 1942 default: 1943 RTE_LOG(ERR, PMD, "invalid src_port mask."); 1944 return -EINVAL; 1945 } 1946 1947 //TODO Priority 1948 //nfilter->priority = (uint8_t)filter->priority; 1949 1950 bfilter->enables = en; 1951 return 0; 1952 } 1953 1954 static struct bnxt_filter_info* 1955 bnxt_match_ntuple_filter(struct bnxt *bp, 1956 struct bnxt_filter_info *bfilter) 1957 { 1958 struct bnxt_filter_info *mfilter = NULL; 1959 int i; 1960 1961 for (i = bp->nr_vnics - 1; i >= 0; i--) { 1962 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 1963 STAILQ_FOREACH(mfilter, &vnic->filter, next) { 1964 if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] && 1965 bfilter->src_ipaddr_mask[0] == 1966 mfilter->src_ipaddr_mask[0] && 1967 bfilter->src_port == mfilter->src_port && 1968 bfilter->src_port_mask == mfilter->src_port_mask && 1969 bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] && 1970 bfilter->dst_ipaddr_mask[0] == 1971 mfilter->dst_ipaddr_mask[0] && 1972 bfilter->dst_port == mfilter->dst_port && 1973 bfilter->dst_port_mask == mfilter->dst_port_mask && 1974 bfilter->flags == mfilter->flags && 1975 bfilter->enables == mfilter->enables) 1976 return mfilter; 1977 } 1978 } 1979 return NULL; 1980 } 1981 1982 static int 1983 bnxt_cfg_ntuple_filter(struct bnxt *bp, 1984 struct rte_eth_ntuple_filter *nfilter, 1985 enum rte_filter_op filter_op) 1986 { 1987 struct bnxt_filter_info *bfilter, *mfilter, *filter1; 1988 struct bnxt_vnic_info *vnic, *vnic0; 1989 int ret; 1990 1991 if (nfilter->flags != RTE_5TUPLE_FLAGS) { 1992 RTE_LOG(ERR, PMD, "only 5tuple is supported."); 1993 return -EINVAL; 1994 } 1995 1996 if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { 1997 RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n"); 1998 return -EINVAL; 1999 } 2000 2001 bfilter = bnxt_get_unused_filter(bp); 2002 if (bfilter == NULL) { 2003 RTE_LOG(ERR, PMD, 2004 "Not enough resources for a new filter.\n"); 2005 return -ENOMEM; 2006 } 2007 ret = parse_ntuple_filter(bp, nfilter, bfilter); 2008 if (ret < 0) 
2009 goto free_filter; 2010 2011 vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]); 2012 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); 2013 filter1 = STAILQ_FIRST(&vnic0->filter); 2014 if (filter1 == NULL) { 2015 ret = -1; 2016 goto free_filter; 2017 } 2018 2019 bfilter->dst_id = vnic->fw_vnic_id; 2020 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; 2021 bfilter->enables |= 2022 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2023 bfilter->ethertype = 0x800; 2024 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2025 2026 mfilter = bnxt_match_ntuple_filter(bp, bfilter); 2027 2028 if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD) { 2029 RTE_LOG(ERR, PMD, "filter exists."); 2030 ret = -EEXIST; 2031 goto free_filter; 2032 } 2033 if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) { 2034 RTE_LOG(ERR, PMD, "filter doesn't exist."); 2035 ret = -ENOENT; 2036 goto free_filter; 2037 } 2038 2039 if (filter_op == RTE_ETH_FILTER_ADD) { 2040 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; 2041 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); 2042 if (ret) 2043 goto free_filter; 2044 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); 2045 } else { 2046 if (mfilter == NULL) { 2047 /* This should not happen. But for Coverity! */ 2048 ret = -ENOENT; 2049 goto free_filter; 2050 } 2051 ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter); 2052 2053 STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, 2054 next); 2055 bnxt_free_filter(bp, mfilter); 2056 bfilter->fw_l2_filter_id = -1; 2057 bnxt_free_filter(bp, bfilter); 2058 } 2059 2060 return 0; 2061 free_filter: 2062 bfilter->fw_l2_filter_id = -1; 2063 bnxt_free_filter(bp, bfilter); 2064 return ret; 2065 } 2066 2067 static int 2068 bnxt_ntuple_filter(struct rte_eth_dev *dev, 2069 enum rte_filter_op filter_op, 2070 void *arg) 2071 { 2072 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2073 int ret; 2074 2075 if (filter_op == RTE_ETH_FILTER_NOP) 2076 return 0; 2077 2078 if (arg == NULL) { 2079 RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.", 2080 filter_op); 2081 return -EINVAL; 2082 } 2083 2084 switch (filter_op) { 2085 case RTE_ETH_FILTER_ADD: 2086 ret = bnxt_cfg_ntuple_filter(bp, 2087 (struct rte_eth_ntuple_filter *)arg, 2088 filter_op); 2089 break; 2090 case RTE_ETH_FILTER_DELETE: 2091 ret = bnxt_cfg_ntuple_filter(bp, 2092 (struct rte_eth_ntuple_filter *)arg, 2093 filter_op); 2094 break; 2095 default: 2096 RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op); 2097 ret = -EINVAL; 2098 break; 2099 } 2100 return ret; 2101 } 2102 2103 static int 2104 bnxt_parse_fdir_filter(struct bnxt *bp, 2105 struct rte_eth_fdir_filter *fdir, 2106 struct bnxt_filter_info *filter) 2107 { 2108 enum rte_fdir_mode fdir_mode = 2109 bp->eth_dev->data->dev_conf.fdir_conf.mode; 2110 struct bnxt_vnic_info *vnic0, *vnic; 2111 struct bnxt_filter_info *filter1; 2112 uint32_t en = 0; 2113 int i; 2114 2115 if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL) 2116 return -EINVAL; 2117 2118 filter->l2_ovlan = fdir->input.flow_ext.vlan_tci; 2119 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; 2120 2121 switch (fdir->input.flow_type) { 2122 case RTE_ETH_FLOW_IPV4: 2123 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: 2124 /* FALLTHROUGH */ 2125 filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip; 2126 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2127 filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip; 2128 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2129 filter->ip_protocol = fdir->input.flow.ip4_flow.proto; 2130 en |= 
NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2131 filter->ip_addr_type = 2132 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2133 filter->src_ipaddr_mask[0] = 0xffffffff; 2134 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2135 filter->dst_ipaddr_mask[0] = 0xffffffff; 2136 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2137 filter->ethertype = 0x800; 2138 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2139 break; 2140 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: 2141 filter->src_port = fdir->input.flow.tcp4_flow.src_port; 2142 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2143 filter->dst_port = fdir->input.flow.tcp4_flow.dst_port; 2144 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2145 filter->dst_port_mask = 0xffff; 2146 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2147 filter->src_port_mask = 0xffff; 2148 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2149 filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip; 2150 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2151 filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip; 2152 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2153 filter->ip_protocol = 6; 2154 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2155 filter->ip_addr_type = 2156 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2157 filter->src_ipaddr_mask[0] = 0xffffffff; 2158 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2159 filter->dst_ipaddr_mask[0] = 0xffffffff; 2160 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2161 filter->ethertype = 0x800; 2162 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2163 break; 2164 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: 2165 filter->src_port = fdir->input.flow.udp4_flow.src_port; 2166 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2167 filter->dst_port = fdir->input.flow.udp4_flow.dst_port; 2168 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2169 filter->dst_port_mask = 0xffff; 2170 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2171 filter->src_port_mask = 0xffff; 2172 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2173 filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip; 2174 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2175 filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip; 2176 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2177 filter->ip_protocol = 17; 2178 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2179 filter->ip_addr_type = 2180 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; 2181 filter->src_ipaddr_mask[0] = 0xffffffff; 2182 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2183 filter->dst_ipaddr_mask[0] = 0xffffffff; 2184 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2185 filter->ethertype = 0x800; 2186 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2187 break; 2188 case RTE_ETH_FLOW_IPV6: 2189 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: 2190 /* FALLTHROUGH */ 2191 filter->ip_addr_type = 2192 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 2193 filter->ip_protocol = fdir->input.flow.ipv6_flow.proto; 2194 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2195 rte_memcpy(filter->src_ipaddr, 2196 fdir->input.flow.ipv6_flow.src_ip, 16); 2197 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2198 rte_memcpy(filter->dst_ipaddr, 2199 fdir->input.flow.ipv6_flow.dst_ip, 16); 2200 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2201 memset(filter->dst_ipaddr_mask, 0xff, 16); 2202 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2203 memset(filter->src_ipaddr_mask, 0xff, 16); 2204 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2205 filter->ethertype = 0x86dd; 2206 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2207 break; 2208 case 
RTE_ETH_FLOW_NONFRAG_IPV6_TCP: 2209 filter->src_port = fdir->input.flow.tcp6_flow.src_port; 2210 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2211 filter->dst_port = fdir->input.flow.tcp6_flow.dst_port; 2212 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2213 filter->dst_port_mask = 0xffff; 2214 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2215 filter->src_port_mask = 0xffff; 2216 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2217 filter->ip_addr_type = 2218 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 2219 filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto; 2220 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2221 rte_memcpy(filter->src_ipaddr, 2222 fdir->input.flow.tcp6_flow.ip.src_ip, 16); 2223 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2224 rte_memcpy(filter->dst_ipaddr, 2225 fdir->input.flow.tcp6_flow.ip.dst_ip, 16); 2226 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2227 memset(filter->dst_ipaddr_mask, 0xff, 16); 2228 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2229 memset(filter->src_ipaddr_mask, 0xff, 16); 2230 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2231 filter->ethertype = 0x86dd; 2232 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2233 break; 2234 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: 2235 filter->src_port = fdir->input.flow.udp6_flow.src_port; 2236 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; 2237 filter->dst_port = fdir->input.flow.udp6_flow.dst_port; 2238 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; 2239 filter->dst_port_mask = 0xffff; 2240 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 2241 filter->src_port_mask = 0xffff; 2242 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; 2243 filter->ip_addr_type = 2244 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; 2245 filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto; 2246 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; 2247 rte_memcpy(filter->src_ipaddr, 2248 fdir->input.flow.udp6_flow.ip.src_ip, 16); 2249 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; 2250 rte_memcpy(filter->dst_ipaddr, 2251 fdir->input.flow.udp6_flow.ip.dst_ip, 16); 2252 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; 2253 memset(filter->dst_ipaddr_mask, 0xff, 16); 2254 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; 2255 memset(filter->src_ipaddr_mask, 0xff, 16); 2256 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; 2257 filter->ethertype = 0x86dd; 2258 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2259 break; 2260 case RTE_ETH_FLOW_L2_PAYLOAD: 2261 filter->ethertype = fdir->input.flow.l2_flow.ether_type; 2262 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 2263 break; 2264 case RTE_ETH_FLOW_VXLAN: 2265 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 2266 return -EINVAL; 2267 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 2268 filter->tunnel_type = 2269 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 2270 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 2271 break; 2272 case RTE_ETH_FLOW_NVGRE: 2273 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 2274 return -EINVAL; 2275 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; 2276 filter->tunnel_type = 2277 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; 2278 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; 2279 break; 2280 case RTE_ETH_FLOW_UNKNOWN: 2281 case RTE_ETH_FLOW_RAW: 2282 case RTE_ETH_FLOW_FRAG_IPV4: 2283 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: 2284 case RTE_ETH_FLOW_FRAG_IPV6: 2285 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: 2286 case RTE_ETH_FLOW_IPV6_EX: 2287 case RTE_ETH_FLOW_IPV6_TCP_EX: 2288 case RTE_ETH_FLOW_IPV6_UDP_EX: 2289 case RTE_ETH_FLOW_GENEVE: 
2290 /* FALLTHROUGH */ 2291 default: 2292 return -EINVAL; 2293 } 2294 2295 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); 2296 vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]); 2297 if (vnic == NULL) { 2298 RTE_LOG(ERR, PMD, "Invalid queue %d\n", fdir->action.rx_queue); 2299 return -EINVAL; 2300 } 2301 2302 2303 if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { 2304 rte_memcpy(filter->dst_macaddr, 2305 fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6); 2306 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; 2307 } 2308 2309 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) { 2310 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 2311 filter1 = STAILQ_FIRST(&vnic0->filter); 2312 //filter1 = bnxt_get_l2_filter(bp, filter, vnic0); 2313 } else { 2314 filter->dst_id = vnic->fw_vnic_id; 2315 for (i = 0; i < ETHER_ADDR_LEN; i++) 2316 if (filter->dst_macaddr[i] == 0x00) 2317 filter1 = STAILQ_FIRST(&vnic0->filter); 2318 else 2319 filter1 = bnxt_get_l2_filter(bp, filter, vnic); 2320 } 2321 2322 if (filter1 == NULL) 2323 return -EINVAL; 2324 2325 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 2326 filter->fw_l2_filter_id = filter1->fw_l2_filter_id; 2327 2328 filter->enables = en; 2329 2330 return 0; 2331 } 2332 2333 static struct bnxt_filter_info * 2334 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf) 2335 { 2336 struct bnxt_filter_info *mf = NULL; 2337 int i; 2338 2339 for (i = bp->nr_vnics - 1; i >= 0; i--) { 2340 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2341 2342 STAILQ_FOREACH(mf, &vnic->filter, next) { 2343 if (mf->filter_type == nf->filter_type && 2344 mf->flags == nf->flags && 2345 mf->src_port == nf->src_port && 2346 mf->src_port_mask == nf->src_port_mask && 2347 mf->dst_port == nf->dst_port && 2348 mf->dst_port_mask == nf->dst_port_mask && 2349 mf->ip_protocol == nf->ip_protocol && 2350 mf->ip_addr_type == nf->ip_addr_type && 2351 mf->ethertype == nf->ethertype && 2352 mf->vni == nf->vni && 2353 mf->tunnel_type == nf->tunnel_type && 2354 mf->l2_ovlan == nf->l2_ovlan && 2355 mf->l2_ovlan_mask == nf->l2_ovlan_mask && 2356 mf->l2_ivlan == nf->l2_ivlan && 2357 mf->l2_ivlan_mask == nf->l2_ivlan_mask && 2358 !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) && 2359 !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, 2360 ETHER_ADDR_LEN) && 2361 !memcmp(mf->src_macaddr, nf->src_macaddr, 2362 ETHER_ADDR_LEN) && 2363 !memcmp(mf->dst_macaddr, nf->dst_macaddr, 2364 ETHER_ADDR_LEN) && 2365 !memcmp(mf->src_ipaddr, nf->src_ipaddr, 2366 sizeof(nf->src_ipaddr)) && 2367 !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, 2368 sizeof(nf->src_ipaddr_mask)) && 2369 !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, 2370 sizeof(nf->dst_ipaddr)) && 2371 !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, 2372 sizeof(nf->dst_ipaddr_mask))) 2373 return mf; 2374 } 2375 } 2376 return NULL; 2377 } 2378 2379 static int 2380 bnxt_fdir_filter(struct rte_eth_dev *dev, 2381 enum rte_filter_op filter_op, 2382 void *arg) 2383 { 2384 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2385 struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg; 2386 struct bnxt_filter_info *filter, *match; 2387 struct bnxt_vnic_info *vnic; 2388 int ret = 0, i; 2389 2390 if (filter_op == RTE_ETH_FILTER_NOP) 2391 return 0; 2392 2393 if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) 2394 return -EINVAL; 2395 2396 switch (filter_op) { 2397 case RTE_ETH_FILTER_ADD: 2398 case RTE_ETH_FILTER_DELETE: 2399 /* FALLTHROUGH */ 2400 filter = bnxt_get_unused_filter(bp); 2401 if (filter == NULL) { 2402 
RTE_LOG(ERR, PMD, 2403 "Not enough resources for a new flow.\n"); 2404 return -ENOMEM; 2405 } 2406 2407 ret = bnxt_parse_fdir_filter(bp, fdir, filter); 2408 if (ret != 0) 2409 goto free_filter; 2410 filter->filter_type = HWRM_CFA_NTUPLE_FILTER; 2411 2412 match = bnxt_match_fdir(bp, filter); 2413 if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) { 2414 RTE_LOG(ERR, PMD, "Flow already exists.\n"); 2415 ret = -EEXIST; 2416 goto free_filter; 2417 } 2418 if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) { 2419 RTE_LOG(ERR, PMD, "Flow does not exist.\n"); 2420 ret = -ENOENT; 2421 goto free_filter; 2422 } 2423 2424 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) 2425 vnic = STAILQ_FIRST(&bp->ff_pool[0]); 2426 else 2427 vnic = 2428 STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]); 2429 2430 if (filter_op == RTE_ETH_FILTER_ADD) { 2431 ret = bnxt_hwrm_set_ntuple_filter(bp, 2432 filter->dst_id, 2433 filter); 2434 if (ret) 2435 goto free_filter; 2436 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2437 } else { 2438 ret = bnxt_hwrm_clear_ntuple_filter(bp, match); 2439 STAILQ_REMOVE(&vnic->filter, match, 2440 bnxt_filter_info, next); 2441 bnxt_free_filter(bp, match); 2442 filter->fw_l2_filter_id = -1; 2443 bnxt_free_filter(bp, filter); 2444 } 2445 break; 2446 case RTE_ETH_FILTER_FLUSH: 2447 for (i = bp->nr_vnics - 1; i >= 0; i--) { 2448 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2449 2450 STAILQ_FOREACH(filter, &vnic->filter, next) { 2451 if (filter->filter_type == 2452 HWRM_CFA_NTUPLE_FILTER) { 2453 ret = 2454 bnxt_hwrm_clear_ntuple_filter(bp, 2455 filter); 2456 STAILQ_REMOVE(&vnic->filter, filter, 2457 bnxt_filter_info, next); 2458 } 2459 } 2460 } 2461 return ret; 2462 case RTE_ETH_FILTER_UPDATE: 2463 case RTE_ETH_FILTER_STATS: 2464 case RTE_ETH_FILTER_INFO: 2465 /* FALLTHROUGH */ 2466 RTE_LOG(ERR, PMD, "operation %u not implemented", filter_op); 2467 break; 2468 default: 2469 RTE_LOG(ERR, PMD, "unknown operation %u", filter_op); 2470 ret = -EINVAL; 2471 break; 2472 } 2473 return ret; 2474 2475 free_filter: 2476 filter->fw_l2_filter_id = -1; 2477 bnxt_free_filter(bp, filter); 2478 return ret; 2479 } 2480 2481 static int 2482 bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused, 2483 enum rte_filter_type filter_type, 2484 enum rte_filter_op filter_op, void *arg) 2485 { 2486 int ret = 0; 2487 2488 switch (filter_type) { 2489 case RTE_ETH_FILTER_TUNNEL: 2490 RTE_LOG(ERR, PMD, 2491 "filter type: %d: To be implemented\n", filter_type); 2492 break; 2493 case RTE_ETH_FILTER_FDIR: 2494 ret = bnxt_fdir_filter(dev, filter_op, arg); 2495 break; 2496 case RTE_ETH_FILTER_NTUPLE: 2497 ret = bnxt_ntuple_filter(dev, filter_op, arg); 2498 break; 2499 case RTE_ETH_FILTER_ETHERTYPE: 2500 ret = bnxt_ethertype_filter(dev, filter_op, arg); 2501 break; 2502 case RTE_ETH_FILTER_GENERIC: 2503 if (filter_op != RTE_ETH_FILTER_GET) 2504 return -EINVAL; 2505 *(const void **)arg = &bnxt_flow_ops; 2506 break; 2507 default: 2508 RTE_LOG(ERR, PMD, 2509 "Filter type (%d) not supported", filter_type); 2510 ret = -EINVAL; 2511 break; 2512 } 2513 return ret; 2514 } 2515 2516 static const uint32_t * 2517 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 2518 { 2519 static const uint32_t ptypes[] = { 2520 RTE_PTYPE_L2_ETHER_VLAN, 2521 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 2522 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 2523 RTE_PTYPE_L4_ICMP, 2524 RTE_PTYPE_L4_TCP, 2525 RTE_PTYPE_L4_UDP, 2526 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 2527 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 2528 RTE_PTYPE_INNER_L4_ICMP, 2529 
RTE_PTYPE_INNER_L4_TCP, 2530 RTE_PTYPE_INNER_L4_UDP, 2531 RTE_PTYPE_UNKNOWN 2532 }; 2533 2534 if (dev->rx_pkt_burst == bnxt_recv_pkts) 2535 return ptypes; 2536 return NULL; 2537 } 2538 2539 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 2540 int reg_win) 2541 { 2542 uint32_t reg_base = *reg_arr & 0xfffff000; 2543 uint32_t win_off; 2544 int i; 2545 2546 for (i = 0; i < count; i++) { 2547 if ((reg_arr[i] & 0xfffff000) != reg_base) 2548 return -ERANGE; 2549 } 2550 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 2551 rte_cpu_to_le_32(rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off)); 2552 return 0; 2553 } 2554 2555 static int bnxt_map_ptp_regs(struct bnxt *bp) 2556 { 2557 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2558 uint32_t *reg_arr; 2559 int rc, i; 2560 2561 reg_arr = ptp->rx_regs; 2562 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 2563 if (rc) 2564 return rc; 2565 2566 reg_arr = ptp->tx_regs; 2567 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 2568 if (rc) 2569 return rc; 2570 2571 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 2572 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 2573 2574 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 2575 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 2576 2577 return 0; 2578 } 2579 2580 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 2581 { 2582 rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 + 2583 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16)); 2584 rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 + 2585 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20)); 2586 } 2587 2588 static uint64_t bnxt_cc_read(struct bnxt *bp) 2589 { 2590 uint64_t ns; 2591 2592 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2593 BNXT_GRCPF_REG_SYNC_TIME)); 2594 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2595 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 2596 return ns; 2597 } 2598 2599 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 2600 { 2601 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2602 uint32_t fifo; 2603 2604 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2605 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 2606 if (fifo & BNXT_PTP_TX_FIFO_EMPTY) 2607 return -EAGAIN; 2608 2609 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2610 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 2611 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2612 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 2613 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2614 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 2615 2616 return 0; 2617 } 2618 2619 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 2620 { 2621 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2622 struct bnxt_pf_info *pf = &bp->pf; 2623 uint16_t port_id; 2624 uint32_t fifo; 2625 2626 if (!ptp) 2627 return -ENODEV; 2628 2629 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2630 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 2631 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 2632 return -EAGAIN; 2633 2634 port_id = pf->port_id; 2635 rte_cpu_to_le_32(rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 2636 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV])); 2637 2638 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2639 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 2640 if (fifo & BNXT_PTP_RX_FIFO_PENDING) { 2641 /* bnxt_clr_rx_ts(bp); TBD */ 2642 return -EBUSY; 2643 } 2644 2645 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 2646 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 2647 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t 
*)bp->bar0 + 2648 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 2649 2650 return 0; 2651 } 2652 2653 static int 2654 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 2655 { 2656 uint64_t ns; 2657 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2658 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2659 2660 if (!ptp) 2661 return 0; 2662 2663 ns = rte_timespec_to_ns(ts); 2664 /* Set the timecounters to a new value. */ 2665 ptp->tc.nsec = ns; 2666 2667 return 0; 2668 } 2669 2670 static int 2671 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 2672 { 2673 uint64_t ns, systime_cycles; 2674 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2675 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2676 2677 if (!ptp) 2678 return 0; 2679 2680 systime_cycles = bnxt_cc_read(bp); 2681 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 2682 *ts = rte_ns_to_timespec(ns); 2683 2684 return 0; 2685 } 2686 static int 2687 bnxt_timesync_enable(struct rte_eth_dev *dev) 2688 { 2689 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2690 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2691 uint32_t shift = 0; 2692 2693 if (!ptp) 2694 return 0; 2695 2696 ptp->rx_filter = 1; 2697 ptp->tx_tstamp_en = 1; 2698 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 2699 2700 if (!bnxt_hwrm_ptp_cfg(bp)) 2701 bnxt_map_ptp_regs(bp); 2702 2703 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 2704 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 2705 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 2706 2707 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 2708 ptp->tc.cc_shift = shift; 2709 ptp->tc.nsec_mask = (1ULL << shift) - 1; 2710 2711 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 2712 ptp->rx_tstamp_tc.cc_shift = shift; 2713 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 2714 2715 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 2716 ptp->tx_tstamp_tc.cc_shift = shift; 2717 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 2718 2719 return 0; 2720 } 2721 2722 static int 2723 bnxt_timesync_disable(struct rte_eth_dev *dev) 2724 { 2725 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2726 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2727 2728 if (!ptp) 2729 return 0; 2730 2731 ptp->rx_filter = 0; 2732 ptp->tx_tstamp_en = 0; 2733 ptp->rxctl = 0; 2734 2735 bnxt_hwrm_ptp_cfg(bp); 2736 2737 bnxt_unmap_ptp_regs(bp); 2738 2739 return 0; 2740 } 2741 2742 static int 2743 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 2744 struct timespec *timestamp, 2745 uint32_t flags __rte_unused) 2746 { 2747 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2748 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2749 uint64_t rx_tstamp_cycles = 0; 2750 uint64_t ns; 2751 2752 if (!ptp) 2753 return 0; 2754 2755 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 2756 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 2757 *timestamp = rte_ns_to_timespec(ns); 2758 return 0; 2759 } 2760 2761 static int 2762 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 2763 struct timespec *timestamp) 2764 { 2765 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2766 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2767 uint64_t tx_tstamp_cycles = 0; 2768 uint64_t ns; 2769 2770 if (!ptp) 2771 return 0; 2772 2773 bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 2774 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 2775 *timestamp = rte_ns_to_timespec(ns); 2776 2777 return 0; 2778 } 2779 2780 static int 2781 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t 
delta) 2782 { 2783 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2784 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2785 2786 if (!ptp) 2787 return 0; 2788 2789 ptp->tc.nsec += delta; 2790 2791 return 0; 2792 } 2793 2794 static int 2795 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 2796 { 2797 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2798 int rc; 2799 uint32_t dir_entries; 2800 uint32_t entry_length; 2801 2802 RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n", 2803 __func__, bp->pdev->addr.domain, bp->pdev->addr.bus, 2804 bp->pdev->addr.devid, bp->pdev->addr.function); 2805 2806 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 2807 if (rc != 0) 2808 return rc; 2809 2810 return dir_entries * entry_length; 2811 } 2812 2813 static int 2814 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 2815 struct rte_dev_eeprom_info *in_eeprom) 2816 { 2817 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2818 uint32_t index; 2819 uint32_t offset; 2820 2821 RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d " 2822 "len = %d\n", __func__, bp->pdev->addr.domain, 2823 bp->pdev->addr.bus, bp->pdev->addr.devid, 2824 bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); 2825 2826 if (in_eeprom->offset == 0) /* special offset value to get directory */ 2827 return bnxt_get_nvram_directory(bp, in_eeprom->length, 2828 in_eeprom->data); 2829 2830 index = in_eeprom->offset >> 24; 2831 offset = in_eeprom->offset & 0xffffff; 2832 2833 if (index != 0) 2834 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 2835 in_eeprom->length, in_eeprom->data); 2836 2837 return 0; 2838 } 2839 2840 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 2841 { 2842 switch (dir_type) { 2843 case BNX_DIR_TYPE_CHIMP_PATCH: 2844 case BNX_DIR_TYPE_BOOTCODE: 2845 case BNX_DIR_TYPE_BOOTCODE_2: 2846 case BNX_DIR_TYPE_APE_FW: 2847 case BNX_DIR_TYPE_APE_PATCH: 2848 case BNX_DIR_TYPE_KONG_FW: 2849 case BNX_DIR_TYPE_KONG_PATCH: 2850 case BNX_DIR_TYPE_BONO_FW: 2851 case BNX_DIR_TYPE_BONO_PATCH: 2852 return true; 2853 } 2854 2855 return false; 2856 } 2857 2858 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 2859 { 2860 switch (dir_type) { 2861 case BNX_DIR_TYPE_AVS: 2862 case BNX_DIR_TYPE_EXP_ROM_MBA: 2863 case BNX_DIR_TYPE_PCIE: 2864 case BNX_DIR_TYPE_TSCF_UCODE: 2865 case BNX_DIR_TYPE_EXT_PHY: 2866 case BNX_DIR_TYPE_CCM: 2867 case BNX_DIR_TYPE_ISCSI_BOOT: 2868 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 2869 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 2870 return true; 2871 } 2872 2873 return false; 2874 } 2875 2876 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 2877 { 2878 return bnxt_dir_type_is_ape_bin_format(dir_type) || 2879 bnxt_dir_type_is_other_exec_format(dir_type); 2880 } 2881 2882 static int 2883 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 2884 struct rte_dev_eeprom_info *in_eeprom) 2885 { 2886 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2887 uint8_t index, dir_op; 2888 uint16_t type, ext, ordinal, attr; 2889 2890 RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d " 2891 "len = %d\n", __func__, bp->pdev->addr.domain, 2892 bp->pdev->addr.bus, bp->pdev->addr.devid, 2893 bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); 2894 2895 if (!BNXT_PF(bp)) { 2896 RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n"); 2897 return -EINVAL; 2898 } 2899 2900 type = in_eeprom->magic >> 16; 2901 2902 if (type == 0xffff) { /* special value for directory operations */ 2903 index = in_eeprom->magic & 0xff; 2904 
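		/*
		 * Descriptive note (derived from the surrounding code): for
		 * directory operations the 32-bit "magic" field is packed as
		 * bits 31:16 = 0xffff marker (checked above), bits 15:8 = the
		 * directory operation code (0x0e = erase), and bits 7:0 = the
		 * 1-based directory entry index.
		 */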
dir_op = in_eeprom->magic >> 8; 2905 if (index == 0) 2906 return -EINVAL; 2907 switch (dir_op) { 2908 case 0x0e: /* erase */ 2909 if (in_eeprom->offset != ~in_eeprom->magic) 2910 return -EINVAL; 2911 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 2912 default: 2913 return -EINVAL; 2914 } 2915 } 2916 2917 /* Create or re-write an NVM item: */ 2918 if (bnxt_dir_type_is_executable(type) == true) 2919 return -EOPNOTSUPP; 2920 ext = in_eeprom->magic & 0xffff; 2921 ordinal = in_eeprom->offset >> 16; 2922 attr = in_eeprom->offset & 0xffff; 2923 2924 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 2925 in_eeprom->data, in_eeprom->length); 2926 return 0; 2927 } 2928 2929 /* 2930 * Initialization 2931 */ 2932 2933 static const struct eth_dev_ops bnxt_dev_ops = { 2934 .dev_infos_get = bnxt_dev_info_get_op, 2935 .dev_close = bnxt_dev_close_op, 2936 .dev_configure = bnxt_dev_configure_op, 2937 .dev_start = bnxt_dev_start_op, 2938 .dev_stop = bnxt_dev_stop_op, 2939 .dev_set_link_up = bnxt_dev_set_link_up_op, 2940 .dev_set_link_down = bnxt_dev_set_link_down_op, 2941 .stats_get = bnxt_stats_get_op, 2942 .stats_reset = bnxt_stats_reset_op, 2943 .rx_queue_setup = bnxt_rx_queue_setup_op, 2944 .rx_queue_release = bnxt_rx_queue_release_op, 2945 .tx_queue_setup = bnxt_tx_queue_setup_op, 2946 .tx_queue_release = bnxt_tx_queue_release_op, 2947 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 2948 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 2949 .reta_update = bnxt_reta_update_op, 2950 .reta_query = bnxt_reta_query_op, 2951 .rss_hash_update = bnxt_rss_hash_update_op, 2952 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 2953 .link_update = bnxt_link_update_op, 2954 .promiscuous_enable = bnxt_promiscuous_enable_op, 2955 .promiscuous_disable = bnxt_promiscuous_disable_op, 2956 .allmulticast_enable = bnxt_allmulticast_enable_op, 2957 .allmulticast_disable = bnxt_allmulticast_disable_op, 2958 .mac_addr_add = bnxt_mac_addr_add_op, 2959 .mac_addr_remove = bnxt_mac_addr_remove_op, 2960 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 2961 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 2962 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 2963 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 2964 .vlan_filter_set = bnxt_vlan_filter_set_op, 2965 .vlan_offload_set = bnxt_vlan_offload_set_op, 2966 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 2967 .mtu_set = bnxt_mtu_set_op, 2968 .mac_addr_set = bnxt_set_default_mac_addr_op, 2969 .xstats_get = bnxt_dev_xstats_get_op, 2970 .xstats_get_names = bnxt_dev_xstats_get_names_op, 2971 .xstats_reset = bnxt_dev_xstats_reset_op, 2972 .fw_version_get = bnxt_fw_version_get, 2973 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 2974 .rxq_info_get = bnxt_rxq_info_get_op, 2975 .txq_info_get = bnxt_txq_info_get_op, 2976 .dev_led_on = bnxt_dev_led_on_op, 2977 .dev_led_off = bnxt_dev_led_off_op, 2978 .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op, 2979 .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op, 2980 .rx_queue_count = bnxt_rx_queue_count_op, 2981 .rx_descriptor_status = bnxt_rx_descriptor_status_op, 2982 .tx_descriptor_status = bnxt_tx_descriptor_status_op, 2983 .filter_ctrl = bnxt_filter_ctrl_op, 2984 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 2985 .get_eeprom_length = bnxt_get_eeprom_length_op, 2986 .get_eeprom = bnxt_get_eeprom_op, 2987 .set_eeprom = bnxt_set_eeprom_op, 2988 .timesync_enable = bnxt_timesync_enable, 2989 .timesync_disable = bnxt_timesync_disable, 2990 .timesync_read_time = bnxt_timesync_read_time, 2991 
.timesync_write_time = bnxt_timesync_write_time, 2992 .timesync_adjust_time = bnxt_timesync_adjust_time, 2993 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 2994 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 2995 }; 2996 2997 static bool bnxt_vf_pciid(uint16_t id) 2998 { 2999 if (id == BROADCOM_DEV_ID_57304_VF || 3000 id == BROADCOM_DEV_ID_57406_VF || 3001 id == BROADCOM_DEV_ID_5731X_VF || 3002 id == BROADCOM_DEV_ID_5741X_VF || 3003 id == BROADCOM_DEV_ID_57414_VF || 3004 id == BROADCOM_DEV_ID_STRATUS_NIC_VF) 3005 return true; 3006 return false; 3007 } 3008 3009 static int bnxt_init_board(struct rte_eth_dev *eth_dev) 3010 { 3011 struct bnxt *bp = eth_dev->data->dev_private; 3012 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3013 int rc; 3014 3015 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 3016 if (!pci_dev->mem_resource[0].addr) { 3017 RTE_LOG(ERR, PMD, 3018 "Cannot find PCI device base address, aborting\n"); 3019 rc = -ENODEV; 3020 goto init_err_disable; 3021 } 3022 3023 bp->eth_dev = eth_dev; 3024 bp->pdev = pci_dev; 3025 3026 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 3027 if (!bp->bar0) { 3028 RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n"); 3029 rc = -ENOMEM; 3030 goto init_err_release; 3031 } 3032 return 0; 3033 3034 init_err_release: 3035 if (bp->bar0) 3036 bp->bar0 = NULL; 3037 3038 init_err_disable: 3039 3040 return rc; 3041 } 3042 3043 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); 3044 3045 #define ALLOW_FUNC(x) \ 3046 { \ 3047 typeof(x) arg = (x); \ 3048 bp->pf.vf_req_fwd[((arg) >> 5)] &= \ 3049 ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \ 3050 } 3051 static int 3052 bnxt_dev_init(struct rte_eth_dev *eth_dev) 3053 { 3054 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3055 char mz_name[RTE_MEMZONE_NAMESIZE]; 3056 const struct rte_memzone *mz = NULL; 3057 static int version_printed; 3058 uint32_t total_alloc_len; 3059 rte_iova_t mz_phys_addr; 3060 struct bnxt *bp; 3061 int rc; 3062 3063 if (version_printed++ == 0) 3064 RTE_LOG(INFO, PMD, "%s\n", bnxt_version); 3065 3066 rte_eth_copy_pci_info(eth_dev, pci_dev); 3067 3068 bp = eth_dev->data->dev_private; 3069 3070 rte_atomic64_init(&bp->rx_mbuf_alloc_fail); 3071 bp->dev_stopped = 1; 3072 3073 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 3074 goto skip_init; 3075 3076 if (bnxt_vf_pciid(pci_dev->id.device_id)) 3077 bp->flags |= BNXT_FLAG_VF; 3078 3079 rc = bnxt_init_board(eth_dev); 3080 if (rc) { 3081 RTE_LOG(ERR, PMD, 3082 "Board initialization failed rc: %x\n", rc); 3083 goto error; 3084 } 3085 skip_init: 3086 eth_dev->dev_ops = &bnxt_dev_ops; 3087 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 3088 return 0; 3089 eth_dev->rx_pkt_burst = &bnxt_recv_pkts; 3090 eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; 3091 3092 if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) { 3093 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 3094 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain, 3095 pci_dev->addr.bus, pci_dev->addr.devid, 3096 pci_dev->addr.function, "rx_port_stats"); 3097 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 3098 mz = rte_memzone_lookup(mz_name); 3099 total_alloc_len = RTE_CACHE_LINE_ROUNDUP( 3100 sizeof(struct rx_port_stats) + 512); 3101 if (!mz) { 3102 mz = rte_memzone_reserve(mz_name, total_alloc_len, 3103 SOCKET_ID_ANY, 3104 RTE_MEMZONE_2MB | 3105 RTE_MEMZONE_SIZE_HINT_ONLY); 3106 if (mz == NULL) 3107 return -ENOMEM; 3108 } 3109 memset(mz->addr, 0, mz->len); 3110 mz_phys_addr = mz->iova; 3111 if ((unsigned 
long)mz->addr == mz_phys_addr) { 3112 RTE_LOG(WARNING, PMD, 3113 "Memzone physical address same as virtual.\n"); 3114 RTE_LOG(WARNING, PMD, 3115 "Using rte_mem_virt2iova()\n"); 3116 mz_phys_addr = rte_mem_virt2iova(mz->addr); 3117 if (mz_phys_addr == 0) { 3118 RTE_LOG(ERR, PMD, 3119 "unable to map address to physical memory\n"); 3120 return -ENOMEM; 3121 } 3122 } 3123 3124 bp->rx_mem_zone = (const void *)mz; 3125 bp->hw_rx_port_stats = mz->addr; 3126 bp->hw_rx_port_stats_map = mz_phys_addr; 3127 3128 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 3129 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain, 3130 pci_dev->addr.bus, pci_dev->addr.devid, 3131 pci_dev->addr.function, "tx_port_stats"); 3132 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 3133 mz = rte_memzone_lookup(mz_name); 3134 total_alloc_len = RTE_CACHE_LINE_ROUNDUP( 3135 sizeof(struct tx_port_stats) + 512); 3136 if (!mz) { 3137 mz = rte_memzone_reserve(mz_name, total_alloc_len, 3138 SOCKET_ID_ANY, 3139 RTE_MEMZONE_2MB | 3140 RTE_MEMZONE_SIZE_HINT_ONLY); 3141 if (mz == NULL) 3142 return -ENOMEM; 3143 } 3144 memset(mz->addr, 0, mz->len); 3145 mz_phys_addr = mz->iova; 3146 if ((unsigned long)mz->addr == mz_phys_addr) { 3147 RTE_LOG(WARNING, PMD, 3148 "Memzone physical address same as virtual.\n"); 3149 RTE_LOG(WARNING, PMD, 3150 "Using rte_mem_virt2iova()\n"); 3151 mz_phys_addr = rte_mem_virt2iova(mz->addr); 3152 if (mz_phys_addr == 0) { 3153 RTE_LOG(ERR, PMD, 3154 "unable to map address to physical memory\n"); 3155 return -ENOMEM; 3156 } 3157 } 3158 3159 bp->tx_mem_zone = (const void *)mz; 3160 bp->hw_tx_port_stats = mz->addr; 3161 bp->hw_tx_port_stats_map = mz_phys_addr; 3162 3163 bp->flags |= BNXT_FLAG_PORT_STATS; 3164 } 3165 3166 rc = bnxt_alloc_hwrm_resources(bp); 3167 if (rc) { 3168 RTE_LOG(ERR, PMD, 3169 "hwrm resource allocation failure rc: %x\n", rc); 3170 goto error_free; 3171 } 3172 rc = bnxt_hwrm_ver_get(bp); 3173 if (rc) 3174 goto error_free; 3175 bnxt_hwrm_queue_qportcfg(bp); 3176 3177 bnxt_hwrm_func_qcfg(bp); 3178 3179 /* Get the MAX capabilities for this function */ 3180 rc = bnxt_hwrm_func_qcaps(bp); 3181 if (rc) { 3182 RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc); 3183 goto error_free; 3184 } 3185 if (bp->max_tx_rings == 0) { 3186 RTE_LOG(ERR, PMD, "No TX rings available!\n"); 3187 rc = -EBUSY; 3188 goto error_free; 3189 } 3190 eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl", 3191 ETHER_ADDR_LEN * bp->max_l2_ctx, 0); 3192 if (eth_dev->data->mac_addrs == NULL) { 3193 RTE_LOG(ERR, PMD, 3194 "Failed to alloc %u bytes needed to store MAC addr tbl", 3195 ETHER_ADDR_LEN * bp->max_l2_ctx); 3196 rc = -ENOMEM; 3197 goto error_free; 3198 } 3199 /* Copy the permanent MAC from the qcap response address now. 
	 */
	memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
	bp->grp_info = rte_zmalloc("bnxt_grp_info",
				   sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
	if (!bp->grp_info) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %zu bytes needed to store group info table\n",
			sizeof(*bp->grp_info) * bp->max_ring_grps);
		rc = -ENOMEM;
		goto error_free;
	}

	/*
	 * Forward all requests if firmware is new enough: the VF request
	 * forwarding bitmap is honored for 20.6.100 <= FW version < 20.7.0
	 * and for FW version >= 20.8.0 (packed as major<<24 | minor<<16 |
	 * build<<8).
	 */
	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
	     (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
	} else {
		RTE_LOG(WARNING, PMD,
			"Firmware too old for VF mailbox functionality\n");
		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
	}

	/*
	 * The following are used for driver cleanup. If we disallow these,
	 * VF drivers can't clean up cleanly.
	 */
	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
	ALLOW_FUNC(HWRM_VNIC_FREE);
	ALLOW_FUNC(HWRM_RING_FREE);
	ALLOW_FUNC(HWRM_RING_GRP_FREE);
	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
	rc = bnxt_hwrm_func_driver_register(bp);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Failed to register driver\n");
		rc = -EBUSY;
		goto error_free;
	}

	RTE_LOG(INFO, PMD,
		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
		pci_dev->mem_resource[0].phys_addr,
		pci_dev->mem_resource[0].addr);

	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
		rc = -1;
		goto error_free;
	}

	if (BNXT_PF(bp)) {
		//if (bp->pf.active_vfs) {
		//	TODO: Deallocate VF resources?
3259 //} 3260 if (bp->pdev->max_vfs) { 3261 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 3262 if (rc) { 3263 RTE_LOG(ERR, PMD, "Failed to allocate VFs\n"); 3264 goto error_free; 3265 } 3266 } else { 3267 rc = bnxt_hwrm_allocate_pf_only(bp); 3268 if (rc) { 3269 RTE_LOG(ERR, PMD, 3270 "Failed to allocate PF resources\n"); 3271 goto error_free; 3272 } 3273 } 3274 } 3275 3276 bnxt_hwrm_port_led_qcaps(bp); 3277 3278 rc = bnxt_setup_int(bp); 3279 if (rc) 3280 goto error_free; 3281 3282 rc = bnxt_alloc_mem(bp); 3283 if (rc) 3284 goto error_free_int; 3285 3286 rc = bnxt_request_int(bp); 3287 if (rc) 3288 goto error_free_int; 3289 3290 rc = bnxt_alloc_def_cp_ring(bp); 3291 if (rc) 3292 goto error_free_int; 3293 3294 bnxt_enable_int(bp); 3295 3296 return 0; 3297 3298 error_free_int: 3299 bnxt_disable_int(bp); 3300 bnxt_free_def_cp_ring(bp); 3301 bnxt_hwrm_func_buf_unrgtr(bp); 3302 bnxt_free_int(bp); 3303 bnxt_free_mem(bp); 3304 error_free: 3305 bnxt_dev_uninit(eth_dev); 3306 error: 3307 return rc; 3308 } 3309 3310 static int 3311 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) { 3312 struct bnxt *bp = eth_dev->data->dev_private; 3313 int rc; 3314 3315 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 3316 return -EPERM; 3317 3318 bnxt_disable_int(bp); 3319 bnxt_free_int(bp); 3320 bnxt_free_mem(bp); 3321 if (eth_dev->data->mac_addrs != NULL) { 3322 rte_free(eth_dev->data->mac_addrs); 3323 eth_dev->data->mac_addrs = NULL; 3324 } 3325 if (bp->grp_info != NULL) { 3326 rte_free(bp->grp_info); 3327 bp->grp_info = NULL; 3328 } 3329 rc = bnxt_hwrm_func_driver_unregister(bp, 0); 3330 bnxt_free_hwrm_resources(bp); 3331 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); 3332 rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); 3333 if (bp->dev_stopped == 0) 3334 bnxt_dev_close_op(eth_dev); 3335 if (bp->pf.vf_info) 3336 rte_free(bp->pf.vf_info); 3337 eth_dev->dev_ops = NULL; 3338 eth_dev->rx_pkt_burst = NULL; 3339 eth_dev->tx_pkt_burst = NULL; 3340 3341 return rc; 3342 } 3343 3344 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 3345 struct rte_pci_device *pci_dev) 3346 { 3347 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt), 3348 bnxt_dev_init); 3349 } 3350 3351 static int bnxt_pci_remove(struct rte_pci_device *pci_dev) 3352 { 3353 return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit); 3354 } 3355 3356 static struct rte_pci_driver bnxt_rte_pmd = { 3357 .id_table = bnxt_pci_id_map, 3358 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | 3359 RTE_PCI_DRV_INTR_LSC, 3360 .probe = bnxt_pci_probe, 3361 .remove = bnxt_pci_remove, 3362 }; 3363 3364 static bool 3365 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 3366 { 3367 if (strcmp(dev->device->driver->name, drv->driver.name)) 3368 return false; 3369 3370 return true; 3371 } 3372 3373 bool is_bnxt_supported(struct rte_eth_dev *dev) 3374 { 3375 return is_device_supported(dev, &bnxt_rte_pmd); 3376 } 3377 3378 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd); 3379 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map); 3380 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci"); 3381