/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>
#include <stdbool.h>
#include <unistd.h>

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_byteorder.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "rte_pmd_bnxt.h"
#include "hsi_struct_def_dpdk.h"

int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg)
{
	struct rte_pmd_bnxt_mb_event_param cb_param;

	cb_param.retval = RTE_PMD_BNXT_MB_EVENT_PROCEED;
	cb_param.vf_id = vf_id;
	cb_param.msg = msg;

	_rte_eth_dev_callback_process(bp->eth_dev, RTE_ETH_EVENT_VF_MBOX,
				      &cb_param, NULL);

	/* Default to approve */
	if (cb_param.retval == RTE_PMD_BNXT_MB_EVENT_PROCEED)
		cb_param.retval = RTE_PMD_BNXT_MB_EVENT_NOOP_ACK;

	return cb_param.retval == RTE_PMD_BNXT_MB_EVENT_NOOP_ACK ?
		true : false;
}

int rte_pmd_bnxt_set_tx_loopback(uint8_t port, uint8_t on)
{
	struct rte_eth_dev *eth_dev;
	struct bnxt *bp;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	if (on > 1)
		return -EINVAL;

	eth_dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(eth_dev))
		return -ENOTSUP;

	bp = (struct bnxt *)eth_dev->data->dev_private;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD,
			"Attempt to set Tx loopback on non-PF port %d!\n",
			port);
		return -ENOTSUP;
	}

	if (on)
		bp->pf.evb_mode = BNXT_EVB_MODE_VEB;
	else
		bp->pf.evb_mode = BNXT_EVB_MODE_VEPA;

	rc = bnxt_hwrm_pf_evb_mode(bp);

	return rc;
}
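
/*
 * Illustrative usage sketch (not part of the PMD itself): a PF application
 * would typically enable VF-to-VF Tx loopback (VEB mode) right after the
 * port is configured. The port id 0 below is an assumption for the example.
 *
 *	if (rte_pmd_bnxt_set_tx_loopback(0, 1) != 0)
 *		printf("failed to enable Tx loopback (VEB mode)\n");
 */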

static void
rte_pmd_bnxt_set_all_queues_drop_en_cb(struct bnxt_vnic_info *vnic, void *onptr)
{
	uint8_t *on = onptr;
	vnic->bd_stall = !(*on);
}

int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on)
{
	struct rte_eth_dev *eth_dev;
	struct bnxt *bp;
	uint32_t i;
	int rc = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	if (on > 1)
		return -EINVAL;

	eth_dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(eth_dev))
		return -ENOTSUP;

	bp = (struct bnxt *)eth_dev->data->dev_private;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD,
			"Attempt to set all queues drop on non-PF port!\n");
		return -ENOTSUP;
	}

	if (bp->vnic_info == NULL)
		return -ENODEV;

	/* Stall PF */
	for (i = 0; i < bp->nr_vnics; i++) {
		bp->vnic_info[i].bd_stall = !on;
		rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[i]);
		if (rc) {
			RTE_LOG(ERR, PMD, "Failed to update PF VNIC %d.\n", i);
			return rc;
		}
	}

	/* Stall all active VFs */
	for (i = 0; i < bp->pf.active_vfs; i++) {
		rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, i,
				rte_pmd_bnxt_set_all_queues_drop_en_cb, &on,
				bnxt_hwrm_vnic_cfg);
		if (rc) {
			RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", i);
			break;
		}
	}

	return rc;
}

int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf,
				 struct ether_addr *mac_addr)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct bnxt *bp;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(dev))
		return -ENOTSUP;

	rte_eth_dev_info_get(port, &dev_info);
	bp = (struct bnxt *)dev->data->dev_private;

	if (vf >= dev_info.max_vfs || mac_addr == NULL)
		return -EINVAL;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD,
			"Attempt to set VF %d mac address on non-PF port %d!\n",
			vf, port);
		return -ENOTSUP;
	}

	rc = bnxt_hwrm_func_vf_mac(bp, vf, (uint8_t *)mac_addr);

	return rc;
}

int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf,
				   uint16_t tx_rate, uint64_t q_msk)
{
	struct rte_eth_dev *eth_dev;
	struct rte_eth_dev_info dev_info;
	struct bnxt *bp;
	uint16_t tot_rate = 0;
	uint64_t idx;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	eth_dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(eth_dev))
		return -ENOTSUP;

	rte_eth_dev_info_get(port, &dev_info);
	bp = (struct bnxt *)eth_dev->data->dev_private;

	if (!bp->pf.active_vfs)
		return -EINVAL;

	if (vf >= bp->pf.max_vfs)
		return -EINVAL;

	/* Add up the per queue BW and configure MAX BW of the VF */
	for (idx = 0; idx < 64; idx++) {
		if ((1ULL << idx) & q_msk)
			tot_rate += tx_rate;
	}

	/* Requested BW can't be greater than link speed */
	if (tot_rate > eth_dev->data->dev_link.link_speed) {
		RTE_LOG(ERR, PMD,
			"Requested rate %d exceeds link speed\n", tot_rate);
		return -EINVAL;
	}

	/* Requested BW already configured */
	if (tot_rate == bp->pf.vf_info[vf].max_tx_rate)
		return 0;

	rc = bnxt_hwrm_func_bw_cfg(bp, vf, tot_rate,
				   HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW);

	if (!rc)
		bp->pf.vf_info[vf].max_tx_rate = tot_rate;

	return rc;
}
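
/*
 * Illustrative example of the rate computation above (the values are
 * assumptions for the example, not taken from the driver): with
 * tx_rate = 1000 Mbps and q_msk = 0xF (four queues), the loop adds tx_rate
 * once per set bit, so the maximum bandwidth programmed for the VF via
 * HWRM is 4 * 1000 = 4000 Mbps, which must not exceed the current link
 * speed.
 *
 *	rte_pmd_bnxt_set_vf_rate_limit(0, 1, 1000, 0xF);
 */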

int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev;
	uint32_t func_flags;
	struct bnxt *bp;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	if (on > 1)
		return -EINVAL;

	dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(dev))
		return -ENOTSUP;

	rte_eth_dev_info_get(port, &dev_info);
	bp = (struct bnxt *)dev->data->dev_private;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD,
			"Attempt to set MAC anti-spoof on non-PF port %d!\n",
			port);
		return -EINVAL;
	}

	if (vf >= dev_info.max_vfs)
		return -EINVAL;

	/* Prev setting same as new setting. */
	if (on == bp->pf.vf_info[vf].mac_spoof_en)
		return 0;

	func_flags = bp->pf.vf_info[vf].func_cfg_flags;

	if (on)
		func_flags |=
			HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
	else
		func_flags |=
			HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;

	bp->pf.vf_info[vf].func_cfg_flags = func_flags;

	rc = bnxt_hwrm_func_cfg_vf_set_flags(bp, vf);
	if (!rc)
		bp->pf.vf_info[vf].mac_spoof_en = on;

	return rc;
}

int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev;
	struct bnxt *bp;
	int rc;
	int dflt_vnic;
	struct bnxt_vnic_info vnic;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	if (on > 1)
		return -EINVAL;

	dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(dev))
		return -ENOTSUP;

	rte_eth_dev_info_get(port, &dev_info);
	bp = (struct bnxt *)dev->data->dev_private;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD,
			"Attempt to set VLAN anti-spoof on non-PF port %d!\n",
			port);
		return -EINVAL;
	}

	if (vf >= dev_info.max_vfs)
		return -EINVAL;

	rc = bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(bp, vf, on);
	if (!rc) {
		bp->pf.vf_info[vf].vlan_spoof_en = on;
		if (on) {
			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/*
				 * This simply indicates there's no driver
				 * loaded on the VF. It is not an error.
				 */
				RTE_LOG(INFO, PMD,
					"Unable to get default VNIC for VF %d\n",
					vf);
			} else {
				vnic.fw_vnic_id = dflt_vnic;
				if (bnxt_hwrm_vnic_qcfg(bp,
					&vnic, bp->pf.first_vf_id + vf) == 0) {
					if (bnxt_hwrm_cfa_l2_set_rx_mask(bp,
					    &vnic,
					    bp->pf.vf_info[vf].vlan_count,
					    bp->pf.vf_info[vf].vlan_table))
						rc = -1;
				}
			}
		}
	} else {
		RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
	}

	return rc;
}

static void
rte_pmd_bnxt_set_vf_vlan_stripq_cb(struct bnxt_vnic_info *vnic, void *onptr)
{
	uint8_t *on = onptr;
	vnic->vlan_strip = *on;
}

int
rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct bnxt *bp;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(dev))
		return -ENOTSUP;

	rte_eth_dev_info_get(port, &dev_info);
	bp = (struct bnxt *)dev->data->dev_private;

	if (vf >= dev_info.max_vfs)
		return -EINVAL;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD,
			"Attempt to set VF %d stripq on non-PF port %d!\n",
			vf, port);
		return -ENOTSUP;
	}

	rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
				rte_pmd_bnxt_set_vf_vlan_stripq_cb, &on,
				bnxt_hwrm_vnic_cfg);
	if (rc)
		RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);

	return rc;
}

int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
			       uint16_t rx_mask, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	uint16_t flag = 0;
	struct bnxt *bp;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(dev))
		return -ENOTSUP;

	rte_eth_dev_info_get(port, &dev_info);
	bp = (struct bnxt *)dev->data->dev_private;

	if (!bp->pf.vf_info)
		return -EINVAL;

	if (vf >= bp->pdev->max_vfs)
		return -EINVAL;

	if (rx_mask & (ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_HASH_MC)) {
		RTE_LOG(ERR, PMD, "Currently cannot toggle this setting\n");
		return -ENOTSUP;
	}

	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC && !on) {
		RTE_LOG(ERR, PMD, "Currently cannot disable UC Rx\n");
		return -ENOTSUP;
	}

	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
		flag |= BNXT_VNIC_INFO_BCAST;
	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
		flag |= BNXT_VNIC_INFO_ALLMULTI;

	if (on)
		bp->pf.vf_info[vf].l2_rx_mask |= flag;
	else
		bp->pf.vf_info[vf].l2_rx_mask &= ~flag;

	rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
					vf_vnic_set_rxmask_cb,
					&bp->pf.vf_info[vf].l2_rx_mask,
					bnxt_set_rx_mask_no_vlan);
	if (rc)
		RTE_LOG(ERR, PMD, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");

	return rc;
}
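
/*
 * Illustrative usage sketch (port and VF ids are assumptions): to accept
 * broadcast and all-multicast traffic on VF 2 of port 0, an application
 * could pass the corresponding VMDQ accept flags with on = 1; the PMD
 * translates them into BNXT_VNIC_INFO_BCAST/BNXT_VNIC_INFO_ALLMULTI bits
 * in the VF's L2 Rx mask before issuing the HWRM request.
 *
 *	rte_pmd_bnxt_set_vf_rxmode(0, 2,
 *		ETH_VMDQ_ACCEPT_BROADCAST | ETH_VMDQ_ACCEPT_MULTICAST, 1);
 */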

int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
				    uint64_t vf_mask, uint8_t vlan_on)
{
	struct bnxt_vlan_table_entry *ve;
	struct rte_eth_dev *dev;
	struct bnxt *bp;
	uint16_t cnt;
	int rc = 0;
	int i, j;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(dev))
		return -ENOTSUP;

	bp = (struct bnxt *)dev->data->dev_private;
	if (!bp->pf.vf_info)
		return -EINVAL;

	for (i = 0; vf_mask; i++, vf_mask >>= 1) {
		cnt = bp->pf.vf_info[i].vlan_count;
		if (vf_mask & 1) {
			if (bp->pf.vf_info[i].vlan_table == NULL) {
				rc = -1;
				continue;
			}
			if (vlan_on) {
				/* First, search for a duplicate... */
				for (j = 0; j < cnt; j++) {
					if (rte_be_to_cpu_16(
					bp->pf.vf_info[i].vlan_table[j].vid) ==
					    vlan)
						break;
				}
				if (j == cnt) {
					/* Now check that there's space */
					if (cnt == getpagesize() /
					 sizeof(struct bnxt_vlan_table_entry)) {
						RTE_LOG(ERR, PMD,
						  "VF %d VLAN table is full\n",
						  i);
						RTE_LOG(ERR, PMD,
							"cannot add VLAN %u\n",
							vlan);
						rc = -1;
						continue;
					}

					cnt = bp->pf.vf_info[i].vlan_count++;
					/*
					 * And finally, add to the
					 * end of the table
					 */
					ve = &bp->pf.vf_info[i].vlan_table[cnt];
					/* TODO: Hardcoded TPID */
					ve->tpid = rte_cpu_to_be_16(0x8100);
					ve->vid = rte_cpu_to_be_16(vlan);
				}
			} else {
				for (j = 0; cnt; j++) {
					if (rte_be_to_cpu_16(
					bp->pf.vf_info[i].vlan_table[j].vid) !=
					    vlan)
						continue;
					memmove(
					 &bp->pf.vf_info[i].vlan_table[j],
					 &bp->pf.vf_info[i].vlan_table[j + 1],
					 getpagesize() -
					 ((j + 1) *
					 sizeof(struct bnxt_vlan_table_entry)));
					j--;
					cnt = bp->pf.vf_info[i].vlan_count--;
				}
			}
			rte_pmd_bnxt_set_vf_vlan_anti_spoof(dev->data->port_id,
					i, bp->pf.vf_info[i].vlan_spoof_en);
		}
	}

	return rc;
}
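
/*
 * Illustrative note on the table sizing above (the concrete numbers are
 * assumptions used only as an example): each per-VF VLAN table occupies
 * one page, so its capacity is
 * getpagesize() / sizeof(struct bnxt_vlan_table_entry). With a 4096-byte
 * page and a 4-byte entry (16-bit TPID plus 16-bit VID) that would allow
 * up to 1024 VLAN filters per VF before the "table is full" path is taken.
 */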

int rte_pmd_bnxt_get_vf_stats(uint8_t port,
			      uint16_t vf_id,
			      struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct bnxt *bp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(dev))
		return -ENOTSUP;

	rte_eth_dev_info_get(port, &dev_info);
	bp = (struct bnxt *)dev->data->dev_private;

	if (vf_id >= dev_info.max_vfs)
		return -EINVAL;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD,
			"Attempt to get VF %d stats on non-PF port %d!\n",
			vf_id, port);
		return -ENOTSUP;
	}

	return bnxt_hwrm_func_qstats(bp, bp->pf.first_vf_id + vf_id, stats);
}

int rte_pmd_bnxt_reset_vf_stats(uint8_t port,
				uint16_t vf_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct bnxt *bp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(dev))
		return -ENOTSUP;

	rte_eth_dev_info_get(port, &dev_info);
	bp = (struct bnxt *)dev->data->dev_private;

	if (vf_id >= dev_info.max_vfs)
		return -EINVAL;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD,
			"Attempt to reset VF %d stats on non-PF port %d!\n",
			vf_id, port);
		return -ENOTSUP;
	}

	return bnxt_hwrm_func_clr_stats(bp, bp->pf.first_vf_id + vf_id);
}

int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct bnxt *bp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(dev))
		return -ENOTSUP;

	rte_eth_dev_info_get(port, &dev_info);
	bp = (struct bnxt *)dev->data->dev_private;

	if (vf_id >= dev_info.max_vfs)
		return -EINVAL;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD,
			"Attempt to query VF %d RX stats on non-PF port %d!\n",
			vf_id, port);
		return -ENOTSUP;
	}

	return bnxt_vf_default_vnic_count(bp, vf_id);
}

int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id,
				      uint64_t *count)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct bnxt *bp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(dev))
		return -ENOTSUP;

	rte_eth_dev_info_get(port, &dev_info);
	bp = (struct bnxt *)dev->data->dev_private;

	if (vf_id >= dev_info.max_vfs)
		return -EINVAL;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD,
			"Attempt to query VF %d TX drops on non-PF port %d!\n",
			vf_id, port);
		return -ENOTSUP;
	}

	return bnxt_hwrm_func_qstats_tx_drop(bp, bp->pf.first_vf_id + vf_id,
					     count);
}

int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *addr,
			      uint32_t vf_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct bnxt *bp;
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info vnic;
	struct ether_addr dflt_mac;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(dev))
		return -ENOTSUP;

	rte_eth_dev_info_get(port, &dev_info);
	bp = (struct bnxt *)dev->data->dev_private;

	if (vf_id >= dev_info.max_vfs)
		return -EINVAL;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD,
			"Attempt to config VF %d MAC on non-PF port %d!\n",
			vf_id, port);
		return -ENOTSUP;
	}

	/* If the VF currently uses a random MAC, update default to this one */
	if (bp->pf.vf_info[vf_id].random_mac) {
		if (rte_pmd_bnxt_get_vf_rx_status(port, vf_id) <= 0)
			rc = bnxt_hwrm_func_vf_mac(bp, vf_id, (uint8_t *)addr);
	}

	/* query the default VNIC id used by the function */
	rc = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf_id);
	if (rc < 0)
		goto exit;

	memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
	vnic.fw_vnic_id = rte_le_to_cpu_16(rc);
	rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf_id);
	if (rc < 0)
		goto exit;

	STAILQ_FOREACH(filter, &bp->pf.vf_info[vf_id].filter, next) {
		if (filter->flags ==
		    HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX &&
		    filter->enables ==
		    (HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
		     HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK) &&
		    memcmp(addr, filter->l2_addr, ETHER_ADDR_LEN) == 0) {
			bnxt_hwrm_clear_filter(bp, filter);
			break;
		}
	}

	if (filter == NULL)
		filter = bnxt_alloc_vf_filter(bp, vf_id);

	filter->fw_l2_filter_id = UINT64_MAX;
	filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
	memcpy(filter->l2_addr, addr, ETHER_ADDR_LEN);
	memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);

	/* Do not add a filter for the default MAC */
	if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf_id, &dflt_mac) ||
	    memcmp(filter->l2_addr, dflt_mac.addr_bytes, ETHER_ADDR_LEN))
		rc = bnxt_hwrm_set_filter(bp, vnic.fw_vnic_id, filter);

exit:
	return rc;
}

int
rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf,
				uint16_t vlan_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct bnxt *bp;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(dev))
		return -ENOTSUP;

	rte_eth_dev_info_get(port, &dev_info);
	bp = (struct bnxt *)dev->data->dev_private;

	if (vf >= dev_info.max_vfs)
		return -EINVAL;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD,
			"Attempt to set VF %d vlan insert on non-PF port %d!\n",
			vf, port);
		return -ENOTSUP;
	}

	bp->pf.vf_info[vf].dflt_vlan = vlan_id;
	if (bnxt_hwrm_func_qcfg_current_vf_vlan(bp, vf) ==
	    bp->pf.vf_info[vf].dflt_vlan)
		return 0;

	rc = bnxt_hwrm_set_vf_vlan(bp, vf);

	return rc;
}
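
/*
 * Illustrative usage sketch (the port, VF, and VLAN values are assumptions
 * for the example): to have VF 3 on port 0 transmit with VLAN 100 inserted
 * by default, the PF application would call:
 *
 *	rte_pmd_bnxt_set_vf_vlan_insert(0, 3, 100);
 *
 * The call is a no-op when the firmware already reports the requested
 * VLAN as the VF's current default.
 */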