/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <linux/pci_regs.h>
#include <rte_alarm.h>
#include <ethdev_pci.h>
#include <rte_io.h>
#include <rte_pci.h>
#include <rte_vfio.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"
#include "hns3_rxtx.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_dcb.h"
#include "hns3_mp.h"

#define HNS3VF_KEEP_ALIVE_INTERVAL	2000000 /* us */
#define HNS3VF_SERVICE_INTERVAL		1000000 /* us */

#define HNS3VF_RESET_WAIT_MS	20
#define HNS3VF_RESET_WAIT_CNT	2000

/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT		0
#define HNS3_CORE_RESET_BIT		1
#define HNS3_IMP_RESET_BIT		2
#define HNS3_FUN_RST_ING_B		0

enum hns3vf_evt_cause {
	HNS3VF_VECTOR0_EVENT_RST,
	HNS3VF_VECTOR0_EVENT_MBX,
	HNS3VF_VECTOR0_EVENT_OTHER,
};

static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
						    uint64_t *levels);
static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);

static int hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
				  struct rte_ether_addr *mac_addr);
static int hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
				     struct rte_ether_addr *mac_addr);
/* set PCI bus mastering */
static int
hns3vf_set_bus_master(const struct rte_pci_device *device, bool op)
{
	uint16_t reg;
	int ret;

	ret = rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
			     PCI_COMMAND);
		return ret;
	}

	if (op)
		/* set the master bit */
		reg |= PCI_COMMAND_MASTER;
	else
		reg &= ~(PCI_COMMAND_MASTER);

	return rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
}

/**
 * hns3vf_find_pci_capability - lookup a capability in the PCI capability list
 * @cap: the capability
 *
 * Return the address of the given capability within the PCI capability list.
 */
static int
hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap)
{
#define MAX_PCIE_CAPABILITY 48
	uint16_t status;
	uint8_t pos;
	uint8_t id;
	int ttl;
	int ret;

	ret = rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_STATUS);
		return 0;
	}

	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	ttl = MAX_PCIE_CAPABILITY;
	ret = rte_pci_read_config(device, &pos, sizeof(pos),
				  PCI_CAPABILITY_LIST);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
			     PCI_CAPABILITY_LIST);
		return 0;
	}

	while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) {
		ret = rte_pci_read_config(device, &id, sizeof(id),
					  (pos + PCI_CAP_LIST_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
				     (pos + PCI_CAP_LIST_ID));
			break;
		}

		if (id == 0xFF)
			break;

		if (id == cap)
			return (int)pos;

		ret = rte_pci_read_config(device, &pos, sizeof(pos),
					  (pos + PCI_CAP_LIST_NEXT));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
				     (pos + PCI_CAP_LIST_NEXT));
			break;
		}
	}
	return 0;
}

static int
hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
{
	uint16_t control;
	int pos;
	int ret;

	pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);
	if (pos) {
		ret = rte_pci_read_config(device, &control, sizeof(control),
					  (pos + PCI_MSIX_FLAGS));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
				     (pos + PCI_MSIX_FLAGS));
			return -ENXIO;
		}

		if (op)
			control |= PCI_MSIX_FLAGS_ENABLE;
		else
			control &= ~PCI_MSIX_FLAGS_ENABLE;
		ret = rte_pci_write_config(device, &control, sizeof(control),
					   (pos + PCI_MSIX_FLAGS));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "failed to write PCI offset 0x%x",
				     (pos + PCI_MSIX_FLAGS));
		}
		return 0;
	}
	return -ENXIO;
}

static int
hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	/* mac address was checked by upper level interface */
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
				HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
				RTE_ETHER_ADDR_LEN, false, NULL, 0);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add uc mac addr(%s), ret = %d",
			 mac_str, ret);
	}
	return ret;
}

static int
hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	/* mac address was checked by upper level interface */
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
				HNS3_MBX_MAC_VLAN_UC_REMOVE,
				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN,
				false, NULL, 0);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to remove uc mac addr(%s), ret = %d",
			 mac_str, ret);
	}
	return ret;
}

static int
hns3vf_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	int ret;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		/* Check if there are duplicate addresses */
if (rte_is_same_ether_addr(addr, mac_addr)) { 213 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 214 addr); 215 hns3_err(hw, "failed to add mc mac addr, same addrs" 216 "(%s) is added by the set_mc_mac_addr_list " 217 "API", mac_str); 218 return -EINVAL; 219 } 220 } 221 222 ret = hns3vf_add_mc_mac_addr(hw, mac_addr); 223 if (ret) { 224 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 225 mac_addr); 226 hns3_err(hw, "failed to add mc mac addr(%s), ret = %d", 227 mac_str, ret); 228 } 229 return ret; 230 } 231 232 static int 233 hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 234 __rte_unused uint32_t idx, 235 __rte_unused uint32_t pool) 236 { 237 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 238 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 239 int ret; 240 241 rte_spinlock_lock(&hw->lock); 242 243 /* 244 * In hns3 network engine adding UC and MC mac address with different 245 * commands with firmware. We need to determine whether the input 246 * address is a UC or a MC address to call different commands. 247 * By the way, it is recommended calling the API function named 248 * rte_eth_dev_set_mc_addr_list to set the MC mac address, because 249 * using the rte_eth_dev_mac_addr_add API function to set MC mac address 250 * may affect the specifications of UC mac addresses. 251 */ 252 if (rte_is_multicast_ether_addr(mac_addr)) 253 ret = hns3vf_add_mc_addr_common(hw, mac_addr); 254 else 255 ret = hns3vf_add_uc_mac_addr(hw, mac_addr); 256 257 rte_spinlock_unlock(&hw->lock); 258 if (ret) { 259 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 260 mac_addr); 261 hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str, 262 ret); 263 } 264 265 return ret; 266 } 267 268 static void 269 hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) 270 { 271 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 272 /* index will be checked by upper level rte interface */ 273 struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx]; 274 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 275 int ret; 276 277 rte_spinlock_lock(&hw->lock); 278 279 if (rte_is_multicast_ether_addr(mac_addr)) 280 ret = hns3vf_remove_mc_mac_addr(hw, mac_addr); 281 else 282 ret = hns3vf_remove_uc_mac_addr(hw, mac_addr); 283 284 rte_spinlock_unlock(&hw->lock); 285 if (ret) { 286 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 287 mac_addr); 288 hns3_err(hw, "failed to remove mac addr(%s), ret = %d", 289 mac_str, ret); 290 } 291 } 292 293 static int 294 hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, 295 struct rte_ether_addr *mac_addr) 296 { 297 #define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2) 298 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 299 struct rte_ether_addr *old_addr; 300 uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */ 301 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 302 int ret; 303 304 /* 305 * It has been guaranteed that input parameter named mac_addr is valid 306 * address in the rte layer of DPDK framework. 
 */
	old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
	rte_spinlock_lock(&hw->lock);
	memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
	memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
	       RTE_ETHER_ADDR_LEN);

	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
				HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
				HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
	if (ret) {
		/*
		 * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev
		 * driver. When the user has configured a MAC address for the VF
		 * device by the "ip link set ..." command based on the PF
		 * device, the hns3 PF kernel ethdev driver does not allow the
		 * VF driver to request reconfiguring a different default MAC
		 * address, and returns -EPERM to the VF driver through mailbox.
		 */
		if (ret == -EPERM) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       old_addr);
			hns3_warn(hw, "Has permanent mac addr(%s) for vf",
				  mac_str);
		} else {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       mac_addr);
			hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
				 mac_str, ret);
		}
	}

	rte_ether_addr_copy(mac_addr,
			    (struct rte_ether_addr *)hw->mac.mac_addr);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int err = 0;
	int ret;
	int i;

	for (i = 0; i < HNS3_VF_UC_MACADDR_NUM; i++) {
		addr = &hw->data->mac_addrs[i];
		if (rte_is_zero_ether_addr(addr))
			continue;
		if (rte_is_multicast_ether_addr(addr))
			ret = del ? hns3vf_remove_mc_mac_addr(hw, addr) :
			      hns3vf_add_mc_mac_addr(hw, addr);
		else
			ret = del ? hns3vf_remove_uc_mac_addr(hw, addr) :
			      hns3vf_add_uc_mac_addr(hw, addr);

		if (ret) {
			err = ret;
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to %s mac addr(%s) index:%d "
				 "ret = %d.", del ?
"remove" : "restore", 373 mac_str, i, ret); 374 } 375 } 376 return err; 377 } 378 379 static int 380 hns3vf_add_mc_mac_addr(struct hns3_hw *hw, 381 struct rte_ether_addr *mac_addr) 382 { 383 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 384 int ret; 385 386 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST, 387 HNS3_MBX_MAC_VLAN_MC_ADD, 388 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, 389 NULL, 0); 390 if (ret) { 391 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 392 mac_addr); 393 hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d", 394 mac_str, ret); 395 } 396 397 return ret; 398 } 399 400 static int 401 hns3vf_remove_mc_mac_addr(struct hns3_hw *hw, 402 struct rte_ether_addr *mac_addr) 403 { 404 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 405 int ret; 406 407 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST, 408 HNS3_MBX_MAC_VLAN_MC_REMOVE, 409 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, 410 NULL, 0); 411 if (ret) { 412 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 413 mac_addr); 414 hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d", 415 mac_str, ret); 416 } 417 418 return ret; 419 } 420 421 static int 422 hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw, 423 struct rte_ether_addr *mc_addr_set, 424 uint32_t nb_mc_addr) 425 { 426 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 427 struct rte_ether_addr *addr; 428 uint32_t i; 429 uint32_t j; 430 431 if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { 432 hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " 433 "invalid. valid range: 0~%d", 434 nb_mc_addr, HNS3_MC_MACADDR_NUM); 435 return -EINVAL; 436 } 437 438 /* Check if input mac addresses are valid */ 439 for (i = 0; i < nb_mc_addr; i++) { 440 addr = &mc_addr_set[i]; 441 if (!rte_is_multicast_ether_addr(addr)) { 442 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 443 addr); 444 hns3_err(hw, 445 "failed to set mc mac addr, addr(%s) invalid.", 446 mac_str); 447 return -EINVAL; 448 } 449 450 /* Check if there are duplicate addresses */ 451 for (j = i + 1; j < nb_mc_addr; j++) { 452 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { 453 hns3_ether_format_addr(mac_str, 454 RTE_ETHER_ADDR_FMT_SIZE, 455 addr); 456 hns3_err(hw, "failed to set mc mac addr, " 457 "addrs invalid. two same addrs(%s).", 458 mac_str); 459 return -EINVAL; 460 } 461 } 462 463 /* 464 * Check if there are duplicate addresses between mac_addrs 465 * and mc_addr_set 466 */ 467 for (j = 0; j < HNS3_VF_UC_MACADDR_NUM; j++) { 468 if (rte_is_same_ether_addr(addr, 469 &hw->data->mac_addrs[j])) { 470 hns3_ether_format_addr(mac_str, 471 RTE_ETHER_ADDR_FMT_SIZE, 472 addr); 473 hns3_err(hw, "failed to set mc mac addr, " 474 "addrs invalid. 
addrs(%s) has already "
					 "configured in mac_addr add API",
					 mac_str);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int
hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mc_addr_set,
			    uint32_t nb_mc_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *addr;
	int cur_addr_num;
	int set_addr_num;
	int num;
	int ret;
	int i;

	ret = hns3vf_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
	if (ret)
		return ret;

	rte_spinlock_lock(&hw->lock);
	cur_addr_num = hw->mc_addrs_num;
	for (i = 0; i < cur_addr_num; i++) {
		num = cur_addr_num - i - 1;
		addr = &hw->mc_addrs[num];
		ret = hns3vf_remove_mc_mac_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		hw->mc_addrs_num--;
	}

	set_addr_num = (int)nb_mc_addr;
	for (i = 0; i < set_addr_num; i++) {
		addr = &mc_addr_set[i];
		ret = hns3vf_add_mc_mac_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
		hw->mc_addrs_num++;
	}
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;
	int err = 0;
	int ret;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		if (!rte_is_multicast_ether_addr(addr))
			continue;
		if (del)
			ret = hns3vf_remove_mc_mac_addr(hw, addr);
		else
			ret = hns3vf_add_mc_mac_addr(hw, addr);
		if (ret) {
			err = ret;
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d",
				 del ? "Remove" : "Restore", mac_str, ret);
		}
	}
	return err;
}

static int
hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
			bool en_uc_pmc, bool en_mc_pmc)
{
	struct hns3_mbx_vf_to_pf_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;

	/*
	 * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev driver,
	 * so there are some features for promiscuous/allmulticast mode in the
	 * hns3 VF PMD driver as below:
	 * 1. The promiscuous/allmulticast mode can be configured successfully
	 *    only based on a trusted VF device. If based on a non-trusted VF
	 *    device, configuring promiscuous/allmulticast mode will fail.
	 *    The hns3 VF device can be configured as a trusted device by the
	 *    hns3 PF kernel ethdev driver on the host by the following command:
	 *      "ip link set <eth num> vf <vf id> trust on"
	 * 2. After the promiscuous mode is configured successfully, the hns3 VF
	 *    PMD driver can receive the ingress and outgoing traffic, in other
	 *    words, all the ingress packets, and all the packets sent from the
	 *    PF and other VFs on the same physical port.
	 * 3. Note: Because of hardware constraints, the vlan filter is enabled
	 *    by default and cannot be turned off based on the VF device, so the
	 *    vlan filter is still effective even in promiscuous mode. If upper
	 *    applications don't call the rte_eth_dev_vlan_filter API function
	 *    to set a vlan based on the VF device, the hns3 VF PMD driver
	 *    cannot receive the packets with vlan tag in promiscuous mode.
	 */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_bc_pmc ? 1 : 0;
	req->msg[2] = en_uc_pmc ? 1 : 0;
	req->msg[3] = en_mc_pmc ? 1 : 0;
	req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Set promisc mode fail, ret = %d", ret);

	return ret;
}

static int
hns3vf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3vf_set_promisc_mode(hw, true, true, true);
	if (ret)
		hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
			 ret);
	return ret;
}

static int
hns3vf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	bool allmulti = dev->data->all_multicast ? true : false;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3vf_set_promisc_mode(hw, true, false, allmulti);
	if (ret)
		hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
			 ret);
	return ret;
}

static int
hns3vf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (dev->data->promiscuous)
		return 0;

	ret = hns3vf_set_promisc_mode(hw, true, false, true);
	if (ret)
		hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
			 ret);
	return ret;
}

static int
hns3vf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (dev->data->promiscuous)
		return 0;

	ret = hns3vf_set_promisc_mode(hw, true, false, false);
	if (ret)
		hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
			 ret);
	return ret;
}

static int
hns3vf_restore_promisc(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	bool allmulti = hw->data->all_multicast ? true : false;

	if (hw->data->promiscuous)
		return hns3vf_set_promisc_mode(hw, true, true, true);

	return hns3vf_set_promisc_mode(hw, true, false, allmulti);
}

static int
hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
			     bool mmap, enum hns3_ring_type queue_type,
			     uint16_t queue_id)
{
	struct hns3_vf_bind_vector_msg bind_msg;
	const char *op_str;
	uint16_t code;
	int ret;

	memset(&bind_msg, 0, sizeof(bind_msg));
	code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
		HNS3_MBX_UNMAP_RING_TO_VECTOR;
	bind_msg.vector_id = vector_id;

	if (queue_type == HNS3_RING_TYPE_RX)
		bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
	else
		bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;

	bind_msg.param[0].ring_type = queue_type;
	bind_msg.ring_num = 1;
	bind_msg.param[0].tqp_index = queue_id;
	op_str = mmap ? "Map" : "Unmap";
	ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
				sizeof(bind_msg), false, NULL, 0);
	if (ret)
		hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.",
			 op_str, queue_id, bind_msg.vector_id, ret);

	return ret;
}

static int
hns3vf_init_ring_with_vector(struct hns3_hw *hw)
{
	uint16_t vec;
	int ret;
	int i;

	/*
	 * In hns3 network engine, vector 0 is always the misc interrupt of this
	 * function, vector 1~N can be used respectively for the queues of the
	 * function. Tx and Rx queues with the same number share the interrupt
	 * vector. In the initialization, clearing all the hardware mapping
	 * relationship configurations between queues and interrupt vectors is
	 * needed, so errors caused by the residual configurations, such as an
	 * unexpected Tx interrupt, can be avoided.
	 */
	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
	if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
		vec = vec - 1; /* the last interrupt is reserved */
	hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
	for (i = 0; i < hw->intr_tqps_num; i++) {
		/*
		 * Set gap limiter/rate limiter/quantity limiter algorithm
		 * configuration for interrupt coalesce of queue's interrupt.
		 */
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
		/*
		 * QL(quantity limiter) is not used currently, just set 0 to
		 * close it.
		 */
		hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);

		ret = hns3vf_bind_ring_with_vector(hw, vec, false,
						   HNS3_RING_TYPE_TX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with "
					  "vector: %u, ret=%d", i, vec, ret);
			return ret;
		}

		ret = hns3vf_bind_ring_with_vector(hw, vec, false,
						   HNS3_RING_TYPE_RX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with "
					  "vector: %u, ret=%d", i, vec, ret);
			return ret;
		}
	}

	return 0;
}

static int
hns3vf_dev_configure(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;
	struct rte_eth_rss_conf rss_conf;
	uint32_t max_rx_pkt_len;
	uint16_t mtu;
	bool gro_en;
	int ret;

	hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);

	/*
	 * Some versions of the hardware network engine do not support
	 * individually enabling/disabling/resetting the Tx or Rx queue. These
	 * devices must enable/disable/reset Tx and Rx queues at the same time.
	 * When the number of Tx queues allocated by upper applications is not
	 * equal to the number of Rx queues, the driver needs to set up fake Tx
	 * or Rx queues to adjust the numbers of Tx/Rx queues. Otherwise, the
	 * network engine cannot work as usual. But these fake queues are
	 * imperceptible, and cannot be used by upper applications.
797 */ 798 if (!hns3_dev_indep_txrx_supported(hw)) { 799 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); 800 if (ret) { 801 hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", 802 ret); 803 return ret; 804 } 805 } 806 807 hw->adapter_state = HNS3_NIC_CONFIGURING; 808 if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { 809 hns3_err(hw, "setting link speed/duplex not supported"); 810 ret = -EINVAL; 811 goto cfg_err; 812 } 813 814 /* When RSS is not configured, redirect the packet queue 0 */ 815 if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) { 816 conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 817 hw->rss_dis_flag = false; 818 rss_conf = conf->rx_adv_conf.rss_conf; 819 ret = hns3_dev_rss_hash_update(dev, &rss_conf); 820 if (ret) 821 goto cfg_err; 822 } 823 824 /* 825 * If jumbo frames are enabled, MTU needs to be refreshed 826 * according to the maximum RX packet length. 827 */ 828 if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 829 max_rx_pkt_len = conf->rxmode.max_rx_pkt_len; 830 if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN || 831 max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) { 832 hns3_err(hw, "maximum Rx packet length must be greater " 833 "than %u and less than %u when jumbo frame enabled.", 834 (uint16_t)HNS3_DEFAULT_FRAME_LEN, 835 (uint16_t)HNS3_MAX_FRAME_LEN); 836 ret = -EINVAL; 837 goto cfg_err; 838 } 839 840 mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len); 841 ret = hns3vf_dev_mtu_set(dev, mtu); 842 if (ret) 843 goto cfg_err; 844 dev->data->mtu = mtu; 845 } 846 847 ret = hns3vf_dev_configure_vlan(dev); 848 if (ret) 849 goto cfg_err; 850 851 /* config hardware GRO */ 852 gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false; 853 ret = hns3_config_gro(hw, gro_en); 854 if (ret) 855 goto cfg_err; 856 857 hns->rx_simple_allowed = true; 858 hns->rx_vec_allowed = true; 859 hns->tx_simple_allowed = true; 860 hns->tx_vec_allowed = true; 861 862 hns3_init_rx_ptype_tble(dev); 863 864 hw->adapter_state = HNS3_NIC_CONFIGURED; 865 return 0; 866 867 cfg_err: 868 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0); 869 hw->adapter_state = HNS3_NIC_INITIALIZED; 870 871 return ret; 872 } 873 874 static int 875 hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu) 876 { 877 int ret; 878 879 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu, 880 sizeof(mtu), true, NULL, 0); 881 if (ret) 882 hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret); 883 884 return ret; 885 } 886 887 static int 888 hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 889 { 890 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 891 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD; 892 int ret; 893 894 /* 895 * The hns3 PF/VF devices on the same port share the hardware MTU 896 * configuration. Currently, we send mailbox to inform hns3 PF kernel 897 * ethdev driver to finish hardware MTU configuration in hns3 VF PMD 898 * driver, there is no need to stop the port for hns3 VF device, and the 899 * MTU value issued by hns3 VF PMD driver must be less than or equal to 900 * PF's MTU. 901 */ 902 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) { 903 hns3_err(hw, "Failed to set mtu during resetting"); 904 return -EIO; 905 } 906 907 /* 908 * when Rx of scattered packets is off, we have some possibility of 909 * using vector Rx process function or simple Rx functions in hns3 PMD 910 * driver. 
If the input MTU is increased and the maximum length of 911 * received packets is greater than the length of a buffer for Rx 912 * packet, the hardware network engine needs to use multiple BDs and 913 * buffers to store these packets. This will cause problems when still 914 * using vector Rx process function or simple Rx function to receiving 915 * packets. So, when Rx of scattered packets is off and device is 916 * started, it is not permitted to increase MTU so that the maximum 917 * length of Rx packets is greater than Rx buffer length. 918 */ 919 if (dev->data->dev_started && !dev->data->scattered_rx && 920 frame_size > hw->rx_buf_len) { 921 hns3_err(hw, "failed to set mtu because current is " 922 "not scattered rx mode"); 923 return -EOPNOTSUPP; 924 } 925 926 rte_spinlock_lock(&hw->lock); 927 ret = hns3vf_config_mtu(hw, mtu); 928 if (ret) { 929 rte_spinlock_unlock(&hw->lock); 930 return ret; 931 } 932 if (mtu > RTE_ETHER_MTU) 933 dev->data->dev_conf.rxmode.offloads |= 934 DEV_RX_OFFLOAD_JUMBO_FRAME; 935 else 936 dev->data->dev_conf.rxmode.offloads &= 937 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 938 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 939 rte_spinlock_unlock(&hw->lock); 940 941 return 0; 942 } 943 944 static int 945 hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) 946 { 947 struct hns3_adapter *hns = eth_dev->data->dev_private; 948 struct hns3_hw *hw = &hns->hw; 949 uint16_t q_num = hw->tqps_num; 950 951 /* 952 * In interrupt mode, 'max_rx_queues' is set based on the number of 953 * MSI-X interrupt resources of the hardware. 954 */ 955 if (hw->data->dev_conf.intr_conf.rxq == 1) 956 q_num = hw->intr_tqps_num; 957 958 info->max_rx_queues = q_num; 959 info->max_tx_queues = hw->tqps_num; 960 info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ 961 info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE; 962 info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM; 963 info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; 964 info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; 965 966 info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | 967 DEV_RX_OFFLOAD_UDP_CKSUM | 968 DEV_RX_OFFLOAD_TCP_CKSUM | 969 DEV_RX_OFFLOAD_SCTP_CKSUM | 970 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 971 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | 972 DEV_RX_OFFLOAD_SCATTER | 973 DEV_RX_OFFLOAD_VLAN_STRIP | 974 DEV_RX_OFFLOAD_VLAN_FILTER | 975 DEV_RX_OFFLOAD_JUMBO_FRAME | 976 DEV_RX_OFFLOAD_RSS_HASH | 977 DEV_RX_OFFLOAD_TCP_LRO); 978 info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 979 DEV_TX_OFFLOAD_IPV4_CKSUM | 980 DEV_TX_OFFLOAD_TCP_CKSUM | 981 DEV_TX_OFFLOAD_UDP_CKSUM | 982 DEV_TX_OFFLOAD_SCTP_CKSUM | 983 DEV_TX_OFFLOAD_MULTI_SEGS | 984 DEV_TX_OFFLOAD_TCP_TSO | 985 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 986 DEV_TX_OFFLOAD_GRE_TNL_TSO | 987 DEV_TX_OFFLOAD_GENEVE_TNL_TSO | 988 DEV_TX_OFFLOAD_MBUF_FAST_FREE | 989 hns3_txvlan_cap_get(hw)); 990 991 if (hns3_dev_outer_udp_cksum_supported(hw)) 992 info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; 993 994 if (hns3_dev_indep_txrx_supported(hw)) 995 info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 996 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; 997 998 info->rx_desc_lim = (struct rte_eth_desc_lim) { 999 .nb_max = HNS3_MAX_RING_DESC, 1000 .nb_min = HNS3_MIN_RING_DESC, 1001 .nb_align = HNS3_ALIGN_RING_DESC, 1002 }; 1003 1004 info->tx_desc_lim = (struct rte_eth_desc_lim) { 1005 .nb_max = HNS3_MAX_RING_DESC, 1006 .nb_min = HNS3_MIN_RING_DESC, 1007 .nb_align = HNS3_ALIGN_RING_DESC, 1008 .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT, 1009 .nb_mtu_seg_max = 
hw->max_non_tso_bd_num, 1010 }; 1011 1012 info->default_rxconf = (struct rte_eth_rxconf) { 1013 .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH, 1014 /* 1015 * If there are no available Rx buffer descriptors, incoming 1016 * packets are always dropped by hardware based on hns3 network 1017 * engine. 1018 */ 1019 .rx_drop_en = 1, 1020 .offloads = 0, 1021 }; 1022 info->default_txconf = (struct rte_eth_txconf) { 1023 .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH, 1024 .offloads = 0, 1025 }; 1026 1027 info->vmdq_queue_num = 0; 1028 1029 info->reta_size = hw->rss_ind_tbl_size; 1030 info->hash_key_size = HNS3_RSS_KEY_SIZE; 1031 info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; 1032 info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; 1033 info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; 1034 1035 return 0; 1036 } 1037 1038 static void 1039 hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr) 1040 { 1041 hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr); 1042 } 1043 1044 static void 1045 hns3vf_disable_irq0(struct hns3_hw *hw) 1046 { 1047 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0); 1048 } 1049 1050 static void 1051 hns3vf_enable_irq0(struct hns3_hw *hw) 1052 { 1053 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1); 1054 } 1055 1056 static enum hns3vf_evt_cause 1057 hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) 1058 { 1059 struct hns3_hw *hw = &hns->hw; 1060 enum hns3vf_evt_cause ret; 1061 uint32_t cmdq_stat_reg; 1062 uint32_t rst_ing_reg; 1063 uint32_t val; 1064 1065 /* Fetch the events from their corresponding regs */ 1066 cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG); 1067 1068 if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) { 1069 rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING); 1070 hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg); 1071 hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending); 1072 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); 1073 val = hns3_read_dev(hw, HNS3_VF_RST_ING); 1074 hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT); 1075 val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B); 1076 if (clearval) { 1077 hw->reset.stats.global_cnt++; 1078 hns3_warn(hw, "Global reset detected, clear reset status"); 1079 } else { 1080 hns3_schedule_delayed_reset(hns); 1081 hns3_warn(hw, "Global reset detected, don't clear reset status"); 1082 } 1083 1084 ret = HNS3VF_VECTOR0_EVENT_RST; 1085 goto out; 1086 } 1087 1088 /* Check for vector0 mailbox(=CMDQ RX) event source */ 1089 if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { 1090 val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B); 1091 ret = HNS3VF_VECTOR0_EVENT_MBX; 1092 goto out; 1093 } 1094 1095 val = 0; 1096 ret = HNS3VF_VECTOR0_EVENT_OTHER; 1097 out: 1098 if (clearval) 1099 *clearval = val; 1100 return ret; 1101 } 1102 1103 static void 1104 hns3vf_interrupt_handler(void *param) 1105 { 1106 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 1107 struct hns3_adapter *hns = dev->data->dev_private; 1108 struct hns3_hw *hw = &hns->hw; 1109 enum hns3vf_evt_cause event_cause; 1110 uint32_t clearval; 1111 1112 if (hw->irq_thread_id == 0) 1113 hw->irq_thread_id = pthread_self(); 1114 1115 /* Disable interrupt */ 1116 hns3vf_disable_irq0(hw); 1117 1118 /* Read out interrupt causes */ 1119 event_cause = hns3vf_check_event_cause(hns, &clearval); 1120 1121 switch (event_cause) { 1122 case HNS3VF_VECTOR0_EVENT_RST: 1123 hns3_schedule_reset(hns); 1124 break; 1125 case HNS3VF_VECTOR0_EVENT_MBX: 1126 hns3_dev_handle_mbx_msg(hw); 
1127 break; 1128 default: 1129 break; 1130 } 1131 1132 /* Clear interrupt causes */ 1133 hns3vf_clear_event_cause(hw, clearval); 1134 1135 /* Enable interrupt */ 1136 hns3vf_enable_irq0(hw); 1137 } 1138 1139 static void 1140 hns3vf_set_default_dev_specifications(struct hns3_hw *hw) 1141 { 1142 hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; 1143 hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; 1144 hw->rss_key_size = HNS3_RSS_KEY_SIZE; 1145 hw->intr.int_ql_max = HNS3_INTR_QL_NONE; 1146 } 1147 1148 static void 1149 hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) 1150 { 1151 struct hns3_dev_specs_0_cmd *req0; 1152 1153 req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; 1154 1155 hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; 1156 hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); 1157 hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); 1158 hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); 1159 } 1160 1161 static int 1162 hns3vf_check_dev_specifications(struct hns3_hw *hw) 1163 { 1164 if (hw->rss_ind_tbl_size == 0 || 1165 hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { 1166 hns3_warn(hw, "the size of hash lookup table configured (%u)" 1167 " exceeds the maximum(%u)", hw->rss_ind_tbl_size, 1168 HNS3_RSS_IND_TBL_SIZE_MAX); 1169 return -EINVAL; 1170 } 1171 1172 return 0; 1173 } 1174 1175 static int 1176 hns3vf_query_dev_specifications(struct hns3_hw *hw) 1177 { 1178 struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; 1179 int ret; 1180 int i; 1181 1182 for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 1183 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, 1184 true); 1185 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 1186 } 1187 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); 1188 1189 ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); 1190 if (ret) 1191 return ret; 1192 1193 hns3vf_parse_dev_specifications(hw, desc); 1194 1195 return hns3vf_check_dev_specifications(hw); 1196 } 1197 1198 static int 1199 hns3vf_get_capability(struct hns3_hw *hw) 1200 { 1201 struct rte_pci_device *pci_dev; 1202 struct rte_eth_dev *eth_dev; 1203 uint8_t revision; 1204 int ret; 1205 1206 eth_dev = &rte_eth_devices[hw->data->port_id]; 1207 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1208 1209 /* Get PCI revision id */ 1210 ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, 1211 HNS3_PCI_REVISION_ID); 1212 if (ret != HNS3_PCI_REVISION_ID_LEN) { 1213 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d", 1214 ret); 1215 return -EIO; 1216 } 1217 hw->revision = revision; 1218 1219 if (revision < PCI_REVISION_ID_HIP09_A) { 1220 hns3vf_set_default_dev_specifications(hw); 1221 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; 1222 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; 1223 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; 1224 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1; 1225 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; 1226 hw->rss_info.ipv6_sctp_offload_supported = false; 1227 hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE; 1228 return 0; 1229 } 1230 1231 ret = hns3vf_query_dev_specifications(hw); 1232 if (ret) { 1233 PMD_INIT_LOG(ERR, 1234 "failed to query dev specifications, ret = %d", 1235 ret); 1236 return ret; 1237 } 1238 1239 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; 1240 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; 1241 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; 1242 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; 1243 
hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; 1244 hw->rss_info.ipv6_sctp_offload_supported = true; 1245 hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE; 1246 1247 return 0; 1248 } 1249 1250 static int 1251 hns3vf_check_tqp_info(struct hns3_hw *hw) 1252 { 1253 if (hw->tqps_num == 0) { 1254 PMD_INIT_LOG(ERR, "Get invalid tqps_num(0) from PF."); 1255 return -EINVAL; 1256 } 1257 1258 if (hw->rss_size_max == 0) { 1259 PMD_INIT_LOG(ERR, "Get invalid rss_size_max(0) from PF."); 1260 return -EINVAL; 1261 } 1262 1263 hw->tqps_num = RTE_MIN(hw->rss_size_max, hw->tqps_num); 1264 1265 return 0; 1266 } 1267 1268 static int 1269 hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw) 1270 { 1271 uint8_t resp_msg; 1272 int ret; 1273 1274 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, 1275 HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0, 1276 true, &resp_msg, sizeof(resp_msg)); 1277 if (ret) { 1278 if (ret == -ETIME) { 1279 /* 1280 * Getting current port based VLAN state from PF driver 1281 * will not affect VF driver's basic function. Because 1282 * the VF driver relies on hns3 PF kernel ether driver, 1283 * to avoid introducing compatibility issues with older 1284 * version of PF driver, no failure will be returned 1285 * when the return value is ETIME. This return value has 1286 * the following scenarios: 1287 * 1) Firmware didn't return the results in time 1288 * 2) the result return by firmware is timeout 1289 * 3) the older version of kernel side PF driver does 1290 * not support this mailbox message. 1291 * For scenarios 1 and 2, it is most likely that a 1292 * hardware error has occurred, or a hardware reset has 1293 * occurred. In this case, these errors will be caught 1294 * by other functions. 1295 */ 1296 PMD_INIT_LOG(WARNING, 1297 "failed to get PVID state for timeout, maybe " 1298 "kernel side PF driver doesn't support this " 1299 "mailbox message, or firmware didn't respond."); 1300 resp_msg = HNS3_PORT_BASE_VLAN_DISABLE; 1301 } else { 1302 PMD_INIT_LOG(ERR, "failed to get port based VLAN state," 1303 " ret = %d", ret); 1304 return ret; 1305 } 1306 } 1307 hw->port_base_vlan_cfg.state = resp_msg ? 
1308 HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE; 1309 return 0; 1310 } 1311 1312 static int 1313 hns3vf_get_queue_info(struct hns3_hw *hw) 1314 { 1315 #define HNS3VF_TQPS_RSS_INFO_LEN 6 1316 uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN]; 1317 int ret; 1318 1319 ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true, 1320 resp_msg, HNS3VF_TQPS_RSS_INFO_LEN); 1321 if (ret) { 1322 PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret); 1323 return ret; 1324 } 1325 1326 memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t)); 1327 memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t)); 1328 1329 return hns3vf_check_tqp_info(hw); 1330 } 1331 1332 static int 1333 hns3vf_get_queue_depth(struct hns3_hw *hw) 1334 { 1335 #define HNS3VF_TQPS_DEPTH_INFO_LEN 4 1336 uint8_t resp_msg[HNS3VF_TQPS_DEPTH_INFO_LEN]; 1337 int ret; 1338 1339 ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0, true, 1340 resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN); 1341 if (ret) { 1342 PMD_INIT_LOG(ERR, "Failed to get tqp depth info from PF: %d", 1343 ret); 1344 return ret; 1345 } 1346 1347 memcpy(&hw->num_tx_desc, &resp_msg[0], sizeof(uint16_t)); 1348 memcpy(&hw->num_rx_desc, &resp_msg[2], sizeof(uint16_t)); 1349 1350 return 0; 1351 } 1352 1353 static int 1354 hns3vf_get_tc_info(struct hns3_hw *hw) 1355 { 1356 uint8_t resp_msg; 1357 int ret; 1358 uint32_t i; 1359 1360 ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0, 1361 true, &resp_msg, sizeof(resp_msg)); 1362 if (ret) { 1363 hns3_err(hw, "VF request to get TC info from PF failed %d", 1364 ret); 1365 return ret; 1366 } 1367 1368 hw->hw_tc_map = resp_msg; 1369 1370 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 1371 if (hw->hw_tc_map & BIT(i)) 1372 hw->num_tc++; 1373 } 1374 1375 return 0; 1376 } 1377 1378 static int 1379 hns3vf_get_host_mac_addr(struct hns3_hw *hw) 1380 { 1381 uint8_t host_mac[RTE_ETHER_ADDR_LEN]; 1382 int ret; 1383 1384 ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0, 1385 true, host_mac, RTE_ETHER_ADDR_LEN); 1386 if (ret) { 1387 hns3_err(hw, "Failed to get mac addr from PF: %d", ret); 1388 return ret; 1389 } 1390 1391 memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN); 1392 1393 return 0; 1394 } 1395 1396 static int 1397 hns3vf_get_configuration(struct hns3_hw *hw) 1398 { 1399 int ret; 1400 1401 hw->mac.media_type = HNS3_MEDIA_TYPE_NONE; 1402 hw->rss_dis_flag = false; 1403 1404 /* Get device capability */ 1405 ret = hns3vf_get_capability(hw); 1406 if (ret) { 1407 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret); 1408 return ret; 1409 } 1410 1411 /* Get queue configuration from PF */ 1412 ret = hns3vf_get_queue_info(hw); 1413 if (ret) 1414 return ret; 1415 1416 /* Get queue depth info from PF */ 1417 ret = hns3vf_get_queue_depth(hw); 1418 if (ret) 1419 return ret; 1420 1421 /* Get user defined VF MAC addr from PF */ 1422 ret = hns3vf_get_host_mac_addr(hw); 1423 if (ret) 1424 return ret; 1425 1426 ret = hns3vf_get_port_base_vlan_filter_state(hw); 1427 if (ret) 1428 return ret; 1429 1430 /* Get tc configuration from PF */ 1431 return hns3vf_get_tc_info(hw); 1432 } 1433 1434 static int 1435 hns3vf_set_tc_queue_mapping(struct hns3_adapter *hns, uint16_t nb_rx_q, 1436 uint16_t nb_tx_q) 1437 { 1438 struct hns3_hw *hw = &hns->hw; 1439 1440 if (nb_rx_q < hw->num_tc) { 1441 hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).", 1442 nb_rx_q, hw->num_tc); 1443 return -EINVAL; 1444 } 1445 1446 if (nb_tx_q < hw->num_tc) { 1447 hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).", 1448 
nb_tx_q, hw->num_tc); 1449 return -EINVAL; 1450 } 1451 1452 return hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q); 1453 } 1454 1455 static void 1456 hns3vf_request_link_info(struct hns3_hw *hw) 1457 { 1458 uint8_t resp_msg; 1459 int ret; 1460 1461 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) 1462 return; 1463 ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, 1464 &resp_msg, sizeof(resp_msg)); 1465 if (ret) 1466 hns3_err(hw, "Failed to fetch link status from PF: %d", ret); 1467 } 1468 1469 void 1470 hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, 1471 uint32_t link_speed, uint8_t link_duplex) 1472 { 1473 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 1474 struct hns3_mac *mac = &hw->mac; 1475 bool report_lse; 1476 bool changed; 1477 1478 changed = mac->link_status != link_status || 1479 mac->link_speed != link_speed || 1480 mac->link_duplex != link_duplex; 1481 if (!changed) 1482 return; 1483 1484 /* 1485 * VF's link status/speed/duplex were updated by polling from PF driver, 1486 * because the link status/speed/duplex may be changed in the polling 1487 * interval, so driver will report lse (lsc event) once any of the above 1488 * thress variables changed. 1489 * But if the PF's link status is down and driver saved link status is 1490 * also down, there are no need to report lse. 1491 */ 1492 report_lse = true; 1493 if (link_status == ETH_LINK_DOWN && link_status == mac->link_status) 1494 report_lse = false; 1495 1496 mac->link_status = link_status; 1497 mac->link_speed = link_speed; 1498 mac->link_duplex = link_duplex; 1499 1500 if (report_lse) 1501 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 1502 } 1503 1504 static int 1505 hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) 1506 { 1507 #define HNS3VF_VLAN_MBX_MSG_LEN 5 1508 struct hns3_hw *hw = &hns->hw; 1509 uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN]; 1510 uint16_t proto = htons(RTE_ETHER_TYPE_VLAN); 1511 uint8_t is_kill = on ? 0 : 1; 1512 1513 msg_data[0] = is_kill; 1514 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); 1515 memcpy(&msg_data[3], &proto, sizeof(proto)); 1516 1517 return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER, 1518 msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL, 1519 0); 1520 } 1521 1522 static int 1523 hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1524 { 1525 struct hns3_adapter *hns = dev->data->dev_private; 1526 struct hns3_hw *hw = &hns->hw; 1527 int ret; 1528 1529 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) { 1530 hns3_err(hw, 1531 "vf set vlan id failed during resetting, vlan_id =%u", 1532 vlan_id); 1533 return -EIO; 1534 } 1535 rte_spinlock_lock(&hw->lock); 1536 ret = hns3vf_vlan_filter_configure(hns, vlan_id, on); 1537 rte_spinlock_unlock(&hw->lock); 1538 if (ret) 1539 hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d", 1540 vlan_id, ret); 1541 1542 return ret; 1543 } 1544 1545 static int 1546 hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable) 1547 { 1548 uint8_t msg_data; 1549 int ret; 1550 1551 msg_data = enable ? 
1 : 0; 1552 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG, 1553 &msg_data, sizeof(msg_data), false, NULL, 0); 1554 if (ret) 1555 hns3_err(hw, "vf enable strip failed, ret =%d", ret); 1556 1557 return ret; 1558 } 1559 1560 static int 1561 hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask) 1562 { 1563 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1564 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 1565 unsigned int tmp_mask; 1566 int ret = 0; 1567 1568 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) { 1569 hns3_err(hw, "vf set vlan offload failed during resetting, " 1570 "mask = 0x%x", mask); 1571 return -EIO; 1572 } 1573 1574 tmp_mask = (unsigned int)mask; 1575 /* Vlan stripping setting */ 1576 if (tmp_mask & ETH_VLAN_STRIP_MASK) { 1577 rte_spinlock_lock(&hw->lock); 1578 /* Enable or disable VLAN stripping */ 1579 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 1580 ret = hns3vf_en_hw_strip_rxvtag(hw, true); 1581 else 1582 ret = hns3vf_en_hw_strip_rxvtag(hw, false); 1583 rte_spinlock_unlock(&hw->lock); 1584 } 1585 1586 return ret; 1587 } 1588 1589 static int 1590 hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on) 1591 { 1592 struct rte_vlan_filter_conf *vfc; 1593 struct hns3_hw *hw = &hns->hw; 1594 uint16_t vlan_id; 1595 uint64_t vbit; 1596 uint64_t ids; 1597 int ret = 0; 1598 uint32_t i; 1599 1600 vfc = &hw->data->vlan_filter_conf; 1601 for (i = 0; i < RTE_DIM(vfc->ids); i++) { 1602 if (vfc->ids[i] == 0) 1603 continue; 1604 ids = vfc->ids[i]; 1605 while (ids) { 1606 /* 1607 * 64 means the num bits of ids, one bit corresponds to 1608 * one vlan id 1609 */ 1610 vlan_id = 64 * i; 1611 /* count trailing zeroes */ 1612 vbit = ~ids & (ids - 1); 1613 /* clear least significant bit set */ 1614 ids ^= (ids ^ (ids - 1)) ^ vbit; 1615 for (; vbit;) { 1616 vbit >>= 1; 1617 vlan_id++; 1618 } 1619 ret = hns3vf_vlan_filter_configure(hns, vlan_id, on); 1620 if (ret) { 1621 hns3_err(hw, 1622 "VF handle vlan table failed, ret =%d, on = %d", 1623 ret, on); 1624 return ret; 1625 } 1626 } 1627 } 1628 1629 return ret; 1630 } 1631 1632 static int 1633 hns3vf_remove_all_vlan_table(struct hns3_adapter *hns) 1634 { 1635 return hns3vf_handle_all_vlan_table(hns, 0); 1636 } 1637 1638 static int 1639 hns3vf_restore_vlan_conf(struct hns3_adapter *hns) 1640 { 1641 struct hns3_hw *hw = &hns->hw; 1642 struct rte_eth_conf *dev_conf; 1643 bool en; 1644 int ret; 1645 1646 dev_conf = &hw->data->dev_conf; 1647 en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? 
true : false;
	ret = hns3vf_en_hw_strip_rxvtag(hw, en);
	if (ret)
		hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
			 ret);
	return ret;
}

static int
hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_dev_data *data = dev->data;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
	    data->dev_conf.txmode.hw_vlan_reject_untagged ||
	    data->dev_conf.txmode.hw_vlan_insert_pvid) {
		hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
			      "or hw_vlan_insert_pvid is not supported!");
	}

	/* Apply vlan offload setting */
	ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
	if (ret)
		hns3_err(hw, "dev config vlan offload failed, ret = %d", ret);

	return ret;
}

static int
hns3vf_set_alive(struct hns3_hw *hw, bool alive)
{
	uint8_t msg_data;

	msg_data = alive ? 1 : 0;
	return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
				 sizeof(msg_data), false, NULL, 0);
}

static void
hns3vf_keep_alive_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint8_t respmsg;
	int ret;

	ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
				false, &respmsg, sizeof(uint8_t));
	if (ret)
		hns3_err(hw, "VF sends keep alive cmd failed(=%d)",
			 ret);

	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
			  eth_dev);
}

static void
hns3vf_service_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	/*
	 * The query link status and reset processing are executed in the
	 * interrupt thread. When the IMP reset occurs, IMP will not respond,
	 * and the query operation will time out after 30ms. In the case of
	 * multiple PF/VFs, each query failure timeout causes the IMP reset
	 * interrupt to fail to respond within 100ms.
	 * Before querying the link status, check whether there is a reset
	 * pending, and if so, abandon the query.
1723 */ 1724 if (!hns3vf_is_reset_pending(hns)) 1725 hns3vf_request_link_info(hw); 1726 else 1727 hns3_warn(hw, "Cancel the query when reset is pending"); 1728 1729 rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, 1730 eth_dev); 1731 } 1732 1733 static int 1734 hns3_query_vf_resource(struct hns3_hw *hw) 1735 { 1736 struct hns3_vf_res_cmd *req; 1737 struct hns3_cmd_desc desc; 1738 uint16_t num_msi; 1739 int ret; 1740 1741 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true); 1742 ret = hns3_cmd_send(hw, &desc, 1); 1743 if (ret) { 1744 hns3_err(hw, "query vf resource failed, ret = %d", ret); 1745 return ret; 1746 } 1747 1748 req = (struct hns3_vf_res_cmd *)desc.data; 1749 num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number), 1750 HNS3_VF_VEC_NUM_M, HNS3_VF_VEC_NUM_S); 1751 if (num_msi < HNS3_MIN_VECTOR_NUM) { 1752 hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)", 1753 num_msi, HNS3_MIN_VECTOR_NUM); 1754 return -EINVAL; 1755 } 1756 1757 hw->num_msi = num_msi; 1758 1759 return 0; 1760 } 1761 1762 static int 1763 hns3vf_init_hardware(struct hns3_adapter *hns) 1764 { 1765 struct hns3_hw *hw = &hns->hw; 1766 uint16_t mtu = hw->data->mtu; 1767 int ret; 1768 1769 ret = hns3vf_set_promisc_mode(hw, true, false, false); 1770 if (ret) 1771 return ret; 1772 1773 ret = hns3vf_config_mtu(hw, mtu); 1774 if (ret) 1775 goto err_init_hardware; 1776 1777 ret = hns3vf_vlan_filter_configure(hns, 0, 1); 1778 if (ret) { 1779 PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret); 1780 goto err_init_hardware; 1781 } 1782 1783 ret = hns3_config_gro(hw, false); 1784 if (ret) { 1785 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret); 1786 goto err_init_hardware; 1787 } 1788 1789 /* 1790 * In the initialization clearing the all hardware mapping relationship 1791 * configurations between queues and interrupt vectors is needed, so 1792 * some error caused by the residual configurations, such as the 1793 * unexpected interrupt, can be avoid. 
1794 */ 1795 ret = hns3vf_init_ring_with_vector(hw); 1796 if (ret) { 1797 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret); 1798 goto err_init_hardware; 1799 } 1800 1801 ret = hns3vf_set_alive(hw, true); 1802 if (ret) { 1803 PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret); 1804 goto err_init_hardware; 1805 } 1806 1807 return 0; 1808 1809 err_init_hardware: 1810 (void)hns3vf_set_promisc_mode(hw, false, false, false); 1811 return ret; 1812 } 1813 1814 static int 1815 hns3vf_clear_vport_list(struct hns3_hw *hw) 1816 { 1817 return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL, 1818 HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false, 1819 NULL, 0); 1820 } 1821 1822 static int 1823 hns3vf_init_vf(struct rte_eth_dev *eth_dev) 1824 { 1825 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1826 struct hns3_adapter *hns = eth_dev->data->dev_private; 1827 struct hns3_hw *hw = &hns->hw; 1828 int ret; 1829 1830 PMD_INIT_FUNC_TRACE(); 1831 1832 /* Get hardware io base address from pcie BAR2 IO space */ 1833 hw->io_base = pci_dev->mem_resource[2].addr; 1834 1835 /* Firmware command queue initialize */ 1836 ret = hns3_cmd_init_queue(hw); 1837 if (ret) { 1838 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret); 1839 goto err_cmd_init_queue; 1840 } 1841 1842 /* Firmware command initialize */ 1843 ret = hns3_cmd_init(hw); 1844 if (ret) { 1845 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret); 1846 goto err_cmd_init; 1847 } 1848 1849 /* Get VF resource */ 1850 ret = hns3_query_vf_resource(hw); 1851 if (ret) 1852 goto err_cmd_init; 1853 1854 rte_spinlock_init(&hw->mbx_resp.lock); 1855 1856 hns3vf_clear_event_cause(hw, 0); 1857 1858 ret = rte_intr_callback_register(&pci_dev->intr_handle, 1859 hns3vf_interrupt_handler, eth_dev); 1860 if (ret) { 1861 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret); 1862 goto err_intr_callback_register; 1863 } 1864 1865 /* Enable interrupt */ 1866 rte_intr_enable(&pci_dev->intr_handle); 1867 hns3vf_enable_irq0(hw); 1868 1869 /* Get configuration from PF */ 1870 ret = hns3vf_get_configuration(hw); 1871 if (ret) { 1872 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret); 1873 goto err_get_config; 1874 } 1875 1876 ret = hns3_tqp_stats_init(hw); 1877 if (ret) 1878 goto err_get_config; 1879 1880 /* Hardware statistics of imissed registers cleared. 
*/ 1881 ret = hns3_update_imissed_stats(hw, true); 1882 if (ret) { 1883 hns3_err(hw, "clear imissed stats failed, ret = %d", ret); 1884 goto err_set_tc_queue; 1885 } 1886 1887 ret = hns3vf_set_tc_queue_mapping(hns, hw->tqps_num, hw->tqps_num); 1888 if (ret) { 1889 PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret); 1890 goto err_set_tc_queue; 1891 } 1892 1893 ret = hns3vf_clear_vport_list(hw); 1894 if (ret) { 1895 PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret); 1896 goto err_set_tc_queue; 1897 } 1898 1899 ret = hns3vf_init_hardware(hns); 1900 if (ret) 1901 goto err_set_tc_queue; 1902 1903 hns3_rss_set_default_args(hw); 1904 1905 return 0; 1906 1907 err_set_tc_queue: 1908 hns3_tqp_stats_uninit(hw); 1909 1910 err_get_config: 1911 hns3vf_disable_irq0(hw); 1912 rte_intr_disable(&pci_dev->intr_handle); 1913 hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler, 1914 eth_dev); 1915 err_intr_callback_register: 1916 err_cmd_init: 1917 hns3_cmd_uninit(hw); 1918 hns3_cmd_destroy_queue(hw); 1919 err_cmd_init_queue: 1920 hw->io_base = NULL; 1921 1922 return ret; 1923 } 1924 1925 static void 1926 hns3vf_uninit_vf(struct rte_eth_dev *eth_dev) 1927 { 1928 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1929 struct hns3_adapter *hns = eth_dev->data->dev_private; 1930 struct hns3_hw *hw = &hns->hw; 1931 1932 PMD_INIT_FUNC_TRACE(); 1933 1934 hns3_rss_uninit(hns); 1935 (void)hns3_config_gro(hw, false); 1936 (void)hns3vf_set_alive(hw, false); 1937 (void)hns3vf_set_promisc_mode(hw, false, false, false); 1938 hns3_tqp_stats_uninit(hw); 1939 hns3vf_disable_irq0(hw); 1940 rte_intr_disable(&pci_dev->intr_handle); 1941 hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler, 1942 eth_dev); 1943 hns3_cmd_uninit(hw); 1944 hns3_cmd_destroy_queue(hw); 1945 hw->io_base = NULL; 1946 } 1947 1948 static int 1949 hns3vf_do_stop(struct hns3_adapter *hns) 1950 { 1951 struct hns3_hw *hw = &hns->hw; 1952 int ret; 1953 1954 hw->mac.link_status = ETH_LINK_DOWN; 1955 1956 /* 1957 * The "hns3vf_do_stop" function will also be called by .stop_service to 1958 * prepare reset. At the time of global or IMP reset, the command cannot 1959 * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be 1960 * accessed during the reset process. So the mbuf can not be released 1961 * during reset and is required to be released after the reset is 1962 * completed. 
1963 */ 1964 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) 1965 hns3_dev_release_mbufs(hns); 1966 1967 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) { 1968 hns3vf_configure_mac_addr(hns, true); 1969 ret = hns3_reset_all_tqps(hns); 1970 if (ret) { 1971 hns3_err(hw, "failed to reset all queues ret = %d", 1972 ret); 1973 return ret; 1974 } 1975 } 1976 return 0; 1977 } 1978 1979 static void 1980 hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev) 1981 { 1982 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1983 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1984 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1985 uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; 1986 uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; 1987 uint16_t q_id; 1988 1989 if (dev->data->dev_conf.intr_conf.rxq == 0) 1990 return; 1991 1992 /* unmap the ring with vector */ 1993 if (rte_intr_allow_others(intr_handle)) { 1994 vec = RTE_INTR_VEC_RXTX_OFFSET; 1995 base = RTE_INTR_VEC_RXTX_OFFSET; 1996 } 1997 if (rte_intr_dp_is_en(intr_handle)) { 1998 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 1999 (void)hns3vf_bind_ring_with_vector(hw, vec, false, 2000 HNS3_RING_TYPE_RX, 2001 q_id); 2002 if (vec < base + intr_handle->nb_efd - 1) 2003 vec++; 2004 } 2005 } 2006 /* Clean datapath event and queue/vec mapping */ 2007 rte_intr_efd_disable(intr_handle); 2008 if (intr_handle->intr_vec) { 2009 rte_free(intr_handle->intr_vec); 2010 intr_handle->intr_vec = NULL; 2011 } 2012 } 2013 2014 static int 2015 hns3vf_dev_stop(struct rte_eth_dev *dev) 2016 { 2017 struct hns3_adapter *hns = dev->data->dev_private; 2018 struct hns3_hw *hw = &hns->hw; 2019 2020 PMD_INIT_FUNC_TRACE(); 2021 dev->data->dev_started = 0; 2022 2023 hw->adapter_state = HNS3_NIC_STOPPING; 2024 hns3_set_rxtx_function(dev); 2025 rte_wmb(); 2026 /* Disable datapath on secondary process. */ 2027 hns3_mp_req_stop_rxtx(dev); 2028 /* Prevent crashes when queues are still in use. 
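 * (The rte_delay_ms(hw->tqps_num) below waits roughly one millisecond per TQP so that any in-flight Rx/Tx bursts can drain after hns3_mp_req_stop_rxtx().)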
*/ 2029 rte_delay_ms(hw->tqps_num); 2030 2031 rte_spinlock_lock(&hw->lock); 2032 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { 2033 hns3_stop_tqps(hw); 2034 hns3vf_do_stop(hns); 2035 hns3vf_unmap_rx_interrupt(dev); 2036 hw->adapter_state = HNS3_NIC_CONFIGURED; 2037 } 2038 hns3_rx_scattered_reset(dev); 2039 rte_eal_alarm_cancel(hns3vf_service_handler, dev); 2040 rte_spinlock_unlock(&hw->lock); 2041 2042 return 0; 2043 } 2044 2045 static int 2046 hns3vf_dev_close(struct rte_eth_dev *eth_dev) 2047 { 2048 struct hns3_adapter *hns = eth_dev->data->dev_private; 2049 struct hns3_hw *hw = &hns->hw; 2050 int ret = 0; 2051 2052 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2053 rte_free(eth_dev->process_private); 2054 eth_dev->process_private = NULL; 2055 return 0; 2056 } 2057 2058 if (hw->adapter_state == HNS3_NIC_STARTED) 2059 ret = hns3vf_dev_stop(eth_dev); 2060 2061 hw->adapter_state = HNS3_NIC_CLOSING; 2062 hns3_reset_abort(hns); 2063 hw->adapter_state = HNS3_NIC_CLOSED; 2064 rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev); 2065 hns3vf_configure_all_mc_mac_addr(hns, true); 2066 hns3vf_remove_all_vlan_table(hns); 2067 hns3vf_uninit_vf(eth_dev); 2068 hns3_free_all_queues(eth_dev); 2069 rte_free(hw->reset.wait_data); 2070 rte_free(eth_dev->process_private); 2071 eth_dev->process_private = NULL; 2072 hns3_mp_uninit_primary(); 2073 hns3_warn(hw, "Close port %u finished", hw->data->port_id); 2074 2075 return ret; 2076 } 2077 2078 static int 2079 hns3vf_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, 2080 size_t fw_size) 2081 { 2082 struct hns3_adapter *hns = eth_dev->data->dev_private; 2083 struct hns3_hw *hw = &hns->hw; 2084 uint32_t version = hw->fw_version; 2085 int ret; 2086 2087 ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu", 2088 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, 2089 HNS3_FW_VERSION_BYTE3_S), 2090 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, 2091 HNS3_FW_VERSION_BYTE2_S), 2092 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, 2093 HNS3_FW_VERSION_BYTE1_S), 2094 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, 2095 HNS3_FW_VERSION_BYTE0_S)); 2096 ret += 1; /* add the size of '\0' */ 2097 if (fw_size < (uint32_t)ret) 2098 return ret; 2099 else 2100 return 0; 2101 } 2102 2103 static int 2104 hns3vf_dev_link_update(struct rte_eth_dev *eth_dev, 2105 __rte_unused int wait_to_complete) 2106 { 2107 struct hns3_adapter *hns = eth_dev->data->dev_private; 2108 struct hns3_hw *hw = &hns->hw; 2109 struct hns3_mac *mac = &hw->mac; 2110 struct rte_eth_link new_link; 2111 2112 memset(&new_link, 0, sizeof(new_link)); 2113 switch (mac->link_speed) { 2114 case ETH_SPEED_NUM_10M: 2115 case ETH_SPEED_NUM_100M: 2116 case ETH_SPEED_NUM_1G: 2117 case ETH_SPEED_NUM_10G: 2118 case ETH_SPEED_NUM_25G: 2119 case ETH_SPEED_NUM_40G: 2120 case ETH_SPEED_NUM_50G: 2121 case ETH_SPEED_NUM_100G: 2122 case ETH_SPEED_NUM_200G: 2123 new_link.link_speed = mac->link_speed; 2124 break; 2125 default: 2126 if (mac->link_status) 2127 new_link.link_speed = ETH_SPEED_NUM_UNKNOWN; 2128 else 2129 new_link.link_speed = ETH_SPEED_NUM_NONE; 2130 break; 2131 } 2132 2133 new_link.link_duplex = mac->link_duplex; 2134 new_link.link_status = mac->link_status ? 
ETH_LINK_UP : ETH_LINK_DOWN; 2135 new_link.link_autoneg = 2136 !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); 2137 2138 return rte_eth_linkstatus_set(eth_dev, &new_link); 2139 } 2140 2141 static int 2142 hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue) 2143 { 2144 struct hns3_hw *hw = &hns->hw; 2145 uint16_t nb_rx_q = hw->data->nb_rx_queues; 2146 uint16_t nb_tx_q = hw->data->nb_tx_queues; 2147 int ret; 2148 2149 ret = hns3vf_set_tc_queue_mapping(hns, nb_rx_q, nb_tx_q); 2150 if (ret) 2151 return ret; 2152 2153 hns3_enable_rxd_adv_layout(hw); 2154 2155 ret = hns3_init_queues(hns, reset_queue); 2156 if (ret) 2157 hns3_err(hw, "failed to init queues, ret = %d.", ret); 2158 2159 return ret; 2160 } 2161 2162 static int 2163 hns3vf_map_rx_interrupt(struct rte_eth_dev *dev) 2164 { 2165 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2166 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2167 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2168 uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; 2169 uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; 2170 uint32_t intr_vector; 2171 uint16_t q_id; 2172 int ret; 2173 2174 /* 2175 * hns3 needs a separate interrupt to be used as the event interrupt, which 2176 * cannot be shared with the task queue pairs, so the kernel driver needs to 2177 * support multiple interrupt vectors. 2178 */ 2179 if (dev->data->dev_conf.intr_conf.rxq == 0 || 2180 !rte_intr_cap_multiple(intr_handle)) 2181 return 0; 2182 2183 rte_intr_disable(intr_handle); 2184 intr_vector = hw->used_rx_queues; 2185 /* It creates event fd for each intr vector when MSIX is used */ 2186 if (rte_intr_efd_enable(intr_handle, intr_vector)) 2187 return -EINVAL; 2188 2189 if (intr_handle->intr_vec == NULL) { 2190 intr_handle->intr_vec = 2191 rte_zmalloc("intr_vec", 2192 hw->used_rx_queues * sizeof(int), 0); 2193 if (intr_handle->intr_vec == NULL) { 2194 hns3_err(hw, "Failed to allocate %u rx_queues" 2195 " intr_vec", hw->used_rx_queues); 2196 ret = -ENOMEM; 2197 goto vf_alloc_intr_vec_error; 2198 } 2199 } 2200 2201 if (rte_intr_allow_others(intr_handle)) { 2202 vec = RTE_INTR_VEC_RXTX_OFFSET; 2203 base = RTE_INTR_VEC_RXTX_OFFSET; 2204 } 2205 2206 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 2207 ret = hns3vf_bind_ring_with_vector(hw, vec, true, 2208 HNS3_RING_TYPE_RX, q_id); 2209 if (ret) 2210 goto vf_bind_vector_error; 2211 intr_handle->intr_vec[q_id] = vec; 2212 /* 2213 * If there are not enough efds (e.g. not enough interrupts), the 2214 * remaining queues will be bound to the last interrupt.
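 * (vec stops incrementing once it reaches base + nb_efd - 1, so every remaining queue shares the last event fd.)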
2215 */ 2216 if (vec < base + intr_handle->nb_efd - 1) 2217 vec++; 2218 } 2219 rte_intr_enable(intr_handle); 2220 return 0; 2221 2222 vf_bind_vector_error: 2223 rte_free(intr_handle->intr_vec); 2224 intr_handle->intr_vec = NULL; 2225 vf_alloc_intr_vec_error: 2226 rte_intr_efd_disable(intr_handle); 2227 return ret; 2228 } 2229 2230 static int 2231 hns3vf_restore_rx_interrupt(struct hns3_hw *hw) 2232 { 2233 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 2234 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2235 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2236 uint16_t q_id; 2237 int ret; 2238 2239 if (dev->data->dev_conf.intr_conf.rxq == 0) 2240 return 0; 2241 2242 if (rte_intr_dp_is_en(intr_handle)) { 2243 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 2244 ret = hns3vf_bind_ring_with_vector(hw, 2245 intr_handle->intr_vec[q_id], true, 2246 HNS3_RING_TYPE_RX, q_id); 2247 if (ret) 2248 return ret; 2249 } 2250 } 2251 2252 return 0; 2253 } 2254 2255 static void 2256 hns3vf_restore_filter(struct rte_eth_dev *dev) 2257 { 2258 hns3_restore_rss_filter(dev); 2259 } 2260 2261 static int 2262 hns3vf_dev_start(struct rte_eth_dev *dev) 2263 { 2264 struct hns3_adapter *hns = dev->data->dev_private; 2265 struct hns3_hw *hw = &hns->hw; 2266 int ret; 2267 2268 PMD_INIT_FUNC_TRACE(); 2269 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) 2270 return -EBUSY; 2271 2272 rte_spinlock_lock(&hw->lock); 2273 hw->adapter_state = HNS3_NIC_STARTING; 2274 ret = hns3vf_do_start(hns, true); 2275 if (ret) { 2276 hw->adapter_state = HNS3_NIC_CONFIGURED; 2277 rte_spinlock_unlock(&hw->lock); 2278 return ret; 2279 } 2280 ret = hns3vf_map_rx_interrupt(dev); 2281 if (ret) 2282 goto map_rx_inter_err; 2283 2284 /* 2285 * There are three registers used to control the status of a TQP 2286 * (which contains a pair of Tx and Rx queues) in the new version network 2287 * engine. One controls the enabling of the Tx queue, another controls the 2288 * enabling of the Rx queue, and the last is the master switch controlling 2289 * the enabling of the whole TQP. The Tx register and the TQP register must 2290 * both be enabled to enable a Tx queue, and the same applies to the Rx 2291 * queue. For the older network engine, this function only refreshes the 2292 * enabled flag, which is used to update the queue status in the DPDK 2293 * framework. 2294 */ 2295 ret = hns3_start_all_txqs(dev); 2296 if (ret) 2297 goto map_rx_inter_err; 2298 2299 ret = hns3_start_all_rxqs(dev); 2300 if (ret) 2301 goto start_all_rxqs_fail; 2302 2303 hw->adapter_state = HNS3_NIC_STARTED; 2304 rte_spinlock_unlock(&hw->lock); 2305 2306 hns3_rx_scattered_calc(dev); 2307 hns3_set_rxtx_function(dev); 2308 hns3_mp_req_start_rxtx(dev); 2309 hns3vf_service_handler(dev); 2310 2311 hns3vf_restore_filter(dev); 2312 2313 /* Enable interrupt of all rx queues before enabling queues */ 2314 hns3_dev_all_rx_queue_intr_enable(hw, true); 2315 2316 /* 2317 * After finishing the initialization, start all tqps to receive/transmit 2318 * packets and refresh all queue status.
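 * (hns3_start_tqps() below is presumably what flips the per-TQP master switch described above, after hns3_start_all_txqs()/hns3_start_all_rxqs() have enabled the individual Tx and Rx queues.)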
2319 */ 2320 hns3_start_tqps(hw); 2321 2322 return ret; 2323 2324 start_all_rxqs_fail: 2325 hns3_stop_all_txqs(dev); 2326 map_rx_inter_err: 2327 (void)hns3vf_do_stop(hns); 2328 hw->adapter_state = HNS3_NIC_CONFIGURED; 2329 rte_spinlock_unlock(&hw->lock); 2330 2331 return ret; 2332 } 2333 2334 static bool 2335 is_vf_reset_done(struct hns3_hw *hw) 2336 { 2337 #define HNS3_FUN_RST_ING_BITS \ 2338 (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \ 2339 BIT(HNS3_VECTOR0_CORERESET_INT_B) | \ 2340 BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \ 2341 BIT(HNS3_VECTOR0_FUNCRESET_INT_B)) 2342 2343 uint32_t val; 2344 2345 if (hw->reset.level == HNS3_VF_RESET) { 2346 val = hns3_read_dev(hw, HNS3_VF_RST_ING); 2347 if (val & HNS3_VF_RST_ING_BIT) 2348 return false; 2349 } else { 2350 val = hns3_read_dev(hw, HNS3_FUN_RST_ING); 2351 if (val & HNS3_FUN_RST_ING_BITS) 2352 return false; 2353 } 2354 return true; 2355 } 2356 2357 bool 2358 hns3vf_is_reset_pending(struct hns3_adapter *hns) 2359 { 2360 struct hns3_hw *hw = &hns->hw; 2361 enum hns3_reset_level reset; 2362 2363 /* 2364 * According to the protocol of PCIe, FLR to a PF device resets the PF 2365 * state as well as the SR-IOV extended capability including VF Enable 2366 * which means that VFs no longer exist. 2367 * 2368 * HNS3_VF_FULL_RESET means PF device is in FLR reset. when PF device 2369 * is in FLR stage, the register state of VF device is not reliable, 2370 * so register states detection can not be carried out. In this case, 2371 * we just ignore the register states and return false to indicate that 2372 * there are no other reset states that need to be processed by driver. 2373 */ 2374 if (hw->reset.level == HNS3_VF_FULL_RESET) 2375 return false; 2376 2377 /* Check the registers to confirm whether there is reset pending */ 2378 hns3vf_check_event_cause(hns, NULL); 2379 reset = hns3vf_get_reset_level(hw, &hw->reset.pending); 2380 if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) { 2381 hns3_warn(hw, "High level reset %d is pending", reset); 2382 return true; 2383 } 2384 return false; 2385 } 2386 2387 static int 2388 hns3vf_wait_hardware_ready(struct hns3_adapter *hns) 2389 { 2390 struct hns3_hw *hw = &hns->hw; 2391 struct hns3_wait_data *wait_data = hw->reset.wait_data; 2392 struct timeval tv; 2393 2394 if (wait_data->result == HNS3_WAIT_SUCCESS) { 2395 /* 2396 * After vf reset is ready, the PF may not have completed 2397 * the reset processing. The vf sending mbox to PF may fail 2398 * during the pf reset, so it is better to add extra delay. 2399 */ 2400 if (hw->reset.level == HNS3_VF_FUNC_RESET || 2401 hw->reset.level == HNS3_FLR_RESET) 2402 return 0; 2403 /* Reset retry process, no need to add extra delay. 
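 * (hw->reset.attempts is non-zero only when the reset is being retried, so the one-second delay scheduled below is added just once, on the first successful wait.)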
*/ 2404 if (hw->reset.attempts) 2405 return 0; 2406 if (wait_data->check_completion == NULL) 2407 return 0; 2408 2409 wait_data->check_completion = NULL; 2410 wait_data->interval = 1 * MSEC_PER_SEC * USEC_PER_MSEC; 2411 wait_data->count = 1; 2412 wait_data->result = HNS3_WAIT_REQUEST; 2413 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, 2414 wait_data); 2415 hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset complete"); 2416 return -EAGAIN; 2417 } else if (wait_data->result == HNS3_WAIT_TIMEOUT) { 2418 gettimeofday(&tv, NULL); 2419 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld", 2420 tv.tv_sec, tv.tv_usec); 2421 return -ETIME; 2422 } else if (wait_data->result == HNS3_WAIT_REQUEST) 2423 return -EAGAIN; 2424 2425 wait_data->hns = hns; 2426 wait_data->check_completion = is_vf_reset_done; 2427 wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT * 2428 HNS3VF_RESET_WAIT_MS + get_timeofday_ms(); 2429 wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC; 2430 wait_data->count = HNS3VF_RESET_WAIT_CNT; 2431 wait_data->result = HNS3_WAIT_REQUEST; 2432 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data); 2433 return -EAGAIN; 2434 } 2435 2436 static int 2437 hns3vf_prepare_reset(struct hns3_adapter *hns) 2438 { 2439 struct hns3_hw *hw = &hns->hw; 2440 int ret; 2441 2442 if (hw->reset.level == HNS3_VF_FUNC_RESET) { 2443 ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL, 2444 0, true, NULL, 0); 2445 if (ret) 2446 return ret; 2447 } 2448 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); 2449 2450 return 0; 2451 } 2452 2453 static int 2454 hns3vf_stop_service(struct hns3_adapter *hns) 2455 { 2456 struct hns3_hw *hw = &hns->hw; 2457 struct rte_eth_dev *eth_dev; 2458 2459 eth_dev = &rte_eth_devices[hw->data->port_id]; 2460 if (hw->adapter_state == HNS3_NIC_STARTED) { 2461 rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev); 2462 hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed, 2463 hw->mac.link_duplex); 2464 } 2465 hw->mac.link_status = ETH_LINK_DOWN; 2466 2467 hns3_set_rxtx_function(eth_dev); 2468 rte_wmb(); 2469 /* Disable datapath on secondary process. */ 2470 hns3_mp_req_stop_rxtx(eth_dev); 2471 rte_delay_ms(hw->tqps_num); 2472 2473 rte_spinlock_lock(&hw->lock); 2474 if (hw->adapter_state == HNS3_NIC_STARTED || 2475 hw->adapter_state == HNS3_NIC_STOPPING) { 2476 hns3_enable_all_queues(hw, false); 2477 hns3vf_do_stop(hns); 2478 hw->reset.mbuf_deferred_free = true; 2479 } else 2480 hw->reset.mbuf_deferred_free = false; 2481 2482 /* 2483 * It is cumbersome for hardware to pick-and-choose entries for deletion 2484 * from table space. Hence, for function reset software intervention is 2485 * required to delete the entries. 
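 * (This is why hns3vf_configure_all_mc_mac_addr(hns, true) is called below to remove the MC MAC entries while the command queue is still usable, i.e. while disable_cmd is still zero.)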
2486 */ 2487 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) 2488 hns3vf_configure_all_mc_mac_addr(hns, true); 2489 rte_spinlock_unlock(&hw->lock); 2490 2491 return 0; 2492 } 2493 2494 static int 2495 hns3vf_start_service(struct hns3_adapter *hns) 2496 { 2497 struct hns3_hw *hw = &hns->hw; 2498 struct rte_eth_dev *eth_dev; 2499 2500 eth_dev = &rte_eth_devices[hw->data->port_id]; 2501 hns3_set_rxtx_function(eth_dev); 2502 hns3_mp_req_start_rxtx(eth_dev); 2503 if (hw->adapter_state == HNS3_NIC_STARTED) { 2504 hns3vf_service_handler(eth_dev); 2505 2506 /* Enable interrupt of all rx queues before enabling queues */ 2507 hns3_dev_all_rx_queue_intr_enable(hw, true); 2508 /* 2509 * Enable state of each rxq and txq will be recovered after 2510 * reset, so we need to restore them before enable all tqps; 2511 */ 2512 hns3_restore_tqp_enable_state(hw); 2513 /* 2514 * When finished the initialization, enable queues to receive 2515 * and transmit packets. 2516 */ 2517 hns3_enable_all_queues(hw, true); 2518 } 2519 2520 return 0; 2521 } 2522 2523 static int 2524 hns3vf_check_default_mac_change(struct hns3_hw *hw) 2525 { 2526 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 2527 struct rte_ether_addr *hw_mac; 2528 int ret; 2529 2530 /* 2531 * The hns3 PF ethdev driver in kernel support setting VF MAC address 2532 * on the host by "ip link set ..." command. If the hns3 PF kernel 2533 * ethdev driver sets the MAC address for VF device after the 2534 * initialization of the related VF device, the PF driver will notify 2535 * VF driver to reset VF device to make the new MAC address effective 2536 * immediately. The hns3 VF PMD driver should check whether the MAC 2537 * address has been changed by the PF kernel ethdev driver, if changed 2538 * VF driver should configure hardware using the new MAC address in the 2539 * recovering hardware configuration stage of the reset process. 
2540 */ 2541 ret = hns3vf_get_host_mac_addr(hw); 2542 if (ret) 2543 return ret; 2544 2545 hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr; 2546 ret = rte_is_zero_ether_addr(hw_mac); 2547 if (ret) { 2548 rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac); 2549 } else { 2550 ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac); 2551 if (!ret) { 2552 rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]); 2553 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 2554 &hw->data->mac_addrs[0]); 2555 hns3_warn(hw, "Default MAC address has been changed to:" 2556 " %s by the host PF kernel ethdev driver", 2557 mac_str); 2558 } 2559 } 2560 2561 return 0; 2562 } 2563 2564 static int 2565 hns3vf_restore_conf(struct hns3_adapter *hns) 2566 { 2567 struct hns3_hw *hw = &hns->hw; 2568 int ret; 2569 2570 ret = hns3vf_check_default_mac_change(hw); 2571 if (ret) 2572 return ret; 2573 2574 ret = hns3vf_configure_mac_addr(hns, false); 2575 if (ret) 2576 return ret; 2577 2578 ret = hns3vf_configure_all_mc_mac_addr(hns, false); 2579 if (ret) 2580 goto err_mc_mac; 2581 2582 ret = hns3vf_restore_promisc(hns); 2583 if (ret) 2584 goto err_vlan_table; 2585 2586 ret = hns3vf_restore_vlan_conf(hns); 2587 if (ret) 2588 goto err_vlan_table; 2589 2590 ret = hns3vf_get_port_base_vlan_filter_state(hw); 2591 if (ret) 2592 goto err_vlan_table; 2593 2594 ret = hns3vf_restore_rx_interrupt(hw); 2595 if (ret) 2596 goto err_vlan_table; 2597 2598 ret = hns3_restore_gro_conf(hw); 2599 if (ret) 2600 goto err_vlan_table; 2601 2602 if (hw->adapter_state == HNS3_NIC_STARTED) { 2603 ret = hns3vf_do_start(hns, false); 2604 if (ret) 2605 goto err_vlan_table; 2606 hns3_info(hw, "hns3vf dev restart successful!"); 2607 } else if (hw->adapter_state == HNS3_NIC_STOPPING) 2608 hw->adapter_state = HNS3_NIC_CONFIGURED; 2609 return 0; 2610 2611 err_vlan_table: 2612 hns3vf_configure_all_mc_mac_addr(hns, true); 2613 err_mc_mac: 2614 hns3vf_configure_mac_addr(hns, true); 2615 return ret; 2616 } 2617 2618 static enum hns3_reset_level 2619 hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels) 2620 { 2621 enum hns3_reset_level reset_level; 2622 2623 /* return the highest priority reset level amongst all */ 2624 if (hns3_atomic_test_bit(HNS3_VF_RESET, levels)) 2625 reset_level = HNS3_VF_RESET; 2626 else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels)) 2627 reset_level = HNS3_VF_FULL_RESET; 2628 else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels)) 2629 reset_level = HNS3_VF_PF_FUNC_RESET; 2630 else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels)) 2631 reset_level = HNS3_VF_FUNC_RESET; 2632 else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels)) 2633 reset_level = HNS3_FLR_RESET; 2634 else 2635 reset_level = HNS3_NONE_RESET; 2636 2637 if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level) 2638 return HNS3_NONE_RESET; 2639 2640 return reset_level; 2641 } 2642 2643 static void 2644 hns3vf_reset_service(void *param) 2645 { 2646 struct hns3_adapter *hns = (struct hns3_adapter *)param; 2647 struct hns3_hw *hw = &hns->hw; 2648 enum hns3_reset_level reset_level; 2649 struct timeval tv_delta; 2650 struct timeval tv_start; 2651 struct timeval tv; 2652 uint64_t msec; 2653 2654 /* 2655 * The interrupt is not triggered within the delay time. 2656 * The interrupt may have been lost. It is necessary to handle 2657 * the interrupt to recover from the error. 
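 * (Calling hns3vf_interrupt_handler() here processes any event that may have been missed; the pending reset level is then re-evaluated below and a VF reset is requested if none was recorded.)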
2658 */ 2659 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == 2660 SCHEDULE_DEFERRED) { 2661 __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED, 2662 __ATOMIC_RELAXED); 2663 hns3_err(hw, "Handling interrupts in delayed tasks"); 2664 hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]); 2665 reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending); 2666 if (reset_level == HNS3_NONE_RESET) { 2667 hns3_err(hw, "No reset level is set, try global reset"); 2668 hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending); 2669 } 2670 } 2671 __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED); 2672 2673 /* 2674 * Hardware reset has been notified, we now have to poll & check if 2675 * hardware has actually completed the reset sequence. 2676 */ 2677 reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending); 2678 if (reset_level != HNS3_NONE_RESET) { 2679 gettimeofday(&tv_start, NULL); 2680 hns3_reset_process(hns, reset_level); 2681 gettimeofday(&tv, NULL); 2682 timersub(&tv, &tv_start, &tv_delta); 2683 msec = tv_delta.tv_sec * MSEC_PER_SEC + 2684 tv_delta.tv_usec / USEC_PER_MSEC; 2685 if (msec > HNS3_RESET_PROCESS_MS) 2686 hns3_err(hw, "%d handle long time delta %" PRIx64 2687 " ms time=%ld.%.6ld", 2688 hw->reset.level, msec, tv.tv_sec, tv.tv_usec); 2689 } 2690 } 2691 2692 static int 2693 hns3vf_reinit_dev(struct hns3_adapter *hns) 2694 { 2695 struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id]; 2696 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 2697 struct hns3_hw *hw = &hns->hw; 2698 int ret; 2699 2700 if (hw->reset.level == HNS3_VF_FULL_RESET) { 2701 rte_intr_disable(&pci_dev->intr_handle); 2702 ret = hns3vf_set_bus_master(pci_dev, true); 2703 if (ret < 0) { 2704 hns3_err(hw, "failed to set pci bus, ret = %d", ret); 2705 return ret; 2706 } 2707 } 2708 2709 /* Firmware command initialize */ 2710 ret = hns3_cmd_init(hw); 2711 if (ret) { 2712 hns3_err(hw, "Failed to init cmd: %d", ret); 2713 return ret; 2714 } 2715 2716 if (hw->reset.level == HNS3_VF_FULL_RESET) { 2717 /* 2718 * UIO enables msix by writing the pcie configuration space 2719 * vfio_pci enables msix in rte_intr_enable. 
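 * (This is why hns3vf_enable_msix() is only called for the igb_uio and uio_pci_generic kernel drivers below, while for vfio-pci the rte_intr_enable() call is sufficient.)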
2720 */ 2721 if (pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO || 2722 pci_dev->kdrv == RTE_PCI_KDRV_UIO_GENERIC) { 2723 if (hns3vf_enable_msix(pci_dev, true)) 2724 hns3_err(hw, "Failed to enable msix"); 2725 } 2726 2727 rte_intr_enable(&pci_dev->intr_handle); 2728 } 2729 2730 ret = hns3_reset_all_tqps(hns); 2731 if (ret) { 2732 hns3_err(hw, "Failed to reset all queues: %d", ret); 2733 return ret; 2734 } 2735 2736 ret = hns3vf_init_hardware(hns); 2737 if (ret) { 2738 hns3_err(hw, "Failed to init hardware: %d", ret); 2739 return ret; 2740 } 2741 2742 return 0; 2743 } 2744 2745 static const struct eth_dev_ops hns3vf_eth_dev_ops = { 2746 .dev_configure = hns3vf_dev_configure, 2747 .dev_start = hns3vf_dev_start, 2748 .dev_stop = hns3vf_dev_stop, 2749 .dev_close = hns3vf_dev_close, 2750 .mtu_set = hns3vf_dev_mtu_set, 2751 .promiscuous_enable = hns3vf_dev_promiscuous_enable, 2752 .promiscuous_disable = hns3vf_dev_promiscuous_disable, 2753 .allmulticast_enable = hns3vf_dev_allmulticast_enable, 2754 .allmulticast_disable = hns3vf_dev_allmulticast_disable, 2755 .stats_get = hns3_stats_get, 2756 .stats_reset = hns3_stats_reset, 2757 .xstats_get = hns3_dev_xstats_get, 2758 .xstats_get_names = hns3_dev_xstats_get_names, 2759 .xstats_reset = hns3_dev_xstats_reset, 2760 .xstats_get_by_id = hns3_dev_xstats_get_by_id, 2761 .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id, 2762 .dev_infos_get = hns3vf_dev_infos_get, 2763 .fw_version_get = hns3vf_fw_version_get, 2764 .rx_queue_setup = hns3_rx_queue_setup, 2765 .tx_queue_setup = hns3_tx_queue_setup, 2766 .rx_queue_release = hns3_dev_rx_queue_release, 2767 .tx_queue_release = hns3_dev_tx_queue_release, 2768 .rx_queue_start = hns3_dev_rx_queue_start, 2769 .rx_queue_stop = hns3_dev_rx_queue_stop, 2770 .tx_queue_start = hns3_dev_tx_queue_start, 2771 .tx_queue_stop = hns3_dev_tx_queue_stop, 2772 .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable, 2773 .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable, 2774 .rxq_info_get = hns3_rxq_info_get, 2775 .txq_info_get = hns3_txq_info_get, 2776 .rx_burst_mode_get = hns3_rx_burst_mode_get, 2777 .tx_burst_mode_get = hns3_tx_burst_mode_get, 2778 .mac_addr_add = hns3vf_add_mac_addr, 2779 .mac_addr_remove = hns3vf_remove_mac_addr, 2780 .mac_addr_set = hns3vf_set_default_mac_addr, 2781 .set_mc_addr_list = hns3vf_set_mc_mac_addr_list, 2782 .link_update = hns3vf_dev_link_update, 2783 .rss_hash_update = hns3_dev_rss_hash_update, 2784 .rss_hash_conf_get = hns3_dev_rss_hash_conf_get, 2785 .reta_update = hns3_dev_rss_reta_update, 2786 .reta_query = hns3_dev_rss_reta_query, 2787 .flow_ops_get = hns3_dev_flow_ops_get, 2788 .vlan_filter_set = hns3vf_vlan_filter_set, 2789 .vlan_offload_set = hns3vf_vlan_offload_set, 2790 .get_reg = hns3_get_regs, 2791 .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get, 2792 .tx_done_cleanup = hns3_tx_done_cleanup, 2793 }; 2794 2795 static const struct hns3_reset_ops hns3vf_reset_ops = { 2796 .reset_service = hns3vf_reset_service, 2797 .stop_service = hns3vf_stop_service, 2798 .prepare_reset = hns3vf_prepare_reset, 2799 .wait_hardware_ready = hns3vf_wait_hardware_ready, 2800 .reinit_dev = hns3vf_reinit_dev, 2801 .restore_conf = hns3vf_restore_conf, 2802 .start_service = hns3vf_start_service, 2803 }; 2804 2805 static int 2806 hns3vf_dev_init(struct rte_eth_dev *eth_dev) 2807 { 2808 struct hns3_adapter *hns = eth_dev->data->dev_private; 2809 struct hns3_hw *hw = &hns->hw; 2810 int ret; 2811 2812 PMD_INIT_FUNC_TRACE(); 2813 2814 eth_dev->process_private = (struct hns3_process_private *) 2815 
rte_zmalloc_socket("hns3_filter_list", 2816 sizeof(struct hns3_process_private), 2817 RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node); 2818 if (eth_dev->process_private == NULL) { 2819 PMD_INIT_LOG(ERR, "Failed to alloc memory for process private"); 2820 return -ENOMEM; 2821 } 2822 2823 /* initialize flow filter lists */ 2824 hns3_filterlist_init(eth_dev); 2825 2826 hns3_set_rxtx_function(eth_dev); 2827 eth_dev->dev_ops = &hns3vf_eth_dev_ops; 2828 eth_dev->rx_queue_count = hns3_rx_queue_count; 2829 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2830 ret = hns3_mp_init_secondary(); 2831 if (ret) { 2832 PMD_INIT_LOG(ERR, "Failed to init for secondary " 2833 "process, ret = %d", ret); 2834 goto err_mp_init_secondary; 2835 } 2836 2837 hw->secondary_cnt++; 2838 return 0; 2839 } 2840 2841 ret = hns3_mp_init_primary(); 2842 if (ret) { 2843 PMD_INIT_LOG(ERR, 2844 "Failed to init for primary process, ret = %d", 2845 ret); 2846 goto err_mp_init_primary; 2847 } 2848 2849 hw->adapter_state = HNS3_NIC_UNINITIALIZED; 2850 hns->is_vf = true; 2851 hw->data = eth_dev->data; 2852 hns3_parse_devargs(eth_dev); 2853 2854 ret = hns3_reset_init(hw); 2855 if (ret) 2856 goto err_init_reset; 2857 hw->reset.ops = &hns3vf_reset_ops; 2858 2859 ret = hns3vf_init_vf(eth_dev); 2860 if (ret) { 2861 PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret); 2862 goto err_init_vf; 2863 } 2864 2865 /* Allocate memory for storing MAC addresses */ 2866 eth_dev->data->mac_addrs = rte_zmalloc("hns3vf-mac", 2867 sizeof(struct rte_ether_addr) * 2868 HNS3_VF_UC_MACADDR_NUM, 0); 2869 if (eth_dev->data->mac_addrs == NULL) { 2870 PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed " 2871 "to store MAC addresses", 2872 sizeof(struct rte_ether_addr) * 2873 HNS3_VF_UC_MACADDR_NUM); 2874 ret = -ENOMEM; 2875 goto err_rte_zmalloc; 2876 } 2877 2878 /* 2879 * The hns3 PF ethdev driver in the kernel supports setting the VF MAC 2880 * address on the host with the "ip link set ..." command. To avoid 2881 * incorrect scenarios, for example, the hns3 VF PMD driver failing to 2882 * receive and send packets after the user configures the MAC address with 2883 * the "ip link set ..." command, the hns3 VF PMD driver keeps the same MAC 2884 * address strategy as the hns3 kernel ethdev driver during initialization. 2885 * If the user configures a MAC address for the VF device with the ip 2886 * command, the hns3 VF PMD driver starts with it; otherwise it starts 2887 * with a random MAC address.
2888 */ 2889 if (rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr)) 2890 rte_eth_random_addr(hw->mac.mac_addr); 2891 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr, 2892 ð_dev->data->mac_addrs[0]); 2893 2894 hw->adapter_state = HNS3_NIC_INITIALIZED; 2895 2896 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == 2897 SCHEDULE_PENDING) { 2898 hns3_err(hw, "Reschedule reset service after dev_init"); 2899 hns3_schedule_reset(hns); 2900 } else { 2901 /* IMP will wait ready flag before reset */ 2902 hns3_notify_reset_ready(hw, false); 2903 } 2904 rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler, 2905 eth_dev); 2906 return 0; 2907 2908 err_rte_zmalloc: 2909 hns3vf_uninit_vf(eth_dev); 2910 2911 err_init_vf: 2912 rte_free(hw->reset.wait_data); 2913 2914 err_init_reset: 2915 hns3_mp_uninit_primary(); 2916 2917 err_mp_init_primary: 2918 err_mp_init_secondary: 2919 eth_dev->dev_ops = NULL; 2920 eth_dev->rx_pkt_burst = NULL; 2921 eth_dev->rx_descriptor_status = NULL; 2922 eth_dev->tx_pkt_burst = NULL; 2923 eth_dev->tx_pkt_prepare = NULL; 2924 eth_dev->tx_descriptor_status = NULL; 2925 rte_free(eth_dev->process_private); 2926 eth_dev->process_private = NULL; 2927 2928 return ret; 2929 } 2930 2931 static int 2932 hns3vf_dev_uninit(struct rte_eth_dev *eth_dev) 2933 { 2934 struct hns3_adapter *hns = eth_dev->data->dev_private; 2935 struct hns3_hw *hw = &hns->hw; 2936 2937 PMD_INIT_FUNC_TRACE(); 2938 2939 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2940 rte_free(eth_dev->process_private); 2941 eth_dev->process_private = NULL; 2942 return 0; 2943 } 2944 2945 if (hw->adapter_state < HNS3_NIC_CLOSING) 2946 hns3vf_dev_close(eth_dev); 2947 2948 hw->adapter_state = HNS3_NIC_REMOVED; 2949 return 0; 2950 } 2951 2952 static int 2953 eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2954 struct rte_pci_device *pci_dev) 2955 { 2956 return rte_eth_dev_pci_generic_probe(pci_dev, 2957 sizeof(struct hns3_adapter), 2958 hns3vf_dev_init); 2959 } 2960 2961 static int 2962 eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev) 2963 { 2964 return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit); 2965 } 2966 2967 static const struct rte_pci_id pci_id_hns3vf_map[] = { 2968 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) }, 2969 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) }, 2970 { .vendor_id = 0, }, /* sentinel */ 2971 }; 2972 2973 static struct rte_pci_driver rte_hns3vf_pmd = { 2974 .id_table = pci_id_hns3vf_map, 2975 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 2976 .probe = eth_hns3vf_pci_probe, 2977 .remove = eth_hns3vf_pci_remove, 2978 }; 2979 2980 RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd); 2981 RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map); 2982 RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci"); 2983 RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf, 2984 HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common " 2985 HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "); 2986