1 /* 2 * Copyright (c) 2016 QLogic Corporation. 3 * All rights reserved. 4 * www.qlogic.com 5 * 6 * See LICENSE.qede_pmd for copyright and licensing details. 7 */ 8 9 #include <limits.h> 10 #include <time.h> 11 #include <rte_alarm.h> 12 13 #include "qede_ethdev.h" 14 15 static uint8_t npar_tx_switching = 1; 16 17 /* Alarm timeout. */ 18 #define QEDE_ALARM_TIMEOUT_US 100000 19 20 /* Global variable to hold absolute path of fw file */ 21 char fw_file[PATH_MAX]; 22 23 const char *QEDE_DEFAULT_FIRMWARE = 24 "/lib/firmware/qed/qed_init_values-8.18.9.0.bin"; 25 26 static void 27 qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params) 28 { 29 int i; 30 31 for (i = 0; i < edev->num_hwfns; i++) { 32 struct ecore_hwfn *p_hwfn = &edev->hwfns[i]; 33 p_hwfn->pf_params = *params; 34 } 35 } 36 37 static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev) 38 { 39 edev->regview = pci_dev->mem_resource[0].addr; 40 edev->doorbells = pci_dev->mem_resource[2].addr; 41 } 42 43 static int 44 qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev, 45 enum qed_protocol protocol, uint32_t dp_module, 46 uint8_t dp_level, bool is_vf) 47 { 48 struct ecore_hw_prepare_params hw_prepare_params; 49 struct qede_dev *qdev = (struct qede_dev *)edev; 50 int rc; 51 52 ecore_init_struct(edev); 53 edev->drv_type = DRV_ID_DRV_TYPE_LINUX; 54 qdev->protocol = protocol; 55 56 if (is_vf) 57 edev->b_is_vf = true; 58 59 ecore_init_dp(edev, dp_module, dp_level, NULL); 60 qed_init_pci(edev, pci_dev); 61 62 memset(&hw_prepare_params, 0, sizeof(hw_prepare_params)); 63 hw_prepare_params.personality = ECORE_PCI_ETH; 64 hw_prepare_params.drv_resc_alloc = false; 65 hw_prepare_params.chk_reg_fifo = false; 66 hw_prepare_params.initiate_pf_flr = true; 67 hw_prepare_params.epoch = (u32)time(NULL); 68 rc = ecore_hw_prepare(edev, &hw_prepare_params); 69 if (rc) { 70 DP_ERR(edev, "hw prepare failed\n"); 71 return rc; 72 } 73 74 return rc; 75 } 76 77 static int qed_nic_setup(struct ecore_dev *edev) 78 { 79 int rc, i; 80 81 rc = ecore_resc_alloc(edev); 82 if (rc) 83 return rc; 84 85 DP_INFO(edev, "Allocated qed resources\n"); 86 ecore_resc_setup(edev); 87 88 return rc; 89 } 90 91 #ifdef CONFIG_ECORE_ZIPPED_FW 92 static int qed_alloc_stream_mem(struct ecore_dev *edev) 93 { 94 int i; 95 96 for_each_hwfn(edev, i) { 97 struct ecore_hwfn *p_hwfn = &edev->hwfns[i]; 98 99 p_hwfn->stream = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 100 sizeof(*p_hwfn->stream)); 101 if (!p_hwfn->stream) 102 return -ENOMEM; 103 } 104 105 return 0; 106 } 107 108 static void qed_free_stream_mem(struct ecore_dev *edev) 109 { 110 int i; 111 112 for_each_hwfn(edev, i) { 113 struct ecore_hwfn *p_hwfn = &edev->hwfns[i]; 114 115 if (!p_hwfn->stream) 116 return; 117 118 OSAL_FREE(p_hwfn->p_dev, p_hwfn->stream); 119 } 120 } 121 #endif 122 123 #ifdef CONFIG_ECORE_BINARY_FW 124 static int qed_load_firmware_data(struct ecore_dev *edev) 125 { 126 int fd; 127 struct stat st; 128 const char *fw = RTE_LIBRTE_QEDE_FW; 129 130 if (strcmp(fw, "") == 0) 131 strcpy(fw_file, QEDE_DEFAULT_FIRMWARE); 132 else 133 strcpy(fw_file, fw); 134 135 fd = open(fw_file, O_RDONLY); 136 if (fd < 0) { 137 DP_NOTICE(edev, false, "Can't open firmware file\n"); 138 return -ENOENT; 139 } 140 141 if (fstat(fd, &st) < 0) { 142 DP_NOTICE(edev, false, "Can't stat firmware file\n"); 143 close(fd); 144 return -1; 145 } 146 147 edev->firmware = rte_zmalloc("qede_fw", st.st_size, 148 RTE_CACHE_LINE_SIZE); 149 if (!edev->firmware) { 150 DP_NOTICE(edev, false, "Can't allocate memory for firmware\n"); 151 close(fd); 152 return -ENOMEM; 153 } 154 155 if (read(fd, edev->firmware, st.st_size) != st.st_size) { 156 DP_NOTICE(edev, false, "Can't read firmware data\n"); 157 close(fd); 158 return -1; 159 } 160 161 edev->fw_len = st.st_size; 162 if (edev->fw_len < 104) { 163 DP_NOTICE(edev, false, "Invalid fw size: %" PRIu64 "\n", 164 edev->fw_len); 165 close(fd); 166 return -EINVAL; 167 } 168 169 close(fd); 170 return 0; 171 } 172 #endif 173 174 static void qed_handle_bulletin_change(struct ecore_hwfn *hwfn) 175 { 176 uint8_t mac[ETH_ALEN], is_mac_exist, is_mac_forced; 177 178 is_mac_exist = ecore_vf_bulletin_get_forced_mac(hwfn, mac, 179 &is_mac_forced); 180 if (is_mac_exist && is_mac_forced) 181 rte_memcpy(hwfn->hw_info.hw_mac_addr, mac, ETH_ALEN); 182 183 /* Always update link configuration according to bulletin */ 184 qed_link_update(hwfn); 185 } 186 187 static void qede_vf_task(void *arg) 188 { 189 struct ecore_hwfn *p_hwfn = arg; 190 uint8_t change = 0; 191 192 /* Read the bulletin board, and re-schedule the task */ 193 ecore_vf_read_bulletin(p_hwfn, &change); 194 if (change) 195 qed_handle_bulletin_change(p_hwfn); 196 197 rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task, p_hwfn); 198 } 199 200 static void qed_start_iov_task(struct ecore_dev *edev) 201 { 202 struct ecore_hwfn *p_hwfn; 203 int i; 204 205 for_each_hwfn(edev, i) { 206 p_hwfn = &edev->hwfns[i]; 207 if (!IS_PF(edev)) 208 rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task, 209 p_hwfn); 210 } 211 } 212 213 static void qed_stop_iov_task(struct ecore_dev *edev) 214 { 215 struct ecore_hwfn *p_hwfn; 216 int i; 217 218 for_each_hwfn(edev, i) { 219 p_hwfn = &edev->hwfns[i]; 220 if (!IS_PF(edev)) 221 rte_eal_alarm_cancel(qede_vf_task, p_hwfn); 222 } 223 } 224 static int qed_slowpath_start(struct ecore_dev *edev, 225 struct qed_slowpath_params *params) 226 { 227 bool allow_npar_tx_switching; 228 const uint8_t *data = NULL; 229 struct ecore_hwfn *hwfn; 230 struct ecore_mcp_drv_version drv_version; 231 struct ecore_hw_init_params hw_init_params; 232 struct qede_dev *qdev = (struct qede_dev *)edev; 233 int rc; 234 235 #ifdef CONFIG_ECORE_BINARY_FW 236 if (IS_PF(edev)) { 237 rc = qed_load_firmware_data(edev); 238 if (rc) { 239 DP_ERR(edev, "Failed to find fw file %s\n", fw_file); 240 goto err; 241 } 242 } 243 #endif 244 245 rc = qed_nic_setup(edev); 246 if (rc) 247 goto err; 248 249 /* set int_coalescing_mode */ 250 edev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE; 251 252 #ifdef CONFIG_ECORE_ZIPPED_FW 253 if (IS_PF(edev)) { 254 /* Allocate stream for unzipping */ 255 rc = qed_alloc_stream_mem(edev); 256 if (rc) { 257 DP_NOTICE(edev, true, 258 "Failed to allocate stream memory\n"); 259 goto err2; 260 } 261 } 262 263 qed_start_iov_task(edev); 264 #endif 265 266 #ifdef CONFIG_ECORE_BINARY_FW 267 if (IS_PF(edev)) 268 data = (const uint8_t *)edev->firmware + sizeof(u32); 269 #endif 270 271 allow_npar_tx_switching = npar_tx_switching ? true : false; 272 273 /* Start the slowpath */ 274 memset(&hw_init_params, 0, sizeof(hw_init_params)); 275 hw_init_params.b_hw_start = true; 276 hw_init_params.int_mode = ECORE_INT_MODE_MSIX; 277 hw_init_params.allow_npar_tx_switch = allow_npar_tx_switching; 278 hw_init_params.bin_fw_data = data; 279 hw_init_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT; 280 hw_init_params.avoid_eng_reset = false; 281 rc = ecore_hw_init(edev, &hw_init_params); 282 if (rc) { 283 DP_ERR(edev, "ecore_hw_init failed\n"); 284 goto err2; 285 } 286 287 DP_INFO(edev, "HW inited and function started\n"); 288 289 if (IS_PF(edev)) { 290 hwfn = ECORE_LEADING_HWFN(edev); 291 drv_version.version = (params->drv_major << 24) | 292 (params->drv_minor << 16) | 293 (params->drv_rev << 8) | (params->drv_eng); 294 /* TBD: strlcpy() */ 295 strncpy((char *)drv_version.name, (const char *)params->name, 296 MCP_DRV_VER_STR_SIZE - 4); 297 rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, 298 &drv_version); 299 if (rc) { 300 DP_NOTICE(edev, true, 301 "Failed sending drv version command\n"); 302 return rc; 303 } 304 } 305 306 ecore_reset_vport_stats(edev); 307 308 return 0; 309 310 ecore_hw_stop(edev); 311 err2: 312 ecore_resc_free(edev); 313 err: 314 #ifdef CONFIG_ECORE_BINARY_FW 315 if (IS_PF(edev)) { 316 if (edev->firmware) 317 rte_free(edev->firmware); 318 edev->firmware = NULL; 319 } 320 #endif 321 qed_stop_iov_task(edev); 322 323 return rc; 324 } 325 326 static int 327 qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info) 328 { 329 struct ecore_ptt *ptt = NULL; 330 struct ecore_tunnel_info *tun = &edev->tunnel; 331 332 memset(dev_info, 0, sizeof(struct qed_dev_info)); 333 334 if (tun->vxlan.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN && 335 tun->vxlan.b_mode_enabled) 336 dev_info->vxlan_enable = true; 337 338 if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && 339 tun->l2_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN && 340 tun->ip_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN) 341 dev_info->gre_enable = true; 342 343 if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && 344 tun->l2_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN && 345 tun->ip_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN) 346 dev_info->geneve_enable = true; 347 348 dev_info->num_hwfns = edev->num_hwfns; 349 dev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]); 350 dev_info->mtu = ECORE_LEADING_HWFN(edev)->hw_info.mtu; 351 352 rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr, 353 ETHER_ADDR_LEN); 354 355 if (IS_PF(edev)) { 356 dev_info->fw_major = FW_MAJOR_VERSION; 357 dev_info->fw_minor = FW_MINOR_VERSION; 358 dev_info->fw_rev = FW_REVISION_VERSION; 359 dev_info->fw_eng = FW_ENGINEERING_VERSION; 360 dev_info->mf_mode = edev->mf_mode; 361 dev_info->tx_switching = false; 362 363 ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev)); 364 if (ptt) { 365 ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt, 366 &dev_info->mfw_rev, NULL); 367 368 ecore_mcp_get_flash_size(ECORE_LEADING_HWFN(edev), ptt, 369 &dev_info->flash_size); 370 371 /* Workaround to allow PHY-read commands for 372 * B0 bringup. 373 */ 374 if (ECORE_IS_BB_B0(edev)) 375 dev_info->flash_size = 0xffffffff; 376 377 ecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt); 378 } 379 } else { 380 ecore_vf_get_fw_version(&edev->hwfns[0], &dev_info->fw_major, 381 &dev_info->fw_minor, &dev_info->fw_rev, 382 &dev_info->fw_eng); 383 384 ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt, 385 &dev_info->mfw_rev, NULL); 386 } 387 388 return 0; 389 } 390 391 int 392 qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info) 393 { 394 struct qede_dev *qdev = (struct qede_dev *)edev; 395 uint8_t queues = 0; 396 int i; 397 398 memset(info, 0, sizeof(*info)); 399 400 info->num_tc = 1 /* @@@TBD aelior MULTI_COS */; 401 402 if (IS_PF(edev)) { 403 int max_vf_vlan_filters = 0; 404 405 info->num_queues = 0; 406 for_each_hwfn(edev, i) 407 info->num_queues += 408 FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE); 409 410 if (edev->p_iov_info) 411 max_vf_vlan_filters = edev->p_iov_info->total_vfs * 412 ECORE_ETH_VF_NUM_VLAN_FILTERS; 413 info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN) - 414 max_vf_vlan_filters; 415 416 rte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr, 417 ETHER_ADDR_LEN); 418 } else { 419 ecore_vf_get_num_rxqs(ECORE_LEADING_HWFN(edev), 420 &info->num_queues); 421 if (edev->num_hwfns > 1) { 422 ecore_vf_get_num_rxqs(&edev->hwfns[1], &queues); 423 info->num_queues += queues; 424 } 425 426 ecore_vf_get_num_vlan_filters(&edev->hwfns[0], 427 (u8 *)&info->num_vlan_filters); 428 429 ecore_vf_get_port_mac(&edev->hwfns[0], 430 (uint8_t *)&info->port_mac); 431 432 info->is_legacy = ecore_vf_get_pre_fp_hsi(&edev->hwfns[0]); 433 } 434 435 qed_fill_dev_info(edev, &info->common); 436 437 if (IS_VF(edev)) 438 memset(&info->common.hw_mac, 0, ETHER_ADDR_LEN); 439 440 return 0; 441 } 442 443 static void qed_set_name(struct ecore_dev *edev, char name[NAME_SIZE]) 444 { 445 int i; 446 447 rte_memcpy(edev->name, name, NAME_SIZE); 448 for_each_hwfn(edev, i) { 449 snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 450 } 451 } 452 453 static uint32_t 454 qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info, 455 void *sb_virt_addr, dma_addr_t sb_phy_addr, 456 uint16_t sb_id, enum qed_sb_type type) 457 { 458 struct ecore_hwfn *p_hwfn; 459 int hwfn_index; 460 uint16_t rel_sb_id; 461 uint8_t n_hwfns; 462 uint32_t rc; 463 464 /* RoCE uses single engine and CMT uses two engines. When using both 465 * we force only a single engine. Storage uses only engine 0 too. 466 */ 467 if (type == QED_SB_TYPE_L2_QUEUE) 468 n_hwfns = edev->num_hwfns; 469 else 470 n_hwfns = 1; 471 472 hwfn_index = sb_id % n_hwfns; 473 p_hwfn = &edev->hwfns[hwfn_index]; 474 rel_sb_id = sb_id / n_hwfns; 475 476 DP_INFO(edev, "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 477 hwfn_index, rel_sb_id, sb_id); 478 479 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, 480 sb_virt_addr, sb_phy_addr, rel_sb_id); 481 482 return rc; 483 } 484 485 static void qed_fill_link(struct ecore_hwfn *hwfn, 486 struct qed_link_output *if_link) 487 { 488 struct ecore_mcp_link_params params; 489 struct ecore_mcp_link_state link; 490 struct ecore_mcp_link_capabilities link_caps; 491 uint32_t media_type; 492 uint8_t change = 0; 493 494 memset(if_link, 0, sizeof(*if_link)); 495 496 /* Prepare source inputs */ 497 if (IS_PF(hwfn->p_dev)) { 498 rte_memcpy(¶ms, ecore_mcp_get_link_params(hwfn), 499 sizeof(params)); 500 rte_memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link)); 501 rte_memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn), 502 sizeof(link_caps)); 503 } else { 504 ecore_vf_read_bulletin(hwfn, &change); 505 ecore_vf_get_link_params(hwfn, ¶ms); 506 ecore_vf_get_link_state(hwfn, &link); 507 ecore_vf_get_link_caps(hwfn, &link_caps); 508 } 509 510 /* Set the link parameters to pass to protocol driver */ 511 if (link.link_up) 512 if_link->link_up = true; 513 514 if (link.link_up) 515 if_link->speed = link.speed; 516 517 if_link->duplex = QEDE_DUPLEX_FULL; 518 519 /* Fill up the native advertised speed cap mask */ 520 if_link->adv_speed = params.speed.advertised_speeds; 521 522 if (params.speed.autoneg) 523 if_link->supported_caps |= QEDE_SUPPORTED_AUTONEG; 524 525 if (params.pause.autoneg || params.pause.forced_rx || 526 params.pause.forced_tx) 527 if_link->supported_caps |= QEDE_SUPPORTED_PAUSE; 528 529 if (params.pause.autoneg) 530 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 531 532 if (params.pause.forced_rx) 533 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; 534 535 if (params.pause.forced_tx) 536 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; 537 } 538 539 static void 540 qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link) 541 { 542 qed_fill_link(&edev->hwfns[0], if_link); 543 544 #ifdef CONFIG_QED_SRIOV 545 for_each_hwfn(cdev, i) 546 qed_inform_vf_link_state(&cdev->hwfns[i]); 547 #endif 548 } 549 550 static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params) 551 { 552 struct ecore_hwfn *hwfn; 553 struct ecore_ptt *ptt; 554 struct ecore_mcp_link_params *link_params; 555 int rc; 556 557 if (IS_VF(edev)) 558 return 0; 559 560 /* The link should be set only once per PF */ 561 hwfn = &edev->hwfns[0]; 562 563 ptt = ecore_ptt_acquire(hwfn); 564 if (!ptt) 565 return -EBUSY; 566 567 link_params = ecore_mcp_get_link_params(hwfn); 568 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) 569 link_params->speed.autoneg = params->autoneg; 570 571 if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { 572 if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 573 link_params->pause.autoneg = true; 574 else 575 link_params->pause.autoneg = false; 576 if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) 577 link_params->pause.forced_rx = true; 578 else 579 link_params->pause.forced_rx = false; 580 if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) 581 link_params->pause.forced_tx = true; 582 else 583 link_params->pause.forced_tx = false; 584 } 585 586 rc = ecore_mcp_set_link(hwfn, ptt, params->link_up); 587 588 ecore_ptt_release(hwfn, ptt); 589 590 return rc; 591 } 592 593 void qed_link_update(struct ecore_hwfn *hwfn) 594 { 595 struct qed_link_output if_link; 596 597 qed_fill_link(hwfn, &if_link); 598 } 599 600 static int qed_drain(struct ecore_dev *edev) 601 { 602 struct ecore_hwfn *hwfn; 603 struct ecore_ptt *ptt; 604 int i, rc; 605 606 if (IS_VF(edev)) 607 return 0; 608 609 for_each_hwfn(edev, i) { 610 hwfn = &edev->hwfns[i]; 611 ptt = ecore_ptt_acquire(hwfn); 612 if (!ptt) { 613 DP_NOTICE(hwfn, true, "Failed to drain NIG; No PTT\n"); 614 return -EBUSY; 615 } 616 rc = ecore_mcp_drain(hwfn, ptt); 617 if (rc) 618 return rc; 619 ecore_ptt_release(hwfn, ptt); 620 } 621 622 return 0; 623 } 624 625 static int qed_nic_stop(struct ecore_dev *edev) 626 { 627 int i, rc; 628 629 rc = ecore_hw_stop(edev); 630 for (i = 0; i < edev->num_hwfns; i++) { 631 struct ecore_hwfn *p_hwfn = &edev->hwfns[i]; 632 633 if (p_hwfn->b_sp_dpc_enabled) 634 p_hwfn->b_sp_dpc_enabled = false; 635 } 636 return rc; 637 } 638 639 static int qed_slowpath_stop(struct ecore_dev *edev) 640 { 641 #ifdef CONFIG_QED_SRIOV 642 int i; 643 #endif 644 645 if (!edev) 646 return -ENODEV; 647 648 if (IS_PF(edev)) { 649 #ifdef CONFIG_ECORE_ZIPPED_FW 650 qed_free_stream_mem(edev); 651 #endif 652 653 #ifdef CONFIG_QED_SRIOV 654 if (IS_QED_ETH_IF(edev)) 655 qed_sriov_disable(edev, true); 656 #endif 657 } 658 659 qed_nic_stop(edev); 660 661 ecore_resc_free(edev); 662 qed_stop_iov_task(edev); 663 664 return 0; 665 } 666 667 static void qed_remove(struct ecore_dev *edev) 668 { 669 if (!edev) 670 return; 671 672 ecore_hw_remove(edev); 673 } 674 675 static int qed_send_drv_state(struct ecore_dev *edev, bool active) 676 { 677 struct ecore_hwfn *hwfn = ECORE_LEADING_HWFN(edev); 678 struct ecore_ptt *ptt; 679 int status = 0; 680 681 ptt = ecore_ptt_acquire(hwfn); 682 if (!ptt) 683 return -EAGAIN; 684 685 status = ecore_mcp_ov_update_driver_state(hwfn, ptt, active ? 686 ECORE_OV_DRIVER_STATE_ACTIVE : 687 ECORE_OV_DRIVER_STATE_DISABLED); 688 689 ecore_ptt_release(hwfn, ptt); 690 691 return status; 692 } 693 694 static int qed_get_sb_info(struct ecore_dev *edev, struct ecore_sb_info *sb, 695 u16 qid, struct ecore_sb_info_dbg *sb_dbg) 696 { 697 struct ecore_hwfn *hwfn = &edev->hwfns[qid % edev->num_hwfns]; 698 struct ecore_ptt *ptt; 699 int rc; 700 701 if (IS_VF(edev)) 702 return -EINVAL; 703 704 ptt = ecore_ptt_acquire(hwfn); 705 if (!ptt) { 706 DP_NOTICE(hwfn, true, "Can't acquire PTT\n"); 707 return -EAGAIN; 708 } 709 710 memset(sb_dbg, 0, sizeof(*sb_dbg)); 711 rc = ecore_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg); 712 713 ecore_ptt_release(hwfn, ptt); 714 return rc; 715 } 716 717 const struct qed_common_ops qed_common_ops_pass = { 718 INIT_STRUCT_FIELD(probe, &qed_probe), 719 INIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params), 720 INIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start), 721 INIT_STRUCT_FIELD(set_name, &qed_set_name), 722 INIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc), 723 INIT_STRUCT_FIELD(chain_free, &ecore_chain_free), 724 INIT_STRUCT_FIELD(sb_init, &qed_sb_init), 725 INIT_STRUCT_FIELD(get_link, &qed_get_current_link), 726 INIT_STRUCT_FIELD(set_link, &qed_set_link), 727 INIT_STRUCT_FIELD(drain, &qed_drain), 728 INIT_STRUCT_FIELD(slowpath_stop, &qed_slowpath_stop), 729 INIT_STRUCT_FIELD(remove, &qed_remove), 730 INIT_STRUCT_FIELD(send_drv_state, &qed_send_drv_state), 731 }; 732