1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2007-2013 Broadcom Corporation. 3 * 4 * Eric Davis <edavis@broadcom.com> 5 * David Christensen <davidch@broadcom.com> 6 * Gary Zambrano <zambrano@broadcom.com> 7 * 8 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. 9 * Copyright (c) 2015-2018 Cavium Inc. 10 * All rights reserved. 11 * www.cavium.com 12 */ 13 14 #define BNX2X_DRIVER_VERSION "1.78.18" 15 16 #include "bnx2x.h" 17 #include "bnx2x_vfpf.h" 18 #include "ecore_sp.h" 19 #include "ecore_init.h" 20 #include "ecore_init_ops.h" 21 22 #include "rte_version.h" 23 24 #include <sys/types.h> 25 #include <sys/stat.h> 26 #include <arpa/inet.h> 27 #include <fcntl.h> 28 #include <zlib.h> 29 30 #include <rte_bitops.h> 31 #include <rte_string_fns.h> 32 33 #include "eal_firmware.h" 34 35 #define BNX2X_PMD_VER_PREFIX "BNX2X PMD" 36 #define BNX2X_PMD_VERSION_MAJOR 1 37 #define BNX2X_PMD_VERSION_MINOR 1 38 #define BNX2X_PMD_VERSION_REVISION 0 39 #define BNX2X_PMD_VERSION_PATCH 1 40 41 static inline const char * 42 bnx2x_pmd_version(void) 43 { 44 static char version[32]; 45 46 snprintf(version, sizeof(version), "%s %s_%d.%d.%d.%d", 47 BNX2X_PMD_VER_PREFIX, 48 BNX2X_DRIVER_VERSION, 49 BNX2X_PMD_VERSION_MAJOR, 50 BNX2X_PMD_VERSION_MINOR, 51 BNX2X_PMD_VERSION_REVISION, 52 BNX2X_PMD_VERSION_PATCH); 53 54 return version; 55 } 56 57 static z_stream zlib_stream; 58 59 #define EVL_VLID_MASK 0x0FFF 60 61 #define BNX2X_DEF_SB_ATT_IDX 0x0001 62 #define BNX2X_DEF_SB_IDX 0x0002 63 64 /* 65 * FLR Support - bnx2x_pf_flr_clnup() is called during nic_load in the per 66 * function HW initialization. 67 */ 68 #define FLR_WAIT_USEC 10000 /* 10 msecs */ 69 #define FLR_WAIT_INTERVAL 50 /* usecs */ 70 #define FLR_POLL_CNT (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */ 71 72 struct pbf_pN_buf_regs { 73 int pN; 74 uint32_t init_crd; 75 uint32_t crd; 76 uint32_t crd_freed; 77 }; 78 79 struct pbf_pN_cmd_regs { 80 int pN; 81 uint32_t lines_occup; 82 uint32_t lines_freed; 83 }; 84 85 /* resources needed for unloading a previously loaded device */ 86 87 #define BNX2X_PREV_WAIT_NEEDED 1 88 rte_spinlock_t bnx2x_prev_mtx; 89 struct bnx2x_prev_list_node { 90 LIST_ENTRY(bnx2x_prev_list_node) node; 91 uint8_t bus; 92 uint8_t slot; 93 uint8_t path; 94 uint8_t aer; 95 uint8_t undi; 96 }; 97 98 static LIST_HEAD(, bnx2x_prev_list_node) bnx2x_prev_list 99 = LIST_HEAD_INITIALIZER(bnx2x_prev_list); 100 101 static int load_count[2][3] = { { 0 } }; 102 /* per-path: 0-common, 1-port0, 2-port1 */ 103 104 static void bnx2x_cmng_fns_init(struct bnx2x_softc *sc, uint8_t read_cfg, 105 uint8_t cmng_type); 106 static int bnx2x_get_cmng_fns_mode(struct bnx2x_softc *sc); 107 static void storm_memset_cmng(struct bnx2x_softc *sc, struct cmng_init *cmng, 108 uint8_t port); 109 static void bnx2x_set_reset_global(struct bnx2x_softc *sc); 110 static void bnx2x_set_reset_in_progress(struct bnx2x_softc *sc); 111 static uint8_t bnx2x_reset_is_done(struct bnx2x_softc *sc, int engine); 112 static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc); 113 static uint8_t bnx2x_chk_parity_attn(struct bnx2x_softc *sc, uint8_t * global, 114 uint8_t print); 115 static void bnx2x_int_disable(struct bnx2x_softc *sc); 116 static int bnx2x_release_leader_lock(struct bnx2x_softc *sc); 117 static void bnx2x_pf_disable(struct bnx2x_softc *sc); 118 static void bnx2x_update_rx_prod(struct bnx2x_softc *sc, 119 struct bnx2x_fastpath *fp, 120 uint16_t rx_bd_prod, uint16_t rx_cq_prod); 121 static void bnx2x_link_report_locked(struct bnx2x_softc *sc); 122 static 
void bnx2x_link_report(struct bnx2x_softc *sc); 123 void bnx2x_link_status_update(struct bnx2x_softc *sc); 124 static int bnx2x_alloc_mem(struct bnx2x_softc *sc); 125 static void bnx2x_free_mem(struct bnx2x_softc *sc); 126 static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc); 127 static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc); 128 static __rte_noinline 129 int bnx2x_nic_load(struct bnx2x_softc *sc); 130 131 static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc); 132 static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp); 133 static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, 134 uint8_t storm, uint16_t index, uint8_t op, 135 uint8_t update); 136 137 int bnx2x_cmpxchg(volatile int *addr, int old, int new) 138 { 139 return __sync_val_compare_and_swap(addr, old, new); 140 } 141 142 int 143 bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma, 144 const char *msg, uint32_t align) 145 { 146 char mz_name[RTE_MEMZONE_NAMESIZE]; 147 const struct rte_memzone *z; 148 149 dma->sc = sc; 150 if (IS_PF(sc)) 151 snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg, 152 rte_get_timer_cycles()); 153 else 154 snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg, 155 rte_get_timer_cycles()); 156 157 /* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */ 158 z = rte_memzone_reserve_aligned(mz_name, (uint64_t)size, 159 SOCKET_ID_ANY, 160 RTE_MEMZONE_IOVA_CONTIG, align); 161 if (z == NULL) { 162 PMD_DRV_LOG(ERR, sc, "DMA alloc failed for %s", msg); 163 return -ENOMEM; 164 } 165 dma->paddr = (uint64_t) z->iova; 166 dma->vaddr = z->addr; 167 dma->mzone = (const void *)z; 168 169 PMD_DRV_LOG(DEBUG, sc, 170 "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr); 171 172 return 0; 173 } 174 175 void bnx2x_dma_free(struct bnx2x_dma *dma) 176 { 177 if (dma->mzone == NULL) 178 return; 179 180 rte_memzone_free((const struct rte_memzone *)dma->mzone); 181 dma->sc = NULL; 182 dma->paddr = 0; 183 dma->vaddr = NULL; 184 dma->nseg = 0; 185 dma->mzone = NULL; 186 } 187 188 static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource) 189 { 190 uint32_t lock_status; 191 uint32_t resource_bit = (1 << resource); 192 int func = SC_FUNC(sc); 193 uint32_t hw_lock_control_reg; 194 int cnt; 195 196 #ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC 197 if (resource) 198 PMD_INIT_FUNC_TRACE(sc); 199 #else 200 PMD_INIT_FUNC_TRACE(sc); 201 #endif 202 203 /* validate the resource is within range */ 204 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 205 PMD_DRV_LOG(NOTICE, sc, 206 "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE", 207 resource); 208 return -1; 209 } 210 211 if (func <= 5) { 212 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); 213 } else { 214 hw_lock_control_reg = 215 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); 216 } 217 218 /* validate the resource is not already taken */ 219 lock_status = REG_RD(sc, hw_lock_control_reg); 220 if (lock_status & resource_bit) { 221 PMD_DRV_LOG(NOTICE, sc, 222 "resource in use (status 0x%x bit 0x%x)", 223 lock_status, resource_bit); 224 return -1; 225 } 226 227 /* try every 5ms for 5 seconds */ 228 for (cnt = 0; cnt < 1000; cnt++) { 229 REG_WR(sc, (hw_lock_control_reg + 4), resource_bit); 230 lock_status = REG_RD(sc, hw_lock_control_reg); 231 if (lock_status & resource_bit) { 232 return 0; 233 } 234 DELAY(5000); 235 } 236 237 PMD_DRV_LOG(NOTICE, sc, "Resource 0x%x resource_bit 0x%x lock timeout!", 238 resource, resource_bit); 239 return -1; 240 
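	/*
	 * Illustrative usage only (a hypothetical caller, not code from this
	 * file): the acquire is normally paired with a release around the
	 * protected register access, e.g.
	 *
	 *	if (bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO) == 0) {
	 *		... read-modify-write the shared GPIO registers ...
	 *		bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
	 *	}
	 */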
} 241 242 static int bnx2x_release_hw_lock(struct bnx2x_softc *sc, uint32_t resource) 243 { 244 uint32_t lock_status; 245 uint32_t resource_bit = (1 << resource); 246 int func = SC_FUNC(sc); 247 uint32_t hw_lock_control_reg; 248 249 #ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC 250 if (resource) 251 PMD_INIT_FUNC_TRACE(sc); 252 #else 253 PMD_INIT_FUNC_TRACE(sc); 254 #endif 255 256 /* validate the resource is within range */ 257 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 258 PMD_DRV_LOG(NOTICE, sc, 259 "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)" 260 " resource_bit 0x%x", resource, resource_bit); 261 return -1; 262 } 263 264 if (func <= 5) { 265 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); 266 } else { 267 hw_lock_control_reg = 268 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); 269 } 270 271 /* validate the resource is currently taken */ 272 lock_status = REG_RD(sc, hw_lock_control_reg); 273 if (!(lock_status & resource_bit)) { 274 PMD_DRV_LOG(NOTICE, sc, 275 "resource not in use (status 0x%x bit 0x%x)", 276 lock_status, resource_bit); 277 return -1; 278 } 279 280 REG_WR(sc, hw_lock_control_reg, resource_bit); 281 return 0; 282 } 283 284 static void bnx2x_acquire_phy_lock(struct bnx2x_softc *sc) 285 { 286 BNX2X_PHY_LOCK(sc); 287 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO); 288 } 289 290 static void bnx2x_release_phy_lock(struct bnx2x_softc *sc) 291 { 292 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO); 293 BNX2X_PHY_UNLOCK(sc); 294 } 295 296 /* copy command into DMAE command memory and set DMAE command Go */ 297 void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx) 298 { 299 uint32_t cmd_offset; 300 uint32_t i; 301 302 cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx)); 303 for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) { 304 REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *) dmae) + i)); 305 } 306 307 REG_WR(sc, dmae_reg_go_c[idx], 1); 308 } 309 310 uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type) 311 { 312 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) | 313 DMAE_COMMAND_C_TYPE_ENABLE); 314 } 315 316 uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode) 317 { 318 return opcode & ~DMAE_COMMAND_SRC_RESET; 319 } 320 321 uint32_t 322 bnx2x_dmae_opcode(struct bnx2x_softc * sc, uint8_t src_type, uint8_t dst_type, 323 uint8_t with_comp, uint8_t comp_type) 324 { 325 uint32_t opcode = 0; 326 327 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) | 328 (dst_type << DMAE_COMMAND_DST_SHIFT)); 329 330 opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET); 331 332 opcode |= (SC_PORT(sc) ? 
DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); 333 334 opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) | 335 (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT)); 336 337 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); 338 339 #ifdef __BIG_ENDIAN 340 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; 341 #else 342 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; 343 #endif 344 345 if (with_comp) { 346 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type); 347 } 348 349 return opcode; 350 } 351 352 static void 353 bnx2x_prep_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae, 354 uint8_t src_type, uint8_t dst_type) 355 { 356 memset(dmae, 0, sizeof(struct dmae_command)); 357 358 /* set the opcode */ 359 dmae->opcode = bnx2x_dmae_opcode(sc, src_type, dst_type, 360 TRUE, DMAE_COMP_PCI); 361 362 /* fill in the completion parameters */ 363 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, wb_comp)); 364 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, wb_comp)); 365 dmae->comp_val = DMAE_COMP_VAL; 366 } 367 368 /* issue a DMAE command over the init channel and wait for completion */ 369 static int 370 bnx2x_issue_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae) 371 { 372 uint32_t *wb_comp = BNX2X_SP(sc, wb_comp); 373 int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000; 374 375 /* reset completion */ 376 *wb_comp = 0; 377 378 /* post the command on the channel used for initializations */ 379 bnx2x_post_dmae(sc, dmae, INIT_DMAE_C(sc)); 380 381 /* wait for completion */ 382 DELAY(500); 383 384 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { 385 if (!timeout || 386 (sc->recovery_state != BNX2X_RECOVERY_DONE && 387 sc->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { 388 PMD_DRV_LOG(INFO, sc, "DMAE timeout!"); 389 return DMAE_TIMEOUT; 390 } 391 392 timeout--; 393 DELAY(50); 394 } 395 396 if (*wb_comp & DMAE_PCI_ERR_FLAG) { 397 PMD_DRV_LOG(INFO, sc, "DMAE PCI error!"); 398 return DMAE_PCI_ERROR; 399 } 400 401 return 0; 402 } 403 404 void bnx2x_read_dmae(struct bnx2x_softc *sc, uint32_t src_addr, uint32_t len32) 405 { 406 struct dmae_command dmae; 407 uint32_t *data; 408 uint32_t i; 409 int rc; 410 411 if (!sc->dmae_ready) { 412 data = BNX2X_SP(sc, wb_data[0]); 413 414 for (i = 0; i < len32; i++) { 415 data[i] = REG_RD(sc, (src_addr + (i * 4))); 416 } 417 418 return; 419 } 420 421 /* set opcode and fixed command fields */ 422 bnx2x_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); 423 424 /* fill in addresses and len */ 425 dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */ 426 dmae.src_addr_hi = 0; 427 dmae.dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, wb_data)); 428 dmae.dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, wb_data)); 429 dmae.len = len32; 430 431 /* issue the command and wait for completion */ 432 if ((rc = bnx2x_issue_dmae_with_comp(sc, &dmae)) != 0) { 433 rte_panic("DMAE failed (%d)", rc); 434 }; 435 } 436 437 void 438 bnx2x_write_dmae(struct bnx2x_softc *sc, rte_iova_t dma_addr, uint32_t dst_addr, 439 uint32_t len32) 440 { 441 struct dmae_command dmae; 442 int rc; 443 444 if (!sc->dmae_ready) { 445 ecore_init_str_wr(sc, dst_addr, BNX2X_SP(sc, wb_data[0]), len32); 446 return; 447 } 448 449 /* set opcode and fixed command fields */ 450 bnx2x_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); 451 452 /* fill in addresses and len */ 453 dmae.src_addr_lo = U64_LO(dma_addr); 454 dmae.src_addr_hi = U64_HI(dma_addr); 455 dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */ 456 dmae.dst_addr_hi = 0; 457 dmae.len = len32; 458 459 /* 
issue the command and wait for completion */ 460 if ((rc = bnx2x_issue_dmae_with_comp(sc, &dmae)) != 0) { 461 rte_panic("DMAE failed (%d)", rc); 462 } 463 } 464 465 static void 466 bnx2x_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr, 467 uint32_t addr, uint32_t len) 468 { 469 uint32_t dmae_wr_max = DMAE_LEN32_WR_MAX(sc); 470 uint32_t offset = 0; 471 472 while (len > dmae_wr_max) { 473 bnx2x_write_dmae(sc, (phys_addr + offset), /* src DMA address */ 474 (addr + offset), /* dst GRC address */ 475 dmae_wr_max); 476 offset += (dmae_wr_max * 4); 477 len -= dmae_wr_max; 478 } 479 480 bnx2x_write_dmae(sc, (phys_addr + offset), /* src DMA address */ 481 (addr + offset), /* dst GRC address */ 482 len); 483 } 484 485 void 486 bnx2x_set_ctx_validation(struct bnx2x_softc *sc, struct eth_context *cxt, 487 uint32_t cid) 488 { 489 /* ustorm cxt validation */ 490 cxt->ustorm_ag_context.cdu_usage = 491 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), 492 CDU_REGION_NUMBER_UCM_AG, 493 ETH_CONNECTION_TYPE); 494 /* xcontext validation */ 495 cxt->xstorm_ag_context.cdu_reserved = 496 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), 497 CDU_REGION_NUMBER_XCM_AG, 498 ETH_CONNECTION_TYPE); 499 } 500 501 static void 502 bnx2x_storm_memset_hc_timeout(struct bnx2x_softc *sc, uint8_t fw_sb_id, 503 uint8_t sb_index, uint8_t ticks) 504 { 505 uint32_t addr = 506 (BAR_CSTRORM_INTMEM + 507 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index)); 508 509 REG_WR8(sc, addr, ticks); 510 } 511 512 static void 513 bnx2x_storm_memset_hc_disable(struct bnx2x_softc *sc, uint16_t fw_sb_id, 514 uint8_t sb_index, uint8_t disable) 515 { 516 uint32_t enable_flag = 517 (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); 518 uint32_t addr = 519 (BAR_CSTRORM_INTMEM + 520 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index)); 521 uint8_t flags; 522 523 /* clear and set */ 524 flags = REG_RD8(sc, addr); 525 flags &= ~HC_INDEX_DATA_HC_ENABLED; 526 flags |= enable_flag; 527 REG_WR8(sc, addr, flags); 528 } 529 530 void 531 bnx2x_update_coalesce_sb_index(struct bnx2x_softc *sc, uint8_t fw_sb_id, 532 uint8_t sb_index, uint8_t disable, uint16_t usec) 533 { 534 uint8_t ticks = (usec / 4); 535 536 bnx2x_storm_memset_hc_timeout(sc, fw_sb_id, sb_index, ticks); 537 538 disable = (disable) ? 1 : ((usec) ? 0 : 1); 539 bnx2x_storm_memset_hc_disable(sc, fw_sb_id, sb_index, disable); 540 } 541 542 uint32_t elink_cb_reg_read(struct bnx2x_softc *sc, uint32_t reg_addr) 543 { 544 return REG_RD(sc, reg_addr); 545 } 546 547 void elink_cb_reg_write(struct bnx2x_softc *sc, uint32_t reg_addr, uint32_t val) 548 { 549 REG_WR(sc, reg_addr, val); 550 } 551 552 void 553 elink_cb_event_log(__rte_unused struct bnx2x_softc *sc, 554 __rte_unused const elink_log_id_t elink_log_id, ...) 
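/*
 * Note: the elink_cb_* functions in this file are callbacks invoked by the
 * shared elink link/PHY management code; this particular one only logs the
 * reported event.
 */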
555 { 556 PMD_DRV_LOG(DEBUG, sc, "ELINK EVENT LOG (%d)", elink_log_id); 557 } 558 559 static int bnx2x_set_spio(struct bnx2x_softc *sc, int spio, uint32_t mode) 560 { 561 uint32_t spio_reg; 562 563 /* Only 2 SPIOs are configurable */ 564 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { 565 PMD_DRV_LOG(NOTICE, sc, "Invalid SPIO 0x%x", spio); 566 return -1; 567 } 568 569 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 570 571 /* read SPIO and mask except the float bits */ 572 spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); 573 574 switch (mode) { 575 case MISC_SPIO_OUTPUT_LOW: 576 /* clear FLOAT and set CLR */ 577 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 578 spio_reg |= (spio << MISC_SPIO_CLR_POS); 579 break; 580 581 case MISC_SPIO_OUTPUT_HIGH: 582 /* clear FLOAT and set SET */ 583 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 584 spio_reg |= (spio << MISC_SPIO_SET_POS); 585 break; 586 587 case MISC_SPIO_INPUT_HI_Z: 588 /* set FLOAT */ 589 spio_reg |= (spio << MISC_SPIO_FLOAT_POS); 590 break; 591 592 default: 593 break; 594 } 595 596 REG_WR(sc, MISC_REG_SPIO, spio_reg); 597 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 598 599 return 0; 600 } 601 602 static int bnx2x_gpio_read(struct bnx2x_softc *sc, int gpio_num, uint8_t port) 603 { 604 /* The GPIO should be swapped if swap register is set and active */ 605 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 606 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 607 int gpio_shift = gpio_num; 608 if (gpio_port) 609 gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT; 610 611 uint32_t gpio_mask = (1 << gpio_shift); 612 uint32_t gpio_reg; 613 614 if (gpio_num > MISC_REGISTERS_GPIO_3) { 615 PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num); 616 return -1; 617 } 618 619 /* read GPIO value */ 620 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 621 622 /* get the requested pin value */ 623 return ((gpio_reg & gpio_mask) == gpio_mask) ? 
1 : 0; 624 } 625 626 static int 627 bnx2x_gpio_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode, uint8_t port) 628 { 629 /* The GPIO should be swapped if swap register is set and active */ 630 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 631 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 632 int gpio_shift = gpio_num; 633 if (gpio_port) 634 gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT; 635 636 uint32_t gpio_mask = (1 << gpio_shift); 637 uint32_t gpio_reg; 638 639 if (gpio_num > MISC_REGISTERS_GPIO_3) { 640 PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num); 641 return -1; 642 } 643 644 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 645 646 /* read GPIO and mask except the float bits */ 647 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 648 649 switch (mode) { 650 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 651 /* clear FLOAT and set CLR */ 652 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 653 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 654 break; 655 656 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 657 /* clear FLOAT and set SET */ 658 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 659 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 660 break; 661 662 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 663 /* set FLOAT */ 664 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 665 break; 666 667 default: 668 break; 669 } 670 671 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 672 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 673 674 return 0; 675 } 676 677 static int 678 bnx2x_gpio_mult_write(struct bnx2x_softc *sc, uint8_t pins, uint32_t mode) 679 { 680 uint32_t gpio_reg; 681 682 /* any port swapping should be handled by caller */ 683 684 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 685 686 /* read GPIO and mask except the float bits */ 687 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 688 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 689 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 690 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 691 692 switch (mode) { 693 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 694 /* set CLR */ 695 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 696 break; 697 698 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 699 /* set SET */ 700 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 701 break; 702 703 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 704 /* set FLOAT */ 705 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 706 break; 707 708 default: 709 PMD_DRV_LOG(NOTICE, sc, 710 "Invalid GPIO mode assignment %d", mode); 711 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 712 return -1; 713 } 714 715 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 716 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 717 718 return 0; 719 } 720 721 static int 722 bnx2x_gpio_int_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode, 723 uint8_t port) 724 { 725 /* The GPIO should be swapped if swap register is set and active */ 726 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 727 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 728 int gpio_shift = gpio_num; 729 if (gpio_port) 730 gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT; 731 732 uint32_t gpio_mask = (1 << gpio_shift); 733 uint32_t gpio_reg; 734 735 if (gpio_num > MISC_REGISTERS_GPIO_3) { 736 PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num); 737 return -1; 738 } 739 740 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 741 742 /* read GPIO int */ 743 gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); 744 745 switch (mode) { 746 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: 747 /* clear SET 
and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

uint32_t
elink_cb_gpio_read(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t port)
{
	return bnx2x_gpio_read(sc, gpio_num, port);
}

uint8_t elink_cb_gpio_write(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */
			    uint8_t port)
{
	return bnx2x_gpio_write(sc, gpio_num, mode, port);
}

uint8_t
elink_cb_gpio_mult_write(struct bnx2x_softc * sc, uint8_t pins,
			 uint8_t mode /* 0=low 1=high */ )
{
	return bnx2x_gpio_mult_write(sc, pins, mode);
}

uint8_t elink_cb_gpio_int_write(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */
				uint8_t port)
{
	return bnx2x_gpio_int_write(sc, gpio_num, mode, port);
}

void elink_cb_notify_link_changed(struct bnx2x_softc *sc)
{
	REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
		    (SC_FUNC(sc) * sizeof(uint32_t))), 1);
}

/* send the MCP a request, block until there is a reply */
uint32_t
elink_cb_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param)
{
	int mb_idx = SC_FW_MB_IDX(sc);
	uint32_t seq;
	uint32_t rc = 0;
	uint32_t cnt = 1;
	uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;

	seq = ++sc->fw_seq;
	SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));

	PMD_DRV_LOG(DEBUG, sc,
		    "wrote command 0x%08x to FW MB param 0x%08x",
		    (command | seq), param);

	/* Let the FW do its magic. Give it up to 5 seconds... */
	do {
		DELAY(delay * 1000);
		rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;
	} else {
		/* Ruh-roh! */
		PMD_DRV_LOG(NOTICE, sc, "FW failed to respond!");
		rc = 0;
	}

	return rc;
}

static uint32_t
bnx2x_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param)
{
	return elink_cb_fw_command(sc, command, param);
}

static void
__storm_memset_dma_mapping(struct bnx2x_softc *sc, uint32_t addr,
			   rte_iova_t mapping)
{
	REG_WR(sc, addr, U64_LO(mapping));
	REG_WR(sc, (addr + 4), U64_HI(mapping));
}

static void
storm_memset_spq_addr(struct bnx2x_softc *sc, rte_iova_t mapping,
		      uint16_t abs_fid)
{
	uint32_t addr = (XSEM_REG_FAST_MEMORY +
			 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
	__storm_memset_dma_mapping(sc, addr, mapping);
}

static void
storm_memset_vf_to_pf(struct bnx2x_softc *sc, uint16_t abs_fid, uint16_t pf_id)
{
	REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)),
		pf_id);
	REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)),
		pf_id);
	REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)),
		pf_id);
	REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)),
		pf_id);
}

static void
storm_memset_func_en(struct bnx2x_softc *sc, uint16_t abs_fid, uint8_t enable)
{
	REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)),
		enable);
	REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)),
		enable);
	REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)),
		enable);
	REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)),
		enable);
}

static void
storm_memset_eq_data(struct bnx2x_softc *sc, struct event_ring_data *eq_data,
		     uint16_t pfid)
{
	uint32_t addr;
	size_t size;

	addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
	size = sizeof(struct event_ring_data);
	ecore_storm_memset_struct(sc, addr, size, (uint32_t *) eq_data);
}

static void
storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid)
{
	uint32_t addr = (BAR_CSTRORM_INTMEM +
			 CSTORM_EVENT_RING_PROD_OFFSET(pfid));
	REG_WR16(sc, addr, eq_prod);
}

/*
 * Post a slowpath command.
 *
 * A slowpath command is used to propagate a configuration change through
 * the controller in a controlled manner, allowing each STORM processor and
 * other H/W blocks to phase in the change. The commands sent on the
 * slowpath are referred to as ramrods. Depending on the ramrod used the
 * completion of the ramrod will occur in different ways. Here's a
 * breakdown of ramrods and how they complete:
 *
 * RAMROD_CMD_ID_ETH_PORT_SETUP
 *   Used to setup the leading connection on a port. Completes on the
 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
 *
 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
 *   Used to setup an additional connection on a port. Completes on the
 *   RCQ of the multi-queue/RSS connection being initialized.
 *
 * RAMROD_CMD_ID_ETH_STAT_QUERY
 *   Used to force the storm processors to update the statistics database
 *   in host memory. This ramrod is sent on the leading connection CID and
 *   completes as an index increment of the CSTORM on the default status
 *   block.
 *
 * RAMROD_CMD_ID_ETH_UPDATE
 *   Used to update the state of the leading connection, usually to update
 *   the RSS indirection table. Completes on the RCQ of the leading
 *   connection.
(Not currently used under FreeBSD until OS support becomes 932 * available.) 933 * 934 * RAMROD_CMD_ID_ETH_HALT 935 * Used when tearing down a connection prior to driver unload. Completes 936 * on the RCQ of the multi-queue/RSS connection being torn down. Don't 937 * use this on the leading connection. 938 * 939 * RAMROD_CMD_ID_ETH_SET_MAC 940 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on 941 * the RCQ of the leading connection. 942 * 943 * RAMROD_CMD_ID_ETH_CFC_DEL 944 * Used when tearing down a connection prior to driver unload. Completes 945 * on the RCQ of the leading connection (since the current connection 946 * has been completely removed from controller memory). 947 * 948 * RAMROD_CMD_ID_ETH_PORT_DEL 949 * Used to tear down the leading connection prior to driver unload, 950 * typically fp[0]. Completes as an index increment of the CSTORM on the 951 * default status block. 952 * 953 * RAMROD_CMD_ID_ETH_FORWARD_SETUP 954 * Used for connection offload. Completes on the RCQ of the multi-queue 955 * RSS connection that is being offloaded. (Not currently used under 956 * FreeBSD.) 957 * 958 * There can only be one command pending per function. 959 * 960 * Returns: 961 * 0 = Success, !0 = Failure. 962 */ 963 964 /* must be called under the spq lock */ 965 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x_softc *sc) 966 { 967 struct eth_spe *next_spe = sc->spq_prod_bd; 968 969 if (sc->spq_prod_bd == sc->spq_last_bd) { 970 /* wrap back to the first eth_spq */ 971 sc->spq_prod_bd = sc->spq; 972 sc->spq_prod_idx = 0; 973 } else { 974 sc->spq_prod_bd++; 975 sc->spq_prod_idx++; 976 } 977 978 return next_spe; 979 } 980 981 /* must be called under the spq lock */ 982 static void bnx2x_sp_prod_update(struct bnx2x_softc *sc) 983 { 984 int func = SC_FUNC(sc); 985 986 /* 987 * Make sure that BD data is updated before writing the producer. 988 * BD data is written to the memory, the producer is read from the 989 * memory, thus we need a full memory barrier to ensure the ordering. 990 */ 991 mb(); 992 993 REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)), 994 sc->spq_prod_idx); 995 996 mb(); 997 } 998 999 /** 1000 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ 1001 * 1002 * @cmd: command to check 1003 * @cmd_type: command type 1004 */ 1005 static int bnx2x_is_contextless_ramrod(int cmd, int cmd_type) 1006 { 1007 if ((cmd_type == NONE_CONNECTION_TYPE) || 1008 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || 1009 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || 1010 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || 1011 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || 1012 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || 1013 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { 1014 return TRUE; 1015 } else { 1016 return FALSE; 1017 } 1018 } 1019 1020 /** 1021 * bnx2x_sp_post - place a single command on an SP ring 1022 * 1023 * @sc: driver handle 1024 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) 1025 * @cid: SW CID the command is related to 1026 * @data_hi: command private data address (high 32 bits) 1027 * @data_lo: command private data address (low 32 bits) 1028 * @cmd_type: command type (e.g. NONE, ETH) 1029 * 1030 * SP data is handled as if it's always an address pair, thus data fields are 1031 * not swapped to little endian in upper functions. Instead this function swaps 1032 * data as if it's two uint32 fields. 
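 *
 * As a rough illustration only (rdata_mapping below is a hypothetical DMA
 * address of a ramrod data buffer, not a field defined here), a contextless
 * ramrod could be posted along these lines:
 *
 *	rc = bnx2x_sp_post(sc, RAMROD_CMD_ID_ETH_SET_MAC, 0,
 *			   U64_HI(rdata_mapping), U64_LO(rdata_mapping),
 *			   NONE_CONNECTION_TYPE);
 *	if (rc)
 *		PMD_DRV_LOG(ERR, sc, "failed to post SET_MAC ramrod");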
 */
int
bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, uint32_t data_hi,
	      uint32_t data_lo, int cmd_type)
{
	struct eth_spe *spe;
	uint16_t type;
	int common;

	common = bnx2x_is_contextless_ramrod(command, cmd_type);

	if (common) {
		if (!atomic_load_acq_long(&sc->eq_spq_left)) {
			PMD_DRV_LOG(INFO, sc, "EQ ring is full!");
			return -1;
		}
	} else {
		if (!atomic_load_acq_long(&sc->cq_spq_left)) {
			PMD_DRV_LOG(INFO, sc, "SPQ ring is full!");
			return -1;
		}
	}

	spe = bnx2x_sp_get_next(sc);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
	    htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid));

	type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;

	/* TBD: Check if it works for VFs */
	type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	spe->hdr.type = htole16(type);

	spe->data.update_data_addr.hi = htole32(data_hi);
	spe->data.update_data_addr.lo = htole32(data_lo);

	/*
	 * It's ok if the actual decrement is issued towards the memory
	 * somewhere between the lock and unlock. Thus no more explicit
	 * memory barrier is needed.
	 */
	if (common) {
		atomic_subtract_acq_long(&sc->eq_spq_left, 1);
	} else {
		atomic_subtract_acq_long(&sc->cq_spq_left, 1);
	}

	PMD_DRV_LOG(DEBUG, sc,
		    "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x "
		    "data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)",
		    sc->spq_prod_idx,
		    (uint32_t) U64_HI(sc->spq_dma.paddr),
		    (uint32_t) (U64_LO(sc->spq_dma.paddr) +
				(uint8_t *) sc->spq_prod_bd -
				(uint8_t *) sc->spq), command, common,
		    HW_CID(sc, cid), data_hi, data_lo, type,
		    atomic_load_acq_long(&sc->cq_spq_left),
		    atomic_load_acq_long(&sc->eq_spq_left));

	/* RAMROD completion is processed in bnx2x_intr_legacy()
	 * which can run from different contexts.
	 * Ask bnx2x_intr_intr() to process RAMROD
	 * completion whenever it gets scheduled.
	 */
	rte_atomic32_set(&sc->scan_fp, 1);
	bnx2x_sp_prod_update(sc);

	return 0;
}

static void bnx2x_drv_pulse(struct bnx2x_softc *sc)
{
	SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
		 sc->fw_drv_pulse_wr_seq);
}

static int bnx2x_tx_queue_has_work(const struct bnx2x_fastpath *fp)
{
	uint16_t hw_cons;
	struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index];

	if (unlikely(!txq)) {
		PMD_TX_LOG(ERR, "ERROR: TX queue is NULL");
		return 0;
	}

	mb();			/* status block fields can change */
	hw_cons = le16toh(*fp->tx_cons_sb);
	return hw_cons != txq->tx_pkt_head;
}

static uint8_t bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	/* expand this for multi-cos if ever supported */
	return bnx2x_tx_queue_has_work(fp);
}

static int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	uint16_t rx_cq_cons_sb;
	struct bnx2x_rx_queue *rxq;
	rxq = fp->sc->rx_queues[fp->index];
	if (unlikely(!rxq)) {
		PMD_RX_LOG(ERR, "ERROR: RX queue is NULL");
		return 0;
	}

	mb();			/* status block fields can change */
	rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
	if (unlikely((rx_cq_cons_sb & MAX_RCQ_ENTRIES(rxq)) ==
		     MAX_RCQ_ENTRIES(rxq)))
		rx_cq_cons_sb++;

	PMD_RX_LOG(DEBUG, "hw CQ cons = %d, sw CQ cons = %d",
		   rx_cq_cons_sb, rxq->rx_cq_head);

	return rxq->rx_cq_head != rx_cq_cons_sb;
}

static void
bnx2x_sp_event(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
	       union eth_rx_cqe *rr_cqe)
{
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
	struct ecore_queue_sp_obj *q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj;

	PMD_DRV_LOG(DEBUG, sc,
		    "fp=%d cid=%d got ramrod #%d state is %x type is %d",
		    fp->index, cid, command, sc->state,
		    rr_cqe->ramrod_cqe.ramrod_type);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		PMD_DRV_LOG(DEBUG, sc, "got UPDATE ramrod. CID %d", cid);
		drv_cmd = ECORE_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] setup ramrod", cid);
		drv_cmd = ECORE_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		PMD_DRV_LOG(DEBUG, sc,
			    "got MULTI[%d] tx-only setup ramrod", cid);
		drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] halt ramrod", cid);
		drv_cmd = ECORE_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] terminate ramrod", cid);
		drv_cmd = ECORE_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] empty ramrod", cid);
		drv_cmd = ECORE_Q_CMD_EMPTY;
		break;

	default:
		PMD_DRV_LOG(DEBUG, sc,
			    "ERROR: unexpected MC reply (%d) "
			    "on fp[%d]", command, fp->index);
		return;
	}

	if ((drv_cmd != ECORE_Q_CMD_MAX) &&
	    q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
		/*
		 * q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase the sc->spq_left
		 * because apparently we haven't sent this command in the
		 * first place.
1218 */ 1219 // rte_panic("Unexpected SP completion"); 1220 return; 1221 } 1222 1223 atomic_add_acq_long(&sc->cq_spq_left, 1); 1224 1225 PMD_DRV_LOG(DEBUG, sc, "sc->cq_spq_left 0x%lx", 1226 atomic_load_acq_long(&sc->cq_spq_left)); 1227 } 1228 1229 static uint8_t bnx2x_rxeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp) 1230 { 1231 struct bnx2x_rx_queue *rxq; 1232 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; 1233 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; 1234 1235 rte_spinlock_lock(&(fp)->rx_mtx); 1236 1237 rxq = sc->rx_queues[fp->index]; 1238 if (!rxq) { 1239 PMD_RX_LOG(ERR, "RX queue %d is NULL", fp->index); 1240 rte_spinlock_unlock(&(fp)->rx_mtx); 1241 return 0; 1242 } 1243 1244 /* CQ "next element" is of the size of the regular element */ 1245 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); 1246 if (unlikely((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) == 1247 USABLE_RCQ_ENTRIES_PER_PAGE)) { 1248 hw_cq_cons++; 1249 } 1250 1251 bd_cons = rxq->rx_bd_head; 1252 bd_prod = rxq->rx_bd_tail; 1253 bd_prod_fw = bd_prod; 1254 sw_cq_cons = rxq->rx_cq_head; 1255 sw_cq_prod = rxq->rx_cq_tail; 1256 1257 /* 1258 * Memory barrier necessary as speculative reads of the rx 1259 * buffer can be ahead of the index in the status block 1260 */ 1261 rmb(); 1262 1263 while (sw_cq_cons != hw_cq_cons) { 1264 union eth_rx_cqe *cqe; 1265 struct eth_fast_path_rx_cqe *cqe_fp; 1266 uint8_t cqe_fp_flags; 1267 enum eth_rx_cqe_type cqe_fp_type; 1268 1269 comp_ring_cons = RCQ_ENTRY(sw_cq_cons, rxq); 1270 bd_prod = RX_BD(bd_prod, rxq); 1271 bd_cons = RX_BD(bd_cons, rxq); 1272 1273 cqe = &rxq->cq_ring[comp_ring_cons]; 1274 cqe_fp = &cqe->fast_path_cqe; 1275 cqe_fp_flags = cqe_fp->type_error_flags; 1276 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 1277 1278 /* is this a slowpath msg? */ 1279 if (CQE_TYPE_SLOW(cqe_fp_type)) { 1280 bnx2x_sp_event(sc, fp, cqe); 1281 goto next_cqe; 1282 } 1283 1284 /* is this an error packet? 
*/ 1285 if (unlikely(cqe_fp_flags & 1286 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { 1287 PMD_RX_LOG(DEBUG, "flags 0x%x rx packet %u", 1288 cqe_fp_flags, sw_cq_cons); 1289 goto next_rx; 1290 } 1291 1292 PMD_RX_LOG(DEBUG, "Dropping fastpath called from attn poller!"); 1293 1294 next_rx: 1295 bd_cons = NEXT_RX_BD(bd_cons); 1296 bd_prod = NEXT_RX_BD(bd_prod); 1297 bd_prod_fw = NEXT_RX_BD(bd_prod_fw); 1298 1299 next_cqe: 1300 sw_cq_prod = NEXT_RCQ_IDX(sw_cq_prod); 1301 sw_cq_cons = NEXT_RCQ_IDX(sw_cq_cons); 1302 1303 } /* while work to do */ 1304 1305 rxq->rx_bd_head = bd_cons; 1306 rxq->rx_bd_tail = bd_prod_fw; 1307 rxq->rx_cq_head = sw_cq_cons; 1308 rxq->rx_cq_tail = sw_cq_prod; 1309 1310 PMD_RX_LOG(DEBUG, "BD prod = %d, sw CQ prod = %d", 1311 bd_prod_fw, sw_cq_prod); 1312 1313 /* Update producers */ 1314 bnx2x_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod); 1315 1316 rte_spinlock_unlock(&(fp)->rx_mtx); 1317 1318 return sw_cq_cons != hw_cq_cons; 1319 } 1320 1321 static uint16_t 1322 bnx2x_free_tx_pkt(__rte_unused struct bnx2x_fastpath *fp, struct bnx2x_tx_queue *txq, 1323 uint16_t pkt_idx, uint16_t bd_idx) 1324 { 1325 struct eth_tx_start_bd *tx_start_bd = 1326 &txq->tx_ring[TX_BD(bd_idx, txq)].start_bd; 1327 uint16_t nbd = rte_le_to_cpu_16(tx_start_bd->nbd); 1328 struct rte_mbuf *tx_mbuf = txq->sw_ring[TX_BD(pkt_idx, txq)]; 1329 1330 if (likely(tx_mbuf != NULL)) { 1331 rte_pktmbuf_free_seg(tx_mbuf); 1332 } else { 1333 PMD_RX_LOG(ERR, "fp[%02d] lost mbuf %lu", 1334 fp->index, (unsigned long)TX_BD(pkt_idx, txq)); 1335 } 1336 1337 txq->sw_ring[TX_BD(pkt_idx, txq)] = NULL; 1338 txq->nb_tx_avail += nbd; 1339 1340 while (nbd--) 1341 bd_idx = NEXT_TX_BD(bd_idx); 1342 1343 return bd_idx; 1344 } 1345 1346 /* processes transmit completions */ 1347 uint8_t bnx2x_txeof(__rte_unused struct bnx2x_softc * sc, struct bnx2x_fastpath * fp) 1348 { 1349 uint16_t bd_cons, hw_cons, sw_cons; 1350 __rte_unused uint16_t tx_bd_avail; 1351 1352 struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; 1353 1354 if (unlikely(!txq)) { 1355 PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); 1356 return 0; 1357 } 1358 1359 bd_cons = txq->tx_bd_head; 1360 hw_cons = rte_le_to_cpu_16(*fp->tx_cons_sb); 1361 sw_cons = txq->tx_pkt_head; 1362 1363 while (sw_cons != hw_cons) { 1364 bd_cons = bnx2x_free_tx_pkt(fp, txq, sw_cons, bd_cons); 1365 sw_cons++; 1366 } 1367 1368 txq->tx_pkt_head = sw_cons; 1369 txq->tx_bd_head = bd_cons; 1370 1371 tx_bd_avail = txq->nb_tx_avail; 1372 1373 PMD_TX_LOG(DEBUG, "fp[%02d] avail=%u cons_sb=%u, " 1374 "pkt_head=%u pkt_tail=%u bd_head=%u bd_tail=%u", 1375 fp->index, tx_bd_avail, hw_cons, 1376 txq->tx_pkt_head, txq->tx_pkt_tail, 1377 txq->tx_bd_head, txq->tx_bd_tail); 1378 return TRUE; 1379 } 1380 1381 static void bnx2x_drain_tx_queues(struct bnx2x_softc *sc) 1382 { 1383 struct bnx2x_fastpath *fp; 1384 int i, count; 1385 1386 /* wait until all TX fastpath tasks have completed */ 1387 for (i = 0; i < sc->num_queues; i++) { 1388 fp = &sc->fp[i]; 1389 1390 count = 1000; 1391 1392 while (bnx2x_has_tx_work(fp)) { 1393 bnx2x_txeof(sc, fp); 1394 1395 if (count == 0) { 1396 PMD_TX_LOG(ERR, 1397 "Timeout waiting for fp[%d] " 1398 "transmits to complete!", i); 1399 rte_panic("tx drain failure"); 1400 return; 1401 } 1402 1403 count--; 1404 DELAY(1000); 1405 rmb(); 1406 } 1407 } 1408 1409 return; 1410 } 1411 1412 static int 1413 bnx2x_del_all_macs(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *mac_obj, 1414 int mac_type, uint8_t wait_for_comp) 1415 { 1416 uint32_t ramrod_flags = 0, vlan_mac_flags = 0; 1417 int 
rc; 1418 1419 /* wait for completion of requested */ 1420 if (wait_for_comp) { 1421 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &ramrod_flags); 1422 } 1423 1424 /* Set the mac type of addresses we want to clear */ 1425 rte_bit_relaxed_set32(mac_type, &vlan_mac_flags); 1426 1427 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); 1428 if (rc < 0) 1429 PMD_DRV_LOG(ERR, sc, "Failed to delete MACs (%d)", rc); 1430 1431 return rc; 1432 } 1433 1434 static int 1435 bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode, 1436 uint32_t *rx_accept_flags, uint32_t *tx_accept_flags) 1437 { 1438 /* Clear the flags first */ 1439 *rx_accept_flags = 0; 1440 *tx_accept_flags = 0; 1441 1442 switch (rx_mode) { 1443 case BNX2X_RX_MODE_NONE: 1444 /* 1445 * 'drop all' supersedes any accept flags that may have been 1446 * passed to the function. 1447 */ 1448 break; 1449 1450 case BNX2X_RX_MODE_NORMAL: 1451 rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, rx_accept_flags); 1452 rte_bit_relaxed_set32(ECORE_ACCEPT_MULTICAST, rx_accept_flags); 1453 rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 1454 1455 /* internal switching mode */ 1456 rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, tx_accept_flags); 1457 rte_bit_relaxed_set32(ECORE_ACCEPT_MULTICAST, tx_accept_flags); 1458 rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 1459 1460 break; 1461 1462 case BNX2X_RX_MODE_ALLMULTI: 1463 rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, rx_accept_flags); 1464 rte_bit_relaxed_set32(ECORE_ACCEPT_ALL_MULTICAST, 1465 rx_accept_flags); 1466 rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 1467 1468 /* internal switching mode */ 1469 rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, tx_accept_flags); 1470 rte_bit_relaxed_set32(ECORE_ACCEPT_ALL_MULTICAST, 1471 tx_accept_flags); 1472 rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 1473 1474 break; 1475 1476 case BNX2X_RX_MODE_ALLMULTI_PROMISC: 1477 case BNX2X_RX_MODE_PROMISC: 1478 /* 1479 * According to definition of SI mode, iface in promisc mode 1480 * should receive matched and unmatched (in resolution of port) 1481 * unicast packets. 
1482 */ 1483 rte_bit_relaxed_set32(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); 1484 rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, rx_accept_flags); 1485 rte_bit_relaxed_set32(ECORE_ACCEPT_ALL_MULTICAST, 1486 rx_accept_flags); 1487 rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 1488 1489 /* internal switching mode */ 1490 rte_bit_relaxed_set32(ECORE_ACCEPT_ALL_MULTICAST, 1491 tx_accept_flags); 1492 rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 1493 1494 if (IS_MF_SI(sc)) { 1495 rte_bit_relaxed_set32(ECORE_ACCEPT_ALL_UNICAST, 1496 tx_accept_flags); 1497 } else { 1498 rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, 1499 tx_accept_flags); 1500 } 1501 1502 break; 1503 1504 default: 1505 PMD_RX_LOG(ERR, "Unknown rx_mode (%d)", rx_mode); 1506 return -1; 1507 } 1508 1509 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ 1510 if (rx_mode != BNX2X_RX_MODE_NONE) { 1511 rte_bit_relaxed_set32(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); 1512 rte_bit_relaxed_set32(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); 1513 } 1514 1515 return 0; 1516 } 1517 1518 static int 1519 bnx2x_set_q_rx_mode(struct bnx2x_softc *sc, uint8_t cl_id, 1520 unsigned long rx_mode_flags, 1521 unsigned long rx_accept_flags, 1522 unsigned long tx_accept_flags, unsigned long ramrod_flags) 1523 { 1524 struct ecore_rx_mode_ramrod_params ramrod_param; 1525 int rc; 1526 1527 memset(&ramrod_param, 0, sizeof(ramrod_param)); 1528 1529 /* Prepare ramrod parameters */ 1530 ramrod_param.cid = 0; 1531 ramrod_param.cl_id = cl_id; 1532 ramrod_param.rx_mode_obj = &sc->rx_mode_obj; 1533 ramrod_param.func_id = SC_FUNC(sc); 1534 1535 ramrod_param.pstate = &sc->sp_state; 1536 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; 1537 1538 ramrod_param.rdata = BNX2X_SP(sc, rx_mode_rdata); 1539 ramrod_param.rdata_mapping = 1540 (rte_iova_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata), 1541 rte_bit_relaxed_set32(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 1542 1543 ramrod_param.ramrod_flags = ramrod_flags; 1544 ramrod_param.rx_mode_flags = rx_mode_flags; 1545 1546 ramrod_param.rx_accept_flags = rx_accept_flags; 1547 ramrod_param.tx_accept_flags = tx_accept_flags; 1548 1549 rc = ecore_config_rx_mode(sc, &ramrod_param); 1550 if (rc < 0) { 1551 PMD_RX_LOG(ERR, "Set rx_mode %d failed", sc->rx_mode); 1552 return rc; 1553 } 1554 1555 return 0; 1556 } 1557 1558 int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc) 1559 { 1560 uint32_t rx_mode_flags = 0, ramrod_flags = 0; 1561 uint32_t rx_accept_flags = 0, tx_accept_flags = 0; 1562 int rc; 1563 1564 rc = bnx2x_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, 1565 &tx_accept_flags); 1566 if (rc) { 1567 return rc; 1568 } 1569 1570 rte_bit_relaxed_set32(RAMROD_RX, &ramrod_flags); 1571 rte_bit_relaxed_set32(RAMROD_TX, &ramrod_flags); 1572 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &ramrod_flags); 1573 1574 return bnx2x_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, 1575 rx_accept_flags, tx_accept_flags, 1576 ramrod_flags); 1577 } 1578 1579 /* returns the "mcp load_code" according to global load_count array */ 1580 static int bnx2x_nic_load_no_mcp(struct bnx2x_softc *sc) 1581 { 1582 int path = SC_PATH(sc); 1583 int port = SC_PORT(sc); 1584 1585 PMD_DRV_LOG(INFO, sc, "NO MCP - load counts[%d] %d, %d, %d", 1586 path, load_count[path][0], load_count[path][1], 1587 load_count[path][2]); 1588 1589 load_count[path][0]++; 1590 load_count[path][1 + port]++; 1591 PMD_DRV_LOG(INFO, sc, "NO MCP - new load counts[%d] %d, %d, %d", 1592 path, load_count[path][0], load_count[path][1], 1593 
load_count[path][2]); 1594 if (load_count[path][0] == 1) 1595 return FW_MSG_CODE_DRV_LOAD_COMMON; 1596 else if (load_count[path][1 + port] == 1) 1597 return FW_MSG_CODE_DRV_LOAD_PORT; 1598 else 1599 return FW_MSG_CODE_DRV_LOAD_FUNCTION; 1600 } 1601 1602 /* returns the "mcp load_code" according to global load_count array */ 1603 static int bnx2x_nic_unload_no_mcp(struct bnx2x_softc *sc) 1604 { 1605 int port = SC_PORT(sc); 1606 int path = SC_PATH(sc); 1607 1608 PMD_DRV_LOG(INFO, sc, "NO MCP - load counts[%d] %d, %d, %d", 1609 path, load_count[path][0], load_count[path][1], 1610 load_count[path][2]); 1611 load_count[path][0]--; 1612 load_count[path][1 + port]--; 1613 PMD_DRV_LOG(INFO, sc, "NO MCP - new load counts[%d] %d, %d, %d", 1614 path, load_count[path][0], load_count[path][1], 1615 load_count[path][2]); 1616 if (load_count[path][0] == 0) { 1617 return FW_MSG_CODE_DRV_UNLOAD_COMMON; 1618 } else if (load_count[path][1 + port] == 0) { 1619 return FW_MSG_CODE_DRV_UNLOAD_PORT; 1620 } else { 1621 return FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 1622 } 1623 } 1624 1625 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */ 1626 static uint32_t bnx2x_send_unload_req(struct bnx2x_softc *sc, int unload_mode __rte_unused) 1627 { 1628 uint32_t reset_code = 0; 1629 1630 /* Select the UNLOAD request mode */ 1631 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 1632 1633 /* Send the request to the MCP */ 1634 if (!BNX2X_NOMCP(sc)) { 1635 reset_code = bnx2x_fw_command(sc, reset_code, 0); 1636 } else { 1637 reset_code = bnx2x_nic_unload_no_mcp(sc); 1638 } 1639 1640 return reset_code; 1641 } 1642 1643 /* send UNLOAD_DONE command to the MCP */ 1644 static void bnx2x_send_unload_done(struct bnx2x_softc *sc, uint8_t keep_link) 1645 { 1646 uint32_t reset_param = 1647 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; 1648 1649 /* Report UNLOAD_DONE to MCP */ 1650 if (!BNX2X_NOMCP(sc)) { 1651 bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); 1652 } 1653 } 1654 1655 static int bnx2x_func_wait_started(struct bnx2x_softc *sc) 1656 { 1657 int tout = 50; 1658 1659 if (!sc->port.pmf) { 1660 return 0; 1661 } 1662 1663 /* 1664 * (assumption: No Attention from MCP at this stage) 1665 * PMF probably in the middle of TX disable/enable transaction 1666 * 1. Sync IRS for default SB 1667 * 2. Sync SP queue - this guarantees us that attention handling started 1668 * 3. Wait, that TX disable/enable transaction completes 1669 * 1670 * 1+2 guarantee that if DCBX attention was scheduled it already changed 1671 * pending bit of transaction from STARTED-->TX_STOPPED, if we already 1672 * received completion for the transaction the state is TX_STOPPED. 1673 * State will return to STARTED after completion of TX_STOPPED-->STARTED 1674 * transaction. 1675 */ 1676 1677 while (ecore_func_get_state(sc, &sc->func_obj) != 1678 ECORE_F_STATE_STARTED && tout--) { 1679 DELAY(20000); 1680 } 1681 1682 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { 1683 /* 1684 * Failed to complete the transaction in a "good way" 1685 * Force both transactions with CLR bit. 1686 */ 1687 struct ecore_func_state_params func_params = { NULL }; 1688 1689 PMD_DRV_LOG(NOTICE, sc, "Unexpected function state! 
" 1690 "Forcing STARTED-->TX_STOPPED-->STARTED"); 1691 1692 func_params.f_obj = &sc->func_obj; 1693 rte_bit_relaxed_set32(RAMROD_DRV_CLR_ONLY, 1694 &func_params.ramrod_flags); 1695 1696 /* STARTED-->TX_STOPPED */ 1697 func_params.cmd = ECORE_F_CMD_TX_STOP; 1698 ecore_func_state_change(sc, &func_params); 1699 1700 /* TX_STOPPED-->STARTED */ 1701 func_params.cmd = ECORE_F_CMD_TX_START; 1702 return ecore_func_state_change(sc, &func_params); 1703 } 1704 1705 return 0; 1706 } 1707 1708 static int bnx2x_stop_queue(struct bnx2x_softc *sc, int index) 1709 { 1710 struct bnx2x_fastpath *fp = &sc->fp[index]; 1711 struct ecore_queue_state_params q_params = { NULL }; 1712 int rc; 1713 1714 PMD_DRV_LOG(DEBUG, sc, "stopping queue %d cid %d", index, fp->index); 1715 1716 q_params.q_obj = &sc->sp_objs[fp->index].q_obj; 1717 /* We want to wait for completion in this context */ 1718 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 1719 1720 /* Stop the primary connection: */ 1721 1722 /* ...halt the connection */ 1723 q_params.cmd = ECORE_Q_CMD_HALT; 1724 rc = ecore_queue_state_change(sc, &q_params); 1725 if (rc) { 1726 return rc; 1727 } 1728 1729 /* ...terminate the connection */ 1730 q_params.cmd = ECORE_Q_CMD_TERMINATE; 1731 memset(&q_params.params.terminate, 0, 1732 sizeof(q_params.params.terminate)); 1733 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; 1734 rc = ecore_queue_state_change(sc, &q_params); 1735 if (rc) { 1736 return rc; 1737 } 1738 1739 /* ...delete cfc entry */ 1740 q_params.cmd = ECORE_Q_CMD_CFC_DEL; 1741 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); 1742 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; 1743 return ecore_queue_state_change(sc, &q_params); 1744 } 1745 1746 /* wait for the outstanding SP commands */ 1747 static uint8_t bnx2x_wait_sp_comp(struct bnx2x_softc *sc, uint32_t mask) 1748 { 1749 uint32_t tmp; 1750 int tout = 5000; /* wait for 5 secs tops */ 1751 1752 while (tout--) { 1753 mb(); 1754 if (!(atomic_load_acq_int(&sc->sp_state) & mask)) 1755 return TRUE; 1756 1757 DELAY(1000); 1758 } 1759 1760 mb(); 1761 1762 tmp = atomic_load_acq_int(&sc->sp_state); 1763 if (tmp & mask) { 1764 PMD_DRV_LOG(INFO, sc, "Filtering completion timed out: " 1765 "sp_state 0x%x, mask 0x%x", tmp, mask); 1766 return FALSE; 1767 } 1768 1769 return FALSE; 1770 } 1771 1772 static int bnx2x_func_stop(struct bnx2x_softc *sc) 1773 { 1774 struct ecore_func_state_params func_params = { NULL }; 1775 int rc; 1776 1777 /* prepare parameters for function state transitions */ 1778 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 1779 func_params.f_obj = &sc->func_obj; 1780 func_params.cmd = ECORE_F_CMD_STOP; 1781 1782 /* 1783 * Try to stop the function the 'good way'. If it fails (in case 1784 * of a parity error during bnx2x_chip_cleanup()) and we are 1785 * not in a debug mode, perform a state transaction in order to 1786 * enable further HW_RESET transaction. 1787 */ 1788 rc = ecore_func_state_change(sc, &func_params); 1789 if (rc) { 1790 PMD_DRV_LOG(NOTICE, sc, "FUNC_STOP ramrod failed. 
" 1791 "Running a dry transaction"); 1792 rte_bit_relaxed_set32(RAMROD_DRV_CLR_ONLY, 1793 &func_params.ramrod_flags); 1794 return ecore_func_state_change(sc, &func_params); 1795 } 1796 1797 return 0; 1798 } 1799 1800 static int bnx2x_reset_hw(struct bnx2x_softc *sc, uint32_t load_code) 1801 { 1802 struct ecore_func_state_params func_params = { NULL }; 1803 1804 /* Prepare parameters for function state transitions */ 1805 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 1806 1807 func_params.f_obj = &sc->func_obj; 1808 func_params.cmd = ECORE_F_CMD_HW_RESET; 1809 1810 func_params.params.hw_init.load_phase = load_code; 1811 1812 return ecore_func_state_change(sc, &func_params); 1813 } 1814 1815 static void bnx2x_int_disable_sync(struct bnx2x_softc *sc, int disable_hw) 1816 { 1817 if (disable_hw) { 1818 /* prevent the HW from sending interrupts */ 1819 bnx2x_int_disable(sc); 1820 } 1821 } 1822 1823 static void 1824 bnx2x_chip_cleanup(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link) 1825 { 1826 int port = SC_PORT(sc); 1827 struct ecore_mcast_ramrod_params rparam = { NULL }; 1828 uint32_t reset_code; 1829 int i, rc = 0; 1830 1831 bnx2x_drain_tx_queues(sc); 1832 1833 /* give HW time to discard old tx messages */ 1834 DELAY(1000); 1835 1836 /* Clean all ETH MACs */ 1837 rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, 1838 FALSE); 1839 if (rc < 0) { 1840 PMD_DRV_LOG(NOTICE, sc, 1841 "Failed to delete all ETH MACs (%d)", rc); 1842 } 1843 1844 /* Clean up UC list */ 1845 rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, 1846 TRUE); 1847 if (rc < 0) { 1848 PMD_DRV_LOG(NOTICE, sc, 1849 "Failed to delete UC MACs list (%d)", rc); 1850 } 1851 1852 /* Disable LLH */ 1853 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0); 1854 1855 /* Set "drop all" to stop Rx */ 1856 1857 /* 1858 * We need to take the if_maddr_lock() here in order to prevent 1859 * a race between the completion code and this code. 1860 */ 1861 1862 if (rte_bit_relaxed_get32(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) 1863 rte_bit_relaxed_set32(ECORE_FILTER_RX_MODE_SCHED, 1864 &sc->sp_state); 1865 else 1866 bnx2x_set_storm_rx_mode(sc); 1867 1868 /* Clean up multicast configuration */ 1869 rparam.mcast_obj = &sc->mcast_obj; 1870 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 1871 if (rc < 0) { 1872 PMD_DRV_LOG(NOTICE, sc, 1873 "Failed to send DEL MCAST command (%d)", rc); 1874 } 1875 1876 /* 1877 * Send the UNLOAD_REQUEST to the MCP. This will return if 1878 * this function should perform FUNCTION, PORT, or COMMON HW 1879 * reset. 1880 */ 1881 reset_code = bnx2x_send_unload_req(sc, unload_mode); 1882 1883 /* 1884 * (assumption: No Attention from MCP at this stage) 1885 * PMF probably in the middle of TX disable/enable transaction 1886 */ 1887 rc = bnx2x_func_wait_started(sc); 1888 if (rc) { 1889 PMD_DRV_LOG(NOTICE, sc, "bnx2x_func_wait_started failed"); 1890 } 1891 1892 /* 1893 * Close multi and leading connections 1894 * Completions for ramrods are collected in a synchronous way 1895 */ 1896 for (i = 0; i < sc->num_queues; i++) { 1897 if (bnx2x_stop_queue(sc, i)) { 1898 goto unload_error; 1899 } 1900 } 1901 1902 /* 1903 * If SP settings didn't get completed so far - something 1904 * very wrong has happen. 
1905 */ 1906 if (!bnx2x_wait_sp_comp(sc, ~0x0U)) 1907 PMD_DRV_LOG(NOTICE, sc, "Common slow path ramrods got stuck!"); 1908 1909 unload_error: 1910 1911 rc = bnx2x_func_stop(sc); 1912 if (rc) { 1913 PMD_DRV_LOG(NOTICE, sc, "Function stop failed!"); 1914 } 1915 1916 /* disable HW interrupts */ 1917 bnx2x_int_disable_sync(sc, TRUE); 1918 1919 /* Reset the chip */ 1920 rc = bnx2x_reset_hw(sc, reset_code); 1921 if (rc) { 1922 PMD_DRV_LOG(NOTICE, sc, "Hardware reset failed"); 1923 } 1924 1925 /* Report UNLOAD_DONE to MCP */ 1926 bnx2x_send_unload_done(sc, keep_link); 1927 } 1928 1929 static void bnx2x_disable_close_the_gate(struct bnx2x_softc *sc) 1930 { 1931 uint32_t val; 1932 1933 PMD_DRV_LOG(DEBUG, sc, "Disabling 'close the gates'"); 1934 1935 val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK); 1936 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | 1937 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); 1938 REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val); 1939 } 1940 1941 /* 1942 * Cleans the object that have internal lists without sending 1943 * ramrods. Should be run when interrupts are disabled. 1944 */ 1945 static void bnx2x_squeeze_objects(struct bnx2x_softc *sc) 1946 { 1947 uint32_t ramrod_flags = 0, vlan_mac_flags = 0; 1948 struct ecore_mcast_ramrod_params rparam = { NULL }; 1949 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; 1950 int rc; 1951 1952 /* Cleanup MACs' object first... */ 1953 1954 /* Wait for completion of requested */ 1955 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &ramrod_flags); 1956 /* Perform a dry cleanup */ 1957 rte_bit_relaxed_set32(RAMROD_DRV_CLR_ONLY, &ramrod_flags); 1958 1959 /* Clean ETH primary MAC */ 1960 rte_bit_relaxed_set32(ECORE_ETH_MAC, &vlan_mac_flags); 1961 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags, 1962 &ramrod_flags); 1963 if (rc != 0) { 1964 PMD_DRV_LOG(NOTICE, sc, "Failed to clean ETH MACs (%d)", rc); 1965 } 1966 1967 /* Cleanup UC list */ 1968 vlan_mac_flags = 0; 1969 rte_bit_relaxed_set32(ECORE_UC_LIST_MAC, &vlan_mac_flags); 1970 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); 1971 if (rc != 0) { 1972 PMD_DRV_LOG(NOTICE, sc, 1973 "Failed to clean UC list MACs (%d)", rc); 1974 } 1975 1976 /* Now clean mcast object... */ 1977 1978 rparam.mcast_obj = &sc->mcast_obj; 1979 rte_bit_relaxed_set32(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); 1980 1981 /* Add a DEL command... 
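 * (The DEL below is issued with RAMROD_DRV_CLR_ONLY already set in
 * rparam.ramrod_flags, so nothing is actually sent to the chip; the loop
 * further down keeps re-issuing ECORE_MCAST_CMD_CONT until
 * ecore_config_mcast() reports no pending commands left (rc == 0),
 * bailing out early on any negative error code.)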
*/ 1982 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 1983 if (rc < 0) { 1984 PMD_DRV_LOG(NOTICE, sc, 1985 "Failed to send DEL MCAST command (%d)", rc); 1986 } 1987 1988 /* now wait until all pending commands are cleared */ 1989 1990 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 1991 while (rc != 0) { 1992 if (rc < 0) { 1993 PMD_DRV_LOG(NOTICE, sc, 1994 "Failed to clean MCAST object (%d)", rc); 1995 return; 1996 } 1997 1998 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 1999 } 2000 } 2001 2002 /* stop the controller */ 2003 __rte_noinline 2004 int 2005 bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link) 2006 { 2007 uint8_t global = FALSE; 2008 uint32_t val; 2009 2010 PMD_INIT_FUNC_TRACE(sc); 2011 2012 PMD_DRV_LOG(DEBUG, sc, "Starting NIC unload..."); 2013 2014 /* mark driver as unloaded in shmem2 */ 2015 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 2016 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 2017 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 2018 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); 2019 } 2020 2021 if (IS_PF(sc) && sc->recovery_state != BNX2X_RECOVERY_DONE && 2022 (sc->state == BNX2X_STATE_CLOSED || sc->state == BNX2X_STATE_ERROR)) { 2023 /* 2024 * We can get here if the driver has been unloaded 2025 * during parity error recovery and is either waiting for a 2026 * leader to complete or for other functions to unload and 2027 * then ifconfig down has been issued. In this case we want to 2028 * unload and let other functions to complete a recovery 2029 * process. 2030 */ 2031 sc->recovery_state = BNX2X_RECOVERY_DONE; 2032 sc->is_leader = 0; 2033 bnx2x_release_leader_lock(sc); 2034 mb(); 2035 2036 PMD_DRV_LOG(NOTICE, sc, "Can't unload in closed or error state"); 2037 return -1; 2038 } 2039 2040 /* 2041 * Nothing to do during unload if previous bnx2x_nic_load() 2042 * did not complete successfully - all resources are released. 2043 */ 2044 if ((sc->state == BNX2X_STATE_CLOSED) || (sc->state == BNX2X_STATE_ERROR)) { 2045 return 0; 2046 } 2047 2048 sc->state = BNX2X_STATE_CLOSING_WAITING_HALT; 2049 mb(); 2050 2051 sc->rx_mode = BNX2X_RX_MODE_NONE; 2052 bnx2x_set_rx_mode(sc); 2053 mb(); 2054 2055 if (IS_PF(sc)) { 2056 /* set ALWAYS_ALIVE bit in shmem */ 2057 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 2058 2059 bnx2x_drv_pulse(sc); 2060 2061 bnx2x_stats_handle(sc, STATS_EVENT_STOP); 2062 bnx2x_save_statistics(sc); 2063 } 2064 2065 /* wait till consumers catch up with producers in all queues */ 2066 bnx2x_drain_tx_queues(sc); 2067 2068 /* if VF indicate to PF this function is going down (PF will delete sp 2069 * elements and clear initializations 2070 */ 2071 if (IS_VF(sc)) { 2072 bnx2x_vf_unload(sc); 2073 } else if (unload_mode != UNLOAD_RECOVERY) { 2074 /* if this is a normal/close unload need to clean up chip */ 2075 bnx2x_chip_cleanup(sc, unload_mode, keep_link); 2076 } else { 2077 /* Send the UNLOAD_REQUEST to the MCP */ 2078 bnx2x_send_unload_req(sc, unload_mode); 2079 2080 /* 2081 * Prevent transactions to host from the functions on the 2082 * engine that doesn't reset global blocks in case of global 2083 * attention once global blocks are reset and gates are opened 2084 * (the engine which leader will perform the recovery 2085 * last). 
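 * (In practice this is the bnx2x_pf_disable() call just below: it keeps
 * this function from mastering transactions toward the host while the
 * other engine may still be in the middle of its recovery flow.)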
2086 */ 2087 if (!CHIP_IS_E1x(sc)) { 2088 bnx2x_pf_disable(sc); 2089 } 2090 2091 /* disable HW interrupts */ 2092 bnx2x_int_disable_sync(sc, TRUE); 2093 2094 /* Report UNLOAD_DONE to MCP */ 2095 bnx2x_send_unload_done(sc, FALSE); 2096 } 2097 2098 /* 2099 * At this stage no more interrupts will arrive so we may safely clean 2100 * the queueable objects here in case they failed to get cleaned so far. 2101 */ 2102 if (IS_PF(sc)) { 2103 bnx2x_squeeze_objects(sc); 2104 } 2105 2106 /* There should be no more pending SP commands at this stage */ 2107 sc->sp_state = 0; 2108 2109 sc->port.pmf = 0; 2110 2111 if (IS_PF(sc)) { 2112 bnx2x_free_mem(sc); 2113 } 2114 2115 /* free the host hardware/software hsi structures */ 2116 bnx2x_free_hsi_mem(sc); 2117 2118 bnx2x_free_fw_stats_mem(sc); 2119 2120 sc->state = BNX2X_STATE_CLOSED; 2121 2122 /* 2123 * Check if there are pending parity attentions. If there are - set 2124 * RECOVERY_IN_PROGRESS. 2125 */ 2126 if (IS_PF(sc) && bnx2x_chk_parity_attn(sc, &global, FALSE)) { 2127 bnx2x_set_reset_in_progress(sc); 2128 2129 /* Set RESET_IS_GLOBAL if needed */ 2130 if (global) { 2131 bnx2x_set_reset_global(sc); 2132 } 2133 } 2134 2135 /* 2136 * The last driver must disable a "close the gate" if there is no 2137 * parity attention or "process kill" pending. 2138 */ 2139 if (IS_PF(sc) && !bnx2x_clear_pf_load(sc) && 2140 bnx2x_reset_is_done(sc, SC_PATH(sc))) { 2141 bnx2x_disable_close_the_gate(sc); 2142 } 2143 2144 PMD_DRV_LOG(DEBUG, sc, "Ended NIC unload"); 2145 2146 return 0; 2147 } 2148 2149 /* 2150 * Encapsulate an mbuf cluster into the Tx BD chain and makes the memory 2151 * visible to the controller. 2152 * 2153 * If an mbuf is submitted to this routine and cannot be given to the 2154 * controller (e.g. it has too many fragments) then the function may free 2155 * the mbuf and return to the caller. 2156 * 2157 * Returns: 2158 * int: Number of TX BDs used for the mbuf 2159 * 2160 * Note the side effect that an mbuf may be freed if it causes a problem. 
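 * A minimal caller sketch (illustrative only: 'pkts', 'nb_pkts' and the
 * free-space guard are hypothetical names; the real burst routine,
 * bnx2x_xmit_pkts() in bnx2x_rxtx.c, performs additional bookkeeping and
 * rings the TX doorbell after posting the BDs):
 *
 *     for (i = 0; i < nb_pkts; i++) {
 *         // need room for the 2 BDs this routine consumes per packet
 *         if (txq->nb_tx_avail < 2)
 *             break;
 *         bnx2x_tx_encap(txq, pkts[i]);
 *     }
 *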
2161 */ 2162 int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0) 2163 { 2164 struct eth_tx_start_bd *tx_start_bd; 2165 uint16_t bd_prod, pkt_prod; 2166 struct bnx2x_softc *sc; 2167 uint32_t nbds = 0; 2168 2169 sc = txq->sc; 2170 bd_prod = txq->tx_bd_tail; 2171 pkt_prod = txq->tx_pkt_tail; 2172 2173 txq->sw_ring[TX_BD(pkt_prod, txq)] = m0; 2174 2175 tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd; 2176 2177 tx_start_bd->addr_lo = 2178 rte_cpu_to_le_32(U64_LO(rte_mbuf_data_iova(m0))); 2179 tx_start_bd->addr_hi = 2180 rte_cpu_to_le_32(U64_HI(rte_mbuf_data_iova(m0))); 2181 tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len); 2182 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 2183 tx_start_bd->general_data = 2184 (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 2185 2186 tx_start_bd->nbd = rte_cpu_to_le_16(2); 2187 2188 if (m0->ol_flags & RTE_MBUF_F_TX_VLAN) { 2189 tx_start_bd->vlan_or_ethertype = 2190 rte_cpu_to_le_16(m0->vlan_tci); 2191 tx_start_bd->bd_flags.as_bitfield |= 2192 (X_ETH_OUTBAND_VLAN << 2193 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 2194 } else { 2195 if (IS_PF(sc)) 2196 tx_start_bd->vlan_or_ethertype = 2197 rte_cpu_to_le_16(pkt_prod); 2198 else { 2199 /* when transmitting in a vf, start bd 2200 * must hold the ethertype for fw to enforce it 2201 */ 2202 struct rte_ether_hdr *eh = 2203 rte_pktmbuf_mtod(m0, struct rte_ether_hdr *); 2204 2205 /* Still need to consider inband vlan for enforced */ 2206 if (eh->ether_type == 2207 rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) { 2208 struct rte_vlan_hdr *vh = 2209 (struct rte_vlan_hdr *)(eh + 1); 2210 tx_start_bd->bd_flags.as_bitfield |= 2211 (X_ETH_INBAND_VLAN << 2212 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 2213 tx_start_bd->vlan_or_ethertype = 2214 rte_cpu_to_le_16(ntohs(vh->vlan_tci)); 2215 } else { 2216 tx_start_bd->vlan_or_ethertype = 2217 (rte_cpu_to_le_16 2218 (rte_be_to_cpu_16(eh->ether_type))); 2219 } 2220 } 2221 } 2222 2223 bd_prod = NEXT_TX_BD(bd_prod); 2224 if (IS_VF(sc)) { 2225 struct eth_tx_parse_bd_e2 *tx_parse_bd; 2226 const struct rte_ether_hdr *eh = 2227 rte_pktmbuf_mtod(m0, struct rte_ether_hdr *); 2228 uint8_t mac_type = UNICAST_ADDRESS; 2229 2230 tx_parse_bd = 2231 &txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2; 2232 if (rte_is_multicast_ether_addr(&eh->dst_addr)) { 2233 if (rte_is_broadcast_ether_addr(&eh->dst_addr)) 2234 mac_type = BROADCAST_ADDRESS; 2235 else 2236 mac_type = MULTICAST_ADDRESS; 2237 } 2238 tx_parse_bd->parsing_data = 2239 (mac_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT); 2240 2241 rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi, 2242 &eh->dst_addr.addr_bytes[0], 2); 2243 rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid, 2244 &eh->dst_addr.addr_bytes[2], 2); 2245 rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo, 2246 &eh->dst_addr.addr_bytes[4], 2); 2247 rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi, 2248 &eh->src_addr.addr_bytes[0], 2); 2249 rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid, 2250 &eh->src_addr.addr_bytes[2], 2); 2251 rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo, 2252 &eh->src_addr.addr_bytes[4], 2); 2253 2254 tx_parse_bd->data.mac_addr.dst_hi = 2255 rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_hi); 2256 tx_parse_bd->data.mac_addr.dst_mid = 2257 rte_cpu_to_be_16(tx_parse_bd->data. 
2258 mac_addr.dst_mid); 2259 tx_parse_bd->data.mac_addr.dst_lo = 2260 rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_lo); 2261 tx_parse_bd->data.mac_addr.src_hi = 2262 rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_hi); 2263 tx_parse_bd->data.mac_addr.src_mid = 2264 rte_cpu_to_be_16(tx_parse_bd->data. 2265 mac_addr.src_mid); 2266 tx_parse_bd->data.mac_addr.src_lo = 2267 rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_lo); 2268 2269 PMD_TX_LOG(DEBUG, 2270 "PBD dst %x %x %x src %x %x %x p_data %x", 2271 tx_parse_bd->data.mac_addr.dst_hi, 2272 tx_parse_bd->data.mac_addr.dst_mid, 2273 tx_parse_bd->data.mac_addr.dst_lo, 2274 tx_parse_bd->data.mac_addr.src_hi, 2275 tx_parse_bd->data.mac_addr.src_mid, 2276 tx_parse_bd->data.mac_addr.src_lo, 2277 tx_parse_bd->parsing_data); 2278 } 2279 2280 PMD_TX_LOG(DEBUG, 2281 "start bd: nbytes %d flags %x vlan %x", 2282 tx_start_bd->nbytes, 2283 tx_start_bd->bd_flags.as_bitfield, 2284 tx_start_bd->vlan_or_ethertype); 2285 2286 bd_prod = NEXT_TX_BD(bd_prod); 2287 pkt_prod++; 2288 2289 if (TX_IDX(bd_prod) < 2) 2290 nbds++; 2291 2292 txq->nb_tx_avail -= 2; 2293 txq->tx_bd_tail = bd_prod; 2294 txq->tx_pkt_tail = pkt_prod; 2295 2296 return nbds + 2; 2297 } 2298 2299 static uint16_t bnx2x_cid_ilt_lines(struct bnx2x_softc *sc) 2300 { 2301 return L2_ILT_LINES(sc); 2302 } 2303 2304 static void bnx2x_ilt_set_info(struct bnx2x_softc *sc) 2305 { 2306 struct ilt_client_info *ilt_client; 2307 struct ecore_ilt *ilt = sc->ilt; 2308 uint16_t line = 0; 2309 2310 PMD_INIT_FUNC_TRACE(sc); 2311 2312 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); 2313 2314 /* CDU */ 2315 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 2316 ilt_client->client_num = ILT_CLIENT_CDU; 2317 ilt_client->page_size = CDU_ILT_PAGE_SZ; 2318 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 2319 ilt_client->start = line; 2320 line += bnx2x_cid_ilt_lines(sc); 2321 2322 if (CNIC_SUPPORT(sc)) { 2323 line += CNIC_ILT_LINES; 2324 } 2325 2326 ilt_client->end = (line - 1); 2327 2328 /* QM */ 2329 if (QM_INIT(sc->qm_cid_count)) { 2330 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 2331 ilt_client->client_num = ILT_CLIENT_QM; 2332 ilt_client->page_size = QM_ILT_PAGE_SZ; 2333 ilt_client->flags = 0; 2334 ilt_client->start = line; 2335 2336 /* 4 bytes for each cid */ 2337 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 2338 QM_ILT_PAGE_SZ); 2339 2340 ilt_client->end = (line - 1); 2341 } 2342 2343 if (CNIC_SUPPORT(sc)) { 2344 /* SRC */ 2345 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 2346 ilt_client->client_num = ILT_CLIENT_SRC; 2347 ilt_client->page_size = SRC_ILT_PAGE_SZ; 2348 ilt_client->flags = 0; 2349 ilt_client->start = line; 2350 line += SRC_ILT_LINES; 2351 ilt_client->end = (line - 1); 2352 2353 /* TM */ 2354 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 2355 ilt_client->client_num = ILT_CLIENT_TM; 2356 ilt_client->page_size = TM_ILT_PAGE_SZ; 2357 ilt_client->flags = 0; 2358 ilt_client->start = line; 2359 line += TM_ILT_LINES; 2360 ilt_client->end = (line - 1); 2361 } 2362 2363 assert((line <= ILT_MAX_LINES)); 2364 } 2365 2366 static void bnx2x_set_fp_rx_buf_size(struct bnx2x_softc *sc) 2367 { 2368 int i; 2369 2370 for (i = 0; i < sc->num_queues; i++) { 2371 /* get the Rx buffer size for RX frames */ 2372 sc->fp[i].rx_buf_size = 2373 (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); 2374 } 2375 } 2376 2377 int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc) 2378 { 2379 2380 sc->ilt = rte_malloc("", sizeof(struct ecore_ilt), RTE_CACHE_LINE_SIZE); 2381 2382 return sc->ilt == NULL; 2383 } 2384 2385 static int 
bnx2x_alloc_ilt_lines_mem(struct bnx2x_softc *sc) 2386 { 2387 sc->ilt->lines = rte_calloc("", 2388 ILT_MAX_LINES, sizeof(struct ilt_line), 2389 RTE_CACHE_LINE_SIZE); 2390 return sc->ilt->lines == NULL; 2391 } 2392 2393 void bnx2x_free_ilt_mem(struct bnx2x_softc *sc) 2394 { 2395 rte_free(sc->ilt); 2396 sc->ilt = NULL; 2397 } 2398 2399 static void bnx2x_free_ilt_lines_mem(struct bnx2x_softc *sc) 2400 { 2401 if (sc->ilt->lines != NULL) { 2402 rte_free(sc->ilt->lines); 2403 sc->ilt->lines = NULL; 2404 } 2405 } 2406 2407 static void bnx2x_free_mem(struct bnx2x_softc *sc) 2408 { 2409 uint32_t i; 2410 2411 for (i = 0; i < L2_ILT_LINES(sc); i++) { 2412 sc->context[i].vcxt = NULL; 2413 sc->context[i].size = 0; 2414 } 2415 2416 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); 2417 2418 bnx2x_free_ilt_lines_mem(sc); 2419 } 2420 2421 static int bnx2x_alloc_mem(struct bnx2x_softc *sc) 2422 { 2423 int context_size; 2424 int allocated; 2425 int i; 2426 char cdu_name[RTE_MEMZONE_NAMESIZE]; 2427 2428 /* 2429 * Allocate memory for CDU context: 2430 * This memory is allocated separately and not in the generic ILT 2431 * functions because CDU differs in few aspects: 2432 * 1. There can be multiple entities allocating memory for context - 2433 * regular L2, CNIC, and SRIOV drivers. Each separately controls 2434 * its own ILT lines. 2435 * 2. Since CDU page-size is not a single 4KB page (which is the case 2436 * for the other ILT clients), to be efficient we want to support 2437 * allocation of sub-page-size in the last entry. 2438 * 3. Context pointers are used by the driver to pass to FW / update 2439 * the context (for the other ILT clients the pointers are used just to 2440 * free the memory during unload). 2441 */ 2442 context_size = (sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(sc)); 2443 for (i = 0, allocated = 0; allocated < context_size; i++) { 2444 sc->context[i].size = min(CDU_ILT_PAGE_SZ, 2445 (context_size - allocated)); 2446 2447 snprintf(cdu_name, sizeof(cdu_name), "cdu_%d", i); 2448 if (bnx2x_dma_alloc(sc, sc->context[i].size, 2449 &sc->context[i].vcxt_dma, 2450 cdu_name, BNX2X_PAGE_SIZE) != 0) { 2451 bnx2x_free_mem(sc); 2452 return -1; 2453 } 2454 2455 sc->context[i].vcxt = 2456 (union cdu_context *)sc->context[i].vcxt_dma.vaddr; 2457 2458 allocated += sc->context[i].size; 2459 } 2460 2461 bnx2x_alloc_ilt_lines_mem(sc); 2462 2463 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { 2464 PMD_DRV_LOG(NOTICE, sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed"); 2465 bnx2x_free_mem(sc); 2466 return -1; 2467 } 2468 2469 return 0; 2470 } 2471 2472 static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc) 2473 { 2474 bnx2x_dma_free(&sc->fw_stats_dma); 2475 sc->fw_stats_num = 0; 2476 2477 sc->fw_stats_req_size = 0; 2478 sc->fw_stats_req = NULL; 2479 sc->fw_stats_req_mapping = 0; 2480 2481 sc->fw_stats_data_size = 0; 2482 sc->fw_stats_data = NULL; 2483 sc->fw_stats_data_mapping = 0; 2484 } 2485 2486 static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc) 2487 { 2488 uint8_t num_queue_stats; 2489 int num_groups, vf_headroom = 0; 2490 2491 /* number of queues for statistics is number of eth queues */ 2492 num_queue_stats = BNX2X_NUM_ETH_QUEUES(sc); 2493 2494 /* 2495 * Total number of FW statistics requests = 2496 * 1 for port stats + 1 for PF stats + num of queues 2497 */ 2498 sc->fw_stats_num = (2 + num_queue_stats); 2499 2500 /* 2501 * Request is built from stats_query_header and an array of 2502 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT 2503 * rules. 
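 * (Worked example with a hypothetical 4 ETH queues and no VF headroom:
 * fw_stats_num = 2 + 4 = 6, so as long as STATS_QUERY_CMD_COUNT is at
 * least 6 a single stats_query_cmd_group suffices and the request is one
 * stats_query_header plus one group.)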
The real number of requests is configured in the 2504 * stats_query_header. 2505 */ 2506 num_groups = (sc->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT; 2507 if ((sc->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) 2508 num_groups++; 2509 2510 sc->fw_stats_req_size = 2511 (sizeof(struct stats_query_header) + 2512 (num_groups * sizeof(struct stats_query_cmd_group))); 2513 2514 /* 2515 * Data for statistics requests + stats_counter. 2516 * stats_counter holds per-STORM counters that are incremented when 2517 * STORM has finished with the current request. Memory for FCoE 2518 * offloaded statistics is counted anyway, even if it will not be sent. 2519 * VF stats are not accounted for here as the data of VF stats is stored 2520 * in memory allocated by the VF, not here. 2521 */ 2522 sc->fw_stats_data_size = 2523 (sizeof(struct stats_counter) + 2524 sizeof(struct per_port_stats) + sizeof(struct per_pf_stats) + 2525 /* sizeof(struct fcoe_statistics_params) + */ 2526 (sizeof(struct per_queue_stats) * num_queue_stats)); 2527 2528 if (bnx2x_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), 2529 &sc->fw_stats_dma, "fw_stats", 2530 RTE_CACHE_LINE_SIZE) != 0) { 2531 bnx2x_free_fw_stats_mem(sc); 2532 return -1; 2533 } 2534 2535 /* set up the shortcuts */ 2536 2537 sc->fw_stats_req = (struct bnx2x_fw_stats_req *)sc->fw_stats_dma.vaddr; 2538 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; 2539 2540 sc->fw_stats_data = 2541 (struct bnx2x_fw_stats_data *)((uint8_t *) sc->fw_stats_dma.vaddr + 2542 sc->fw_stats_req_size); 2543 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + 2544 sc->fw_stats_req_size); 2545 2546 return 0; 2547 } 2548 2549 /* 2550 * Bits map: 2551 * 0-7 - Engine0 load counter. 2552 * 8-15 - Engine1 load counter. 2553 * 16 - Engine0 RESET_IN_PROGRESS bit. 2554 * 17 - Engine1 RESET_IN_PROGRESS bit. 2555 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active 2556 * function on the engine 2557 * 19 - Engine1 ONE_IS_LOADED. 2558 * 20 - Chip reset flow bit. When set, a non-leader must wait for both engines' 2559 * leaders to complete (check for both RESET_IN_PROGRESS bits and not 2560 * for just the one belonging to its engine).
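 * Worked example (illustrative numbers): a raw register value of 0x00050a00
 * decodes as engine 0 load counter 0x00 (no PF currently loaded there),
 * engine 1 load counter 0x0a (the PFs with absolute function numbers 1 and
 * 3 have set their bits via bnx2x_set_pf_load()), BNX2X_PATH0_RST_IN_PROG_BIT
 * set (engine 0 reset still in progress) and BNX2X_GLOBAL_RESET_BIT set
 * (the recovery is global).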
2561 */ 2562 #define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 2563 #define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff 2564 #define BNX2X_PATH0_LOAD_CNT_SHIFT 0 2565 #define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00 2566 #define BNX2X_PATH1_LOAD_CNT_SHIFT 8 2567 #define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000 2568 #define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000 2569 #define BNX2X_GLOBAL_RESET_BIT 0x00040000 2570 2571 /* set the GLOBAL_RESET bit, should be run under rtnl lock */ 2572 static void bnx2x_set_reset_global(struct bnx2x_softc *sc) 2573 { 2574 uint32_t val; 2575 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2576 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2577 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); 2578 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2579 } 2580 2581 /* clear the GLOBAL_RESET bit, should be run under rtnl lock */ 2582 static void bnx2x_clear_reset_global(struct bnx2x_softc *sc) 2583 { 2584 uint32_t val; 2585 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2586 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2587 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); 2588 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2589 } 2590 2591 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */ 2592 static uint8_t bnx2x_reset_is_global(struct bnx2x_softc *sc) 2593 { 2594 return REG_RD(sc, BNX2X_RECOVERY_GLOB_REG) & BNX2X_GLOBAL_RESET_BIT; 2595 } 2596 2597 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */ 2598 static void bnx2x_set_reset_done(struct bnx2x_softc *sc) 2599 { 2600 uint32_t val; 2601 uint32_t bit = SC_PATH(sc) ? BNX2X_PATH1_RST_IN_PROG_BIT : 2602 BNX2X_PATH0_RST_IN_PROG_BIT; 2603 2604 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2605 2606 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2607 /* Clear the bit */ 2608 val &= ~bit; 2609 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); 2610 2611 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2612 } 2613 2614 /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ 2615 static void bnx2x_set_reset_in_progress(struct bnx2x_softc *sc) 2616 { 2617 uint32_t val; 2618 uint32_t bit = SC_PATH(sc) ? BNX2X_PATH1_RST_IN_PROG_BIT : 2619 BNX2X_PATH0_RST_IN_PROG_BIT; 2620 2621 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2622 2623 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2624 /* Set the bit */ 2625 val |= bit; 2626 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); 2627 2628 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2629 } 2630 2631 /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ 2632 static uint8_t bnx2x_reset_is_done(struct bnx2x_softc *sc, int engine) 2633 { 2634 uint32_t val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2635 uint32_t bit = engine ? BNX2X_PATH1_RST_IN_PROG_BIT : 2636 BNX2X_PATH0_RST_IN_PROG_BIT; 2637 2638 /* return false if bit is set */ 2639 return (val & bit) ? FALSE : TRUE; 2640 } 2641 2642 /* get the load status for an engine, should be run under rtnl lock */ 2643 static uint8_t bnx2x_get_load_status(struct bnx2x_softc *sc, int engine) 2644 { 2645 uint32_t mask = engine ? BNX2X_PATH1_LOAD_CNT_MASK : 2646 BNX2X_PATH0_LOAD_CNT_MASK; 2647 uint32_t shift = engine ? 
BNX2X_PATH1_LOAD_CNT_SHIFT : 2648 BNX2X_PATH0_LOAD_CNT_SHIFT; 2649 uint32_t val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2650 2651 val = ((val & mask) >> shift); 2652 2653 return val != 0; 2654 } 2655 2656 /* set pf load mark */ 2657 static void bnx2x_set_pf_load(struct bnx2x_softc *sc) 2658 { 2659 uint32_t val; 2660 uint32_t val1; 2661 uint32_t mask = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_MASK : 2662 BNX2X_PATH0_LOAD_CNT_MASK; 2663 uint32_t shift = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 2664 BNX2X_PATH0_LOAD_CNT_SHIFT; 2665 2666 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2667 2668 PMD_INIT_FUNC_TRACE(sc); 2669 2670 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2671 2672 /* get the current counter value */ 2673 val1 = ((val & mask) >> shift); 2674 2675 /* set bit of this PF */ 2676 val1 |= (1 << SC_ABS_FUNC(sc)); 2677 2678 /* clear the old value */ 2679 val &= ~mask; 2680 2681 /* set the new one */ 2682 val |= ((val1 << shift) & mask); 2683 2684 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); 2685 2686 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2687 } 2688 2689 /* clear pf load mark */ 2690 static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc) 2691 { 2692 uint32_t val1, val; 2693 uint32_t mask = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_MASK : 2694 BNX2X_PATH0_LOAD_CNT_MASK; 2695 uint32_t shift = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 2696 BNX2X_PATH0_LOAD_CNT_SHIFT; 2697 2698 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2699 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2700 2701 /* get the current counter value */ 2702 val1 = (val & mask) >> shift; 2703 2704 /* clear bit of that PF */ 2705 val1 &= ~(1 << SC_ABS_FUNC(sc)); 2706 2707 /* clear the old value */ 2708 val &= ~mask; 2709 2710 /* set the new one */ 2711 val |= ((val1 << shift) & mask); 2712 2713 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); 2714 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2715 return val1 != 0; 2716 } 2717 2718 /* send load request to MCP and analyze response */ 2719 static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t * load_code) 2720 { 2721 PMD_INIT_FUNC_TRACE(sc); 2722 2723 /* init fw_seq */ 2724 sc->fw_seq = 2725 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 2726 DRV_MSG_SEQ_NUMBER_MASK); 2727 2728 PMD_DRV_LOG(DEBUG, sc, "initial fw_seq 0x%04x", sc->fw_seq); 2729 2730 #ifdef BNX2X_PULSE 2731 /* get the current FW pulse sequence */ 2732 sc->fw_drv_pulse_wr_seq = 2733 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) & 2734 DRV_PULSE_SEQ_MASK); 2735 #else 2736 /* set ALWAYS_ALIVE bit in shmem */ 2737 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 2738 bnx2x_drv_pulse(sc); 2739 #endif 2740 2741 /* load request */ 2742 (*load_code) = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 2743 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 2744 2745 /* if the MCP fails to respond we must abort */ 2746 if (!(*load_code)) { 2747 PMD_DRV_LOG(NOTICE, sc, "MCP response failure!"); 2748 return -1; 2749 } 2750 2751 /* if MCP refused then must abort */ 2752 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { 2753 PMD_DRV_LOG(NOTICE, sc, "MCP refused load request"); 2754 return -1; 2755 } 2756 2757 return 0; 2758 } 2759 2760 /* 2761 * Check whether another PF has already loaded FW to chip. In virtualized 2762 * environments a pf from anoth VM may have already initialized the device 2763 * including loading FW. 
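 * (The version dword built below packs major/minor/revision/engineering
 * into ascending bytes; for example a hypothetical FW 7.13.11.0 gives
 * my_fw = 7 + (13 << 8) + (11 << 16) + (0 << 24) = 0x000b0d07, which is
 * then compared against the value read back from XSEM_REG_PRAM.)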
2764 */ 2765 static int bnx2x_nic_load_analyze_req(struct bnx2x_softc *sc, uint32_t load_code) 2766 { 2767 uint32_t my_fw, loaded_fw; 2768 2769 /* is another pf loaded on this engine? */ 2770 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 2771 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 2772 /* build my FW version dword */ 2773 my_fw = (BNX2X_5710_FW_MAJOR_VERSION + 2774 (BNX2X_5710_FW_MINOR_VERSION << 8) + 2775 (BNX2X_5710_FW_REVISION_VERSION << 16) + 2776 (BNX2X_5710_FW_ENGINEERING_VERSION << 24)); 2777 2778 /* read loaded FW from chip */ 2779 loaded_fw = REG_RD(sc, XSEM_REG_PRAM); 2780 PMD_DRV_LOG(DEBUG, sc, "loaded FW 0x%08x / my FW 0x%08x", 2781 loaded_fw, my_fw); 2782 2783 /* abort nic load if version mismatch */ 2784 if (my_fw != loaded_fw) { 2785 PMD_DRV_LOG(NOTICE, sc, 2786 "FW 0x%08x already loaded (mine is 0x%08x)", 2787 loaded_fw, my_fw); 2788 return -1; 2789 } 2790 } 2791 2792 return 0; 2793 } 2794 2795 /* mark PMF if applicable */ 2796 static void bnx2x_nic_load_pmf(struct bnx2x_softc *sc, uint32_t load_code) 2797 { 2798 uint32_t ncsi_oem_data_addr; 2799 2800 PMD_INIT_FUNC_TRACE(sc); 2801 2802 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 2803 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || 2804 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { 2805 /* 2806 * Barrier here for ordering between the writing to sc->port.pmf here 2807 * and reading it from the periodic task. 2808 */ 2809 sc->port.pmf = 1; 2810 mb(); 2811 } else { 2812 sc->port.pmf = 0; 2813 } 2814 2815 PMD_DRV_LOG(DEBUG, sc, "pmf %d", sc->port.pmf); 2816 2817 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { 2818 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { 2819 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); 2820 if (ncsi_oem_data_addr) { 2821 REG_WR(sc, 2822 (ncsi_oem_data_addr + 2823 offsetof(struct glob_ncsi_oem_data, 2824 driver_version)), 0); 2825 } 2826 } 2827 } 2828 } 2829 2830 static void bnx2x_read_mf_cfg(struct bnx2x_softc *sc) 2831 { 2832 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); 2833 int abs_func; 2834 int vn; 2835 2836 if (BNX2X_NOMCP(sc)) { 2837 return; /* what should be the default bvalue in this case */ 2838 } 2839 2840 /* 2841 * The formula for computing the absolute function number is... 
2842 * For 2 port configuration (4 functions per port): 2843 * abs_func = 2 * vn + SC_PORT + SC_PATH 2844 * For 4 port configuration (2 functions per port): 2845 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH 2846 */ 2847 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 2848 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); 2849 if (abs_func >= E1H_FUNC_MAX) { 2850 break; 2851 } 2852 sc->devinfo.mf_info.mf_config[vn] = 2853 MFCFG_RD(sc, func_mf_config[abs_func].config); 2854 } 2855 2856 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & 2857 FUNC_MF_CFG_FUNC_DISABLED) { 2858 PMD_DRV_LOG(DEBUG, sc, "mf_cfg function disabled"); 2859 sc->flags |= BNX2X_MF_FUNC_DIS; 2860 } else { 2861 PMD_DRV_LOG(DEBUG, sc, "mf_cfg function enabled"); 2862 sc->flags &= ~BNX2X_MF_FUNC_DIS; 2863 } 2864 } 2865 2866 /* acquire split MCP access lock register */ 2867 static int bnx2x_acquire_alr(struct bnx2x_softc *sc) 2868 { 2869 uint32_t j, val; 2870 2871 for (j = 0; j < 1000; j++) { 2872 val = (1UL << 31); 2873 REG_WR(sc, GRCBASE_MCP + 0x9c, val); 2874 val = REG_RD(sc, GRCBASE_MCP + 0x9c); 2875 if (val & (1L << 31)) 2876 break; 2877 2878 DELAY(5000); 2879 } 2880 2881 if (!(val & (1L << 31))) { 2882 PMD_DRV_LOG(NOTICE, sc, "Cannot acquire MCP access lock register"); 2883 return -1; 2884 } 2885 2886 return 0; 2887 } 2888 2889 /* release split MCP access lock register */ 2890 static void bnx2x_release_alr(struct bnx2x_softc *sc) 2891 { 2892 REG_WR(sc, GRCBASE_MCP + 0x9c, 0); 2893 } 2894 2895 static void bnx2x_fan_failure(struct bnx2x_softc *sc) 2896 { 2897 int port = SC_PORT(sc); 2898 uint32_t ext_phy_config; 2899 2900 /* mark the failure */ 2901 ext_phy_config = 2902 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); 2903 2904 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 2905 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 2906 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, 2907 ext_phy_config); 2908 2909 /* log the failure */ 2910 PMD_DRV_LOG(INFO, sc, 2911 "Fan Failure has caused the driver to shutdown " 2912 "the card to prevent permanent damage. 
" 2913 "Please contact OEM Support for assistance"); 2914 2915 rte_panic("Schedule task to handle fan failure"); 2916 } 2917 2918 /* this function is called upon a link interrupt */ 2919 static void bnx2x_link_attn(struct bnx2x_softc *sc) 2920 { 2921 uint32_t pause_enabled = 0; 2922 struct host_port_stats *pstats; 2923 int cmng_fns; 2924 2925 /* Make sure that we are synced with the current statistics */ 2926 bnx2x_stats_handle(sc, STATS_EVENT_STOP); 2927 2928 elink_link_update(&sc->link_params, &sc->link_vars); 2929 2930 if (sc->link_vars.link_up) { 2931 2932 /* dropless flow control */ 2933 if (sc->dropless_fc) { 2934 pause_enabled = 0; 2935 2936 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 2937 pause_enabled = 1; 2938 } 2939 2940 REG_WR(sc, 2941 (BAR_USTRORM_INTMEM + 2942 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), 2943 pause_enabled); 2944 } 2945 2946 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { 2947 pstats = BNX2X_SP(sc, port_stats); 2948 /* reset old mac stats */ 2949 memset(&(pstats->mac_stx[0]), 0, 2950 sizeof(struct mac_stx)); 2951 } 2952 2953 if (sc->state == BNX2X_STATE_OPEN) { 2954 bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); 2955 } 2956 } 2957 2958 if (sc->link_vars.link_up && sc->link_vars.line_speed) { 2959 cmng_fns = bnx2x_get_cmng_fns_mode(sc); 2960 2961 if (cmng_fns != CMNG_FNS_NONE) { 2962 bnx2x_cmng_fns_init(sc, FALSE, cmng_fns); 2963 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 2964 } 2965 } 2966 2967 bnx2x_link_report_locked(sc); 2968 2969 if (IS_MF(sc)) { 2970 bnx2x_link_sync_notify(sc); 2971 } 2972 } 2973 2974 static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted) 2975 { 2976 int port = SC_PORT(sc); 2977 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 2978 MISC_REG_AEU_MASK_ATTN_FUNC_0; 2979 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 2980 NIG_REG_MASK_INTERRUPT_PORT0; 2981 uint32_t aeu_mask; 2982 uint32_t nig_mask = 0; 2983 uint32_t reg_addr; 2984 uint32_t igu_acked; 2985 uint32_t cnt; 2986 2987 if (sc->attn_state & asserted) { 2988 PMD_DRV_LOG(ERR, sc, "IGU ERROR attn=0x%08x", asserted); 2989 } 2990 2991 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 2992 2993 aeu_mask = REG_RD(sc, aeu_addr); 2994 2995 aeu_mask &= ~(asserted & 0x3ff); 2996 2997 REG_WR(sc, aeu_addr, aeu_mask); 2998 2999 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 3000 3001 sc->attn_state |= asserted; 3002 3003 if (asserted & ATTN_HARD_WIRED_MASK) { 3004 if (asserted & ATTN_NIG_FOR_FUNC) { 3005 3006 bnx2x_acquire_phy_lock(sc); 3007 /* save nig interrupt mask */ 3008 nig_mask = REG_RD(sc, nig_int_mask_addr); 3009 3010 /* If nig_mask is not set, no need to call the update function */ 3011 if (nig_mask) { 3012 REG_WR(sc, nig_int_mask_addr, 0); 3013 3014 bnx2x_link_attn(sc); 3015 } 3016 3017 /* handle unicore attn? 
*/ 3018 } 3019 3020 if (asserted & ATTN_SW_TIMER_4_FUNC) { 3021 PMD_DRV_LOG(DEBUG, sc, "ATTN_SW_TIMER_4_FUNC!"); 3022 } 3023 3024 if (asserted & GPIO_2_FUNC) { 3025 PMD_DRV_LOG(DEBUG, sc, "GPIO_2_FUNC!"); 3026 } 3027 3028 if (asserted & GPIO_3_FUNC) { 3029 PMD_DRV_LOG(DEBUG, sc, "GPIO_3_FUNC!"); 3030 } 3031 3032 if (asserted & GPIO_4_FUNC) { 3033 PMD_DRV_LOG(DEBUG, sc, "GPIO_4_FUNC!"); 3034 } 3035 3036 if (port == 0) { 3037 if (asserted & ATTN_GENERAL_ATTN_1) { 3038 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_1!"); 3039 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 3040 } 3041 if (asserted & ATTN_GENERAL_ATTN_2) { 3042 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_2!"); 3043 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 3044 } 3045 if (asserted & ATTN_GENERAL_ATTN_3) { 3046 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_3!"); 3047 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 3048 } 3049 } else { 3050 if (asserted & ATTN_GENERAL_ATTN_4) { 3051 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_4!"); 3052 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 3053 } 3054 if (asserted & ATTN_GENERAL_ATTN_5) { 3055 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_5!"); 3056 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 3057 } 3058 if (asserted & ATTN_GENERAL_ATTN_6) { 3059 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_6!"); 3060 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 3061 } 3062 } 3063 } 3064 /* hardwired */ 3065 if (sc->devinfo.int_block == INT_BLOCK_HC) { 3066 reg_addr = 3067 (HC_REG_COMMAND_REG + port * 32 + 3068 COMMAND_REG_ATTN_BITS_SET); 3069 } else { 3070 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER * 8); 3071 } 3072 3073 PMD_DRV_LOG(DEBUG, sc, "about to mask 0x%08x at %s addr 0x%08x", 3074 asserted, 3075 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", 3076 reg_addr); 3077 REG_WR(sc, reg_addr, asserted); 3078 3079 /* now set back the mask */ 3080 if (asserted & ATTN_NIG_FOR_FUNC) { 3081 /* 3082 * Verify that IGU ack through BAR was written before restoring 3083 * NIG mask. This loop should exit after 2-3 iterations max. 3084 */ 3085 if (sc->devinfo.int_block != INT_BLOCK_HC) { 3086 cnt = 0; 3087 3088 do { 3089 igu_acked = 3090 REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); 3091 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) 3092 && (++cnt < MAX_IGU_ATTN_ACK_TO)); 3093 3094 if (!igu_acked) { 3095 PMD_DRV_LOG(ERR, sc, 3096 "Failed to verify IGU ack on time"); 3097 } 3098 3099 mb(); 3100 } 3101 3102 REG_WR(sc, nig_int_mask_addr, nig_mask); 3103 3104 bnx2x_release_phy_lock(sc); 3105 } 3106 } 3107 3108 static void 3109 bnx2x_print_next_block(__rte_unused struct bnx2x_softc *sc, __rte_unused int idx, 3110 __rte_unused const char *blk) 3111 { 3112 PMD_DRV_LOG(INFO, sc, "%s%s", idx ? 
", " : "", blk); 3113 } 3114 3115 static int 3116 bnx2x_check_blocks_with_parity0(struct bnx2x_softc *sc, uint32_t sig, int par_num, 3117 uint8_t print) 3118 { 3119 uint32_t cur_bit = 0; 3120 int i = 0; 3121 3122 for (i = 0; sig; i++) { 3123 cur_bit = ((uint32_t) 0x1 << i); 3124 if (sig & cur_bit) { 3125 switch (cur_bit) { 3126 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 3127 if (print) 3128 bnx2x_print_next_block(sc, par_num++, 3129 "BRB"); 3130 break; 3131 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 3132 if (print) 3133 bnx2x_print_next_block(sc, par_num++, 3134 "PARSER"); 3135 break; 3136 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 3137 if (print) 3138 bnx2x_print_next_block(sc, par_num++, 3139 "TSDM"); 3140 break; 3141 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 3142 if (print) 3143 bnx2x_print_next_block(sc, par_num++, 3144 "SEARCHER"); 3145 break; 3146 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 3147 if (print) 3148 bnx2x_print_next_block(sc, par_num++, 3149 "TCM"); 3150 break; 3151 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 3152 if (print) 3153 bnx2x_print_next_block(sc, par_num++, 3154 "TSEMI"); 3155 break; 3156 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 3157 if (print) 3158 bnx2x_print_next_block(sc, par_num++, 3159 "XPB"); 3160 break; 3161 } 3162 3163 /* Clear the bit */ 3164 sig &= ~cur_bit; 3165 } 3166 } 3167 3168 return par_num; 3169 } 3170 3171 static int 3172 bnx2x_check_blocks_with_parity1(struct bnx2x_softc *sc, uint32_t sig, int par_num, 3173 uint8_t * global, uint8_t print) 3174 { 3175 int i = 0; 3176 uint32_t cur_bit = 0; 3177 for (i = 0; sig; i++) { 3178 cur_bit = ((uint32_t) 0x1 << i); 3179 if (sig & cur_bit) { 3180 switch (cur_bit) { 3181 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 3182 if (print) 3183 bnx2x_print_next_block(sc, par_num++, 3184 "PBF"); 3185 break; 3186 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 3187 if (print) 3188 bnx2x_print_next_block(sc, par_num++, 3189 "QM"); 3190 break; 3191 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 3192 if (print) 3193 bnx2x_print_next_block(sc, par_num++, 3194 "TM"); 3195 break; 3196 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 3197 if (print) 3198 bnx2x_print_next_block(sc, par_num++, 3199 "XSDM"); 3200 break; 3201 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 3202 if (print) 3203 bnx2x_print_next_block(sc, par_num++, 3204 "XCM"); 3205 break; 3206 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 3207 if (print) 3208 bnx2x_print_next_block(sc, par_num++, 3209 "XSEMI"); 3210 break; 3211 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 3212 if (print) 3213 bnx2x_print_next_block(sc, par_num++, 3214 "DOORBELLQ"); 3215 break; 3216 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 3217 if (print) 3218 bnx2x_print_next_block(sc, par_num++, 3219 "NIG"); 3220 break; 3221 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 3222 if (print) 3223 bnx2x_print_next_block(sc, par_num++, 3224 "VAUX PCI CORE"); 3225 *global = TRUE; 3226 break; 3227 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 3228 if (print) 3229 bnx2x_print_next_block(sc, par_num++, 3230 "DEBUG"); 3231 break; 3232 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 3233 if (print) 3234 bnx2x_print_next_block(sc, par_num++, 3235 "USDM"); 3236 break; 3237 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 3238 if (print) 3239 bnx2x_print_next_block(sc, par_num++, 3240 "UCM"); 3241 break; 3242 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 3243 if (print) 3244 bnx2x_print_next_block(sc, par_num++, 3245 "USEMI"); 3246 break; 3247 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 3248 
if (print) 3249 bnx2x_print_next_block(sc, par_num++, 3250 "UPB"); 3251 break; 3252 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 3253 if (print) 3254 bnx2x_print_next_block(sc, par_num++, 3255 "CSDM"); 3256 break; 3257 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 3258 if (print) 3259 bnx2x_print_next_block(sc, par_num++, 3260 "CCM"); 3261 break; 3262 } 3263 3264 /* Clear the bit */ 3265 sig &= ~cur_bit; 3266 } 3267 } 3268 3269 return par_num; 3270 } 3271 3272 static int 3273 bnx2x_check_blocks_with_parity2(struct bnx2x_softc *sc, uint32_t sig, int par_num, 3274 uint8_t print) 3275 { 3276 uint32_t cur_bit = 0; 3277 int i = 0; 3278 3279 for (i = 0; sig; i++) { 3280 cur_bit = ((uint32_t) 0x1 << i); 3281 if (sig & cur_bit) { 3282 switch (cur_bit) { 3283 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 3284 if (print) 3285 bnx2x_print_next_block(sc, par_num++, 3286 "CSEMI"); 3287 break; 3288 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 3289 if (print) 3290 bnx2x_print_next_block(sc, par_num++, 3291 "PXP"); 3292 break; 3293 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 3294 if (print) 3295 bnx2x_print_next_block(sc, par_num++, 3296 "PXPPCICLOCKCLIENT"); 3297 break; 3298 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 3299 if (print) 3300 bnx2x_print_next_block(sc, par_num++, 3301 "CFC"); 3302 break; 3303 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 3304 if (print) 3305 bnx2x_print_next_block(sc, par_num++, 3306 "CDU"); 3307 break; 3308 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 3309 if (print) 3310 bnx2x_print_next_block(sc, par_num++, 3311 "DMAE"); 3312 break; 3313 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 3314 if (print) 3315 bnx2x_print_next_block(sc, par_num++, 3316 "IGU"); 3317 break; 3318 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 3319 if (print) 3320 bnx2x_print_next_block(sc, par_num++, 3321 "MISC"); 3322 break; 3323 } 3324 3325 /* Clear the bit */ 3326 sig &= ~cur_bit; 3327 } 3328 } 3329 3330 return par_num; 3331 } 3332 3333 static int 3334 bnx2x_check_blocks_with_parity3(struct bnx2x_softc *sc, uint32_t sig, int par_num, 3335 uint8_t * global, uint8_t print) 3336 { 3337 uint32_t cur_bit = 0; 3338 int i = 0; 3339 3340 for (i = 0; sig; i++) { 3341 cur_bit = ((uint32_t) 0x1 << i); 3342 if (sig & cur_bit) { 3343 switch (cur_bit) { 3344 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 3345 if (print) 3346 bnx2x_print_next_block(sc, par_num++, 3347 "MCP ROM"); 3348 *global = TRUE; 3349 break; 3350 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 3351 if (print) 3352 bnx2x_print_next_block(sc, par_num++, 3353 "MCP UMP RX"); 3354 *global = TRUE; 3355 break; 3356 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 3357 if (print) 3358 bnx2x_print_next_block(sc, par_num++, 3359 "MCP UMP TX"); 3360 *global = TRUE; 3361 break; 3362 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 3363 if (print) 3364 bnx2x_print_next_block(sc, par_num++, 3365 "MCP SCPAD"); 3366 *global = TRUE; 3367 break; 3368 } 3369 3370 /* Clear the bit */ 3371 sig &= ~cur_bit; 3372 } 3373 } 3374 3375 return par_num; 3376 } 3377 3378 static int 3379 bnx2x_check_blocks_with_parity4(struct bnx2x_softc *sc, uint32_t sig, int par_num, 3380 uint8_t print) 3381 { 3382 uint32_t cur_bit = 0; 3383 int i = 0; 3384 3385 for (i = 0; sig; i++) { 3386 cur_bit = ((uint32_t) 0x1 << i); 3387 if (sig & cur_bit) { 3388 switch (cur_bit) { 3389 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 3390 if (print) 3391 bnx2x_print_next_block(sc, par_num++, 3392 "PGLUE_B"); 3393 break; 3394 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 3395 if 
(print) 3396 bnx2x_print_next_block(sc, par_num++, 3397 "ATC"); 3398 break; 3399 } 3400 3401 /* Clear the bit */ 3402 sig &= ~cur_bit; 3403 } 3404 } 3405 3406 return par_num; 3407 } 3408 3409 static uint8_t 3410 bnx2x_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print, 3411 uint32_t * sig) 3412 { 3413 int par_num = 0; 3414 3415 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 3416 (sig[1] & HW_PRTY_ASSERT_SET_1) || 3417 (sig[2] & HW_PRTY_ASSERT_SET_2) || 3418 (sig[3] & HW_PRTY_ASSERT_SET_3) || 3419 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 3420 PMD_DRV_LOG(ERR, sc, 3421 "Parity error: HW block parity attention:" 3422 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x", 3423 (uint32_t) (sig[0] & HW_PRTY_ASSERT_SET_0), 3424 (uint32_t) (sig[1] & HW_PRTY_ASSERT_SET_1), 3425 (uint32_t) (sig[2] & HW_PRTY_ASSERT_SET_2), 3426 (uint32_t) (sig[3] & HW_PRTY_ASSERT_SET_3), 3427 (uint32_t) (sig[4] & HW_PRTY_ASSERT_SET_4)); 3428 3429 if (print) 3430 PMD_DRV_LOG(INFO, sc, "Parity errors detected in blocks: "); 3431 3432 par_num = 3433 bnx2x_check_blocks_with_parity0(sc, sig[0] & 3434 HW_PRTY_ASSERT_SET_0, 3435 par_num, print); 3436 par_num = 3437 bnx2x_check_blocks_with_parity1(sc, sig[1] & 3438 HW_PRTY_ASSERT_SET_1, 3439 par_num, global, print); 3440 par_num = 3441 bnx2x_check_blocks_with_parity2(sc, sig[2] & 3442 HW_PRTY_ASSERT_SET_2, 3443 par_num, print); 3444 par_num = 3445 bnx2x_check_blocks_with_parity3(sc, sig[3] & 3446 HW_PRTY_ASSERT_SET_3, 3447 par_num, global, print); 3448 par_num = 3449 bnx2x_check_blocks_with_parity4(sc, sig[4] & 3450 HW_PRTY_ASSERT_SET_4, 3451 par_num, print); 3452 3453 if (print) 3454 PMD_DRV_LOG(INFO, sc, ""); 3455 3456 return TRUE; 3457 } 3458 3459 return FALSE; 3460 } 3461 3462 static uint8_t 3463 bnx2x_chk_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print) 3464 { 3465 struct attn_route attn = { {0} }; 3466 int port = SC_PORT(sc); 3467 3468 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4); 3469 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4); 3470 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4); 3471 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4); 3472 3473 if (!CHIP_IS_E1x(sc)) 3474 attn.sig[4] = 3475 REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port * 4); 3476 3477 return bnx2x_parity_attn(sc, global, print, attn.sig); 3478 } 3479 3480 static void bnx2x_attn_int_deasserted4(struct bnx2x_softc *sc, uint32_t attn) 3481 { 3482 uint32_t val; 3483 3484 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 3485 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 3486 PMD_DRV_LOG(INFO, sc, "ERROR: PGLUE hw attention 0x%08x", val); 3487 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 3488 PMD_DRV_LOG(INFO, sc, 3489 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR"); 3490 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 3491 PMD_DRV_LOG(INFO, sc, 3492 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR"); 3493 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 3494 PMD_DRV_LOG(INFO, sc, 3495 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN"); 3496 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 3497 PMD_DRV_LOG(INFO, sc, 3498 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN"); 3499 if (val & 3500 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 3501 PMD_DRV_LOG(INFO, sc, 3502 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN"); 3503 if (val & 3504 
PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 3505 PMD_DRV_LOG(INFO, sc, 3506 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN"); 3507 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 3508 PMD_DRV_LOG(INFO, sc, 3509 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN"); 3510 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 3511 PMD_DRV_LOG(INFO, sc, 3512 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN"); 3513 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 3514 PMD_DRV_LOG(INFO, sc, 3515 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW"); 3516 } 3517 3518 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 3519 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); 3520 PMD_DRV_LOG(INFO, sc, "ERROR: ATC hw attention 0x%08x", val); 3521 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 3522 PMD_DRV_LOG(INFO, sc, 3523 "ERROR: ATC_ATC_INT_STS_REG_ADDRESS_ERROR"); 3524 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 3525 PMD_DRV_LOG(INFO, sc, 3526 "ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND"); 3527 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 3528 PMD_DRV_LOG(INFO, sc, 3529 "ERROR: ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS"); 3530 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 3531 PMD_DRV_LOG(INFO, sc, 3532 "ERROR: ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT"); 3533 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 3534 PMD_DRV_LOG(INFO, sc, 3535 "ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR"); 3536 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 3537 PMD_DRV_LOG(INFO, sc, 3538 "ERROR: ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU"); 3539 } 3540 3541 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 3542 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 3543 PMD_DRV_LOG(INFO, sc, 3544 "ERROR: FATAL parity attention set4 0x%08x", 3545 (uint32_t) (attn & 3546 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR 3547 | 3548 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 3549 } 3550 } 3551 3552 static void bnx2x_e1h_disable(struct bnx2x_softc *sc) 3553 { 3554 int port = SC_PORT(sc); 3555 3556 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0); 3557 } 3558 3559 static void bnx2x_e1h_enable(struct bnx2x_softc *sc) 3560 { 3561 int port = SC_PORT(sc); 3562 3563 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1); 3564 } 3565 3566 /* 3567 * called due to MCP event (on pmf): 3568 * reread new bandwidth configuration 3569 * configure FW 3570 * notify others function about the change 3571 */ 3572 static void bnx2x_config_mf_bw(struct bnx2x_softc *sc) 3573 { 3574 if (sc->link_vars.link_up) { 3575 bnx2x_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); 3576 bnx2x_link_sync_notify(sc); 3577 } 3578 3579 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 3580 } 3581 3582 static void bnx2x_set_mf_bw(struct bnx2x_softc *sc) 3583 { 3584 bnx2x_config_mf_bw(sc); 3585 bnx2x_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 3586 } 3587 3588 static void bnx2x_handle_eee_event(struct bnx2x_softc *sc) 3589 { 3590 bnx2x_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 3591 } 3592 3593 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 3594 3595 static void bnx2x_drv_info_ether_stat(struct bnx2x_softc *sc) 3596 { 3597 struct eth_stats_info *ether_stat = &sc->sp->drv_info_to_mcp.ether_stat; 3598 3599 strncpy(ether_stat->version, BNX2X_DRIVER_VERSION, 3600 ETH_STAT_INFO_VERSION_LEN); 3601 3602 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, 3603 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 3604 ether_stat->mac_local + MAC_PAD, 3605 MAC_PAD, ETH_ALEN); 3606 3607 ether_stat->mtu_size = 
sc->mtu; 3608 3609 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 3610 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0; 3611 3612 ether_stat->txq_size = sc->tx_ring_size; 3613 ether_stat->rxq_size = sc->rx_ring_size; 3614 } 3615 3616 static void bnx2x_handle_drv_info_req(struct bnx2x_softc *sc) 3617 { 3618 enum drv_info_opcode op_code; 3619 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); 3620 3621 /* if drv_info version supported by MFW doesn't match - send NACK */ 3622 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 3623 bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3624 return; 3625 } 3626 3627 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 3628 DRV_INFO_CONTROL_OP_CODE_SHIFT); 3629 3630 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); 3631 3632 switch (op_code) { 3633 case ETH_STATS_OPCODE: 3634 bnx2x_drv_info_ether_stat(sc); 3635 break; 3636 case FCOE_STATS_OPCODE: 3637 case ISCSI_STATS_OPCODE: 3638 default: 3639 /* if op code isn't supported - send NACK */ 3640 bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3641 return; 3642 } 3643 3644 /* 3645 * If we got drv_info attn from MFW then these fields are defined in 3646 * shmem2 for sure 3647 */ 3648 SHMEM2_WR(sc, drv_info_host_addr_lo, 3649 U64_LO(BNX2X_SP_MAPPING(sc, drv_info_to_mcp))); 3650 SHMEM2_WR(sc, drv_info_host_addr_hi, 3651 U64_HI(BNX2X_SP_MAPPING(sc, drv_info_to_mcp))); 3652 3653 bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); 3654 } 3655 3656 static void bnx2x_dcc_event(struct bnx2x_softc *sc, uint32_t dcc_event) 3657 { 3658 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 3659 /* 3660 * This is the only place besides the function initialization 3661 * where the sc->flags can change so it is done without any 3662 * locks 3663 */ 3664 if (sc->devinfo. 3665 mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { 3666 PMD_DRV_LOG(DEBUG, sc, "mf_cfg function disabled"); 3667 sc->flags |= BNX2X_MF_FUNC_DIS; 3668 bnx2x_e1h_disable(sc); 3669 } else { 3670 PMD_DRV_LOG(DEBUG, sc, "mf_cfg function enabled"); 3671 sc->flags &= ~BNX2X_MF_FUNC_DIS; 3672 bnx2x_e1h_enable(sc); 3673 } 3674 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 3675 } 3676 3677 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 3678 bnx2x_config_mf_bw(sc); 3679 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 3680 } 3681 3682 /* Report results to MCP */ 3683 if (dcc_event) 3684 bnx2x_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); 3685 else 3686 bnx2x_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); 3687 } 3688 3689 static void bnx2x_pmf_update(struct bnx2x_softc *sc) 3690 { 3691 int port = SC_PORT(sc); 3692 uint32_t val; 3693 3694 sc->port.pmf = 1; 3695 3696 /* 3697 * We need the mb() to ensure the ordering between the writing to 3698 * sc->port.pmf here and reading it from the bnx2x_periodic_task(). 
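 * (The store-then-mb() here mirrors the same pattern in
 * bnx2x_nic_load_pmf(); without the barrier the periodic task could act on
 * a stale sc->port.pmf value.)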
3699 */ 3700 mb(); 3701 3702 /* enable nig attention */ 3703 val = (0xff0f | (1 << (SC_VN(sc) + 4))); 3704 if (sc->devinfo.int_block == INT_BLOCK_HC) { 3705 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, val); 3706 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, val); 3707 } else if (!CHIP_IS_E1x(sc)) { 3708 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 3709 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 3710 } 3711 3712 bnx2x_stats_handle(sc, STATS_EVENT_PMF); 3713 } 3714 3715 static int bnx2x_mc_assert(struct bnx2x_softc *sc) 3716 { 3717 char last_idx; 3718 int i, rc = 0; 3719 __rte_unused uint32_t row0, row1, row2, row3; 3720 3721 /* XSTORM */ 3722 last_idx = 3723 REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); 3724 if (last_idx) 3725 PMD_DRV_LOG(ERR, sc, "XSTORM_ASSERT_LIST_INDEX 0x%x", last_idx); 3726 3727 /* print the asserts */ 3728 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 3729 3730 row0 = 3731 REG_RD(sc, 3732 BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); 3733 row1 = 3734 REG_RD(sc, 3735 BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 3736 4); 3737 row2 = 3738 REG_RD(sc, 3739 BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 3740 8); 3741 row3 = 3742 REG_RD(sc, 3743 BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 3744 12); 3745 3746 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 3747 PMD_DRV_LOG(ERR, sc, 3748 "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", 3749 i, row3, row2, row1, row0); 3750 rc++; 3751 } else { 3752 break; 3753 } 3754 } 3755 3756 /* TSTORM */ 3757 last_idx = 3758 REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); 3759 if (last_idx) { 3760 PMD_DRV_LOG(ERR, sc, "TSTORM_ASSERT_LIST_INDEX 0x%x", last_idx); 3761 } 3762 3763 /* print the asserts */ 3764 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 3765 3766 row0 = 3767 REG_RD(sc, 3768 BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); 3769 row1 = 3770 REG_RD(sc, 3771 BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 3772 4); 3773 row2 = 3774 REG_RD(sc, 3775 BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 3776 8); 3777 row3 = 3778 REG_RD(sc, 3779 BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 3780 12); 3781 3782 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 3783 PMD_DRV_LOG(ERR, sc, 3784 "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", 3785 i, row3, row2, row1, row0); 3786 rc++; 3787 } else { 3788 break; 3789 } 3790 } 3791 3792 /* CSTORM */ 3793 last_idx = 3794 REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); 3795 if (last_idx) { 3796 PMD_DRV_LOG(ERR, sc, "CSTORM_ASSERT_LIST_INDEX 0x%x", last_idx); 3797 } 3798 3799 /* print the asserts */ 3800 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 3801 3802 row0 = 3803 REG_RD(sc, 3804 BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); 3805 row1 = 3806 REG_RD(sc, 3807 BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 3808 4); 3809 row2 = 3810 REG_RD(sc, 3811 BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 3812 8); 3813 row3 = 3814 REG_RD(sc, 3815 BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 3816 12); 3817 3818 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 3819 PMD_DRV_LOG(ERR, sc, 3820 "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", 3821 i, row3, row2, row1, row0); 3822 rc++; 3823 } else { 3824 break; 3825 } 3826 } 3827 3828 /* USTORM */ 3829 last_idx = 3830 REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); 3831 if (last_idx) { 3832 PMD_DRV_LOG(ERR, sc, "USTORM_ASSERT_LIST_INDEX 0x%x", last_idx); 3833 } 3834 3835 /* print the 
asserts */ 3836 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 3837 3838 row0 = 3839 REG_RD(sc, 3840 BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); 3841 row1 = 3842 REG_RD(sc, 3843 BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 3844 4); 3845 row2 = 3846 REG_RD(sc, 3847 BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 3848 8); 3849 row3 = 3850 REG_RD(sc, 3851 BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 3852 12); 3853 3854 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 3855 PMD_DRV_LOG(ERR, sc, 3856 "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", 3857 i, row3, row2, row1, row0); 3858 rc++; 3859 } else { 3860 break; 3861 } 3862 } 3863 3864 return rc; 3865 } 3866 3867 static void bnx2x_attn_int_deasserted3(struct bnx2x_softc *sc, uint32_t attn) 3868 { 3869 int func = SC_FUNC(sc); 3870 uint32_t val; 3871 3872 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 3873 3874 if (attn & BNX2X_PMF_LINK_ASSERT(sc)) { 3875 3876 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0); 3877 bnx2x_read_mf_cfg(sc); 3878 sc->devinfo.mf_info.mf_config[SC_VN(sc)] = 3879 MFCFG_RD(sc, 3880 func_mf_config[SC_ABS_FUNC(sc)].config); 3881 val = 3882 SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); 3883 3884 if (val & DRV_STATUS_DCC_EVENT_MASK) 3885 bnx2x_dcc_event(sc, 3886 (val & 3887 DRV_STATUS_DCC_EVENT_MASK)); 3888 3889 if (val & DRV_STATUS_SET_MF_BW) 3890 bnx2x_set_mf_bw(sc); 3891 3892 if (val & DRV_STATUS_DRV_INFO_REQ) 3893 bnx2x_handle_drv_info_req(sc); 3894 3895 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) 3896 bnx2x_pmf_update(sc); 3897 3898 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 3899 bnx2x_handle_eee_event(sc); 3900 3901 if (sc->link_vars.periodic_flags & 3902 ELINK_PERIODIC_FLAGS_LINK_EVENT) { 3903 /* sync with link */ 3904 bnx2x_acquire_phy_lock(sc); 3905 sc->link_vars.periodic_flags &= 3906 ~ELINK_PERIODIC_FLAGS_LINK_EVENT; 3907 bnx2x_release_phy_lock(sc); 3908 if (IS_MF(sc)) { 3909 bnx2x_link_sync_notify(sc); 3910 } 3911 bnx2x_link_report(sc); 3912 } 3913 3914 /* 3915 * Always call it here: bnx2x_link_report() will 3916 * prevent the link indication duplication. 3917 */ 3918 bnx2x_link_status_update(sc); 3919 3920 } else if (attn & BNX2X_MC_ASSERT_BITS) { 3921 3922 PMD_DRV_LOG(ERR, sc, "MC assert!"); 3923 bnx2x_mc_assert(sc); 3924 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); 3925 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); 3926 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); 3927 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); 3928 rte_panic("MC assert!"); 3929 3930 } else if (attn & BNX2X_MCP_ASSERT) { 3931 3932 PMD_DRV_LOG(ERR, sc, "MCP assert!"); 3933 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); 3934 3935 } else { 3936 PMD_DRV_LOG(ERR, sc, 3937 "Unknown HW assert! 
(attn 0x%08x)", attn); 3938 } 3939 } 3940 3941 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 3942 PMD_DRV_LOG(ERR, sc, "LATCHED attention 0x%08x (masked)", attn); 3943 if (attn & BNX2X_GRC_TIMEOUT) { 3944 val = REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); 3945 PMD_DRV_LOG(ERR, sc, "GRC time-out 0x%08x", val); 3946 } 3947 if (attn & BNX2X_GRC_RSV) { 3948 val = REG_RD(sc, MISC_REG_GRC_RSV_ATTN); 3949 PMD_DRV_LOG(ERR, sc, "GRC reserved 0x%08x", val); 3950 } 3951 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 3952 } 3953 } 3954 3955 static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn) 3956 { 3957 int port = SC_PORT(sc); 3958 int reg_offset; 3959 uint32_t val0, mask0, val1, mask1; 3960 uint32_t val; 3961 3962 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 3963 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR); 3964 PMD_DRV_LOG(ERR, sc, "CFC hw attention 0x%08x", val); 3965 /* CFC error attention */ 3966 if (val & 0x2) { 3967 PMD_DRV_LOG(ERR, sc, "FATAL error from CFC"); 3968 } 3969 } 3970 3971 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 3972 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0); 3973 PMD_DRV_LOG(ERR, sc, "PXP hw attention-0 0x%08x", val); 3974 /* RQ_USDMDP_FIFO_OVERFLOW */ 3975 if (val & 0x18000) { 3976 PMD_DRV_LOG(ERR, sc, "FATAL error from PXP"); 3977 } 3978 3979 if (!CHIP_IS_E1x(sc)) { 3980 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1); 3981 PMD_DRV_LOG(ERR, sc, "PXP hw attention-1 0x%08x", val); 3982 } 3983 } 3984 #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR 3985 #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT 3986 3987 if (attn & AEU_PXP2_HW_INT_BIT) { 3988 /* CQ47854 workaround do not panic on 3989 * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 3990 */ 3991 if (!CHIP_IS_E1x(sc)) { 3992 mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0); 3993 val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1); 3994 mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1); 3995 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0); 3996 /* 3997 * If the only PXP2_EOP_ERROR_BIT is set in 3998 * STS0 and STS1 - clear it 3999 * 4000 * probably we lose additional attentions between 4001 * STS0 and STS_CLR0, in this case user will not 4002 * be notified about them 4003 */ 4004 if (val0 & mask0 & PXP2_EOP_ERROR_BIT && 4005 !(val1 & mask1)) 4006 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 4007 4008 /* print the register, since no one can restore it */ 4009 PMD_DRV_LOG(ERR, sc, 4010 "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x", val0); 4011 4012 /* 4013 * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 4014 * then notify 4015 */ 4016 if (val0 & PXP2_EOP_ERROR_BIT) { 4017 PMD_DRV_LOG(ERR, sc, "PXP2_WR_PGLUE_EOP_ERROR"); 4018 4019 /* 4020 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is 4021 * set then clear attention from PXP2 block without panic 4022 */ 4023 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) && 4024 ((val1 & mask1) == 0)) 4025 attn &= ~AEU_PXP2_HW_INT_BIT; 4026 } 4027 } 4028 } 4029 4030 if (attn & HW_INTERRUT_ASSERT_SET_2) { 4031 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 4032 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 4033 4034 val = REG_RD(sc, reg_offset); 4035 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 4036 REG_WR(sc, reg_offset, val); 4037 4038 PMD_DRV_LOG(ERR, sc, 4039 "FATAL HW block attention set2 0x%x", 4040 (uint32_t) (attn & HW_INTERRUT_ASSERT_SET_2)); 4041 rte_panic("HW block attention set2"); 4042 } 4043 } 4044 4045 static void bnx2x_attn_int_deasserted1(struct bnx2x_softc *sc, uint32_t attn) 4046 { 4047 int port = SC_PORT(sc); 4048 int reg_offset; 4049 uint32_t val; 4050 4051 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 4052 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); 4053 PMD_DRV_LOG(ERR, sc, "DB hw attention 0x%08x", val); 4054 /* DORQ discard attention */ 4055 if (val & 0x2) { 4056 PMD_DRV_LOG(ERR, sc, "FATAL error from DORQ"); 4057 } 4058 } 4059 4060 if (attn & HW_INTERRUT_ASSERT_SET_1) { 4061 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 4062 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 4063 4064 val = REG_RD(sc, reg_offset); 4065 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 4066 REG_WR(sc, reg_offset, val); 4067 4068 PMD_DRV_LOG(ERR, sc, 4069 "FATAL HW block attention set1 0x%08x", 4070 (uint32_t) (attn & HW_INTERRUT_ASSERT_SET_1)); 4071 rte_panic("HW block attention set1"); 4072 } 4073 } 4074 4075 static void bnx2x_attn_int_deasserted0(struct bnx2x_softc *sc, uint32_t attn) 4076 { 4077 int port = SC_PORT(sc); 4078 int reg_offset; 4079 uint32_t val; 4080 4081 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4082 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 4083 4084 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 4085 val = REG_RD(sc, reg_offset); 4086 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 4087 REG_WR(sc, reg_offset, val); 4088 4089 PMD_DRV_LOG(WARNING, sc, "SPIO5 hw attention"); 4090 4091 /* Fan failure attention */ 4092 elink_hw_reset_phy(&sc->link_params); 4093 bnx2x_fan_failure(sc); 4094 } 4095 4096 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { 4097 bnx2x_acquire_phy_lock(sc); 4098 elink_handle_module_detect_int(&sc->link_params); 4099 bnx2x_release_phy_lock(sc); 4100 } 4101 4102 if (attn & HW_INTERRUT_ASSERT_SET_0) { 4103 val = REG_RD(sc, reg_offset); 4104 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 4105 REG_WR(sc, reg_offset, val); 4106 4107 rte_panic("FATAL HW block attention set0 0x%lx", 4108 (attn & (unsigned long)HW_INTERRUT_ASSERT_SET_0)); 4109 } 4110 } 4111 4112 static void bnx2x_attn_int_deasserted(struct bnx2x_softc *sc, uint32_t deasserted) 4113 { 4114 struct attn_route attn; 4115 struct attn_route *group_mask; 4116 int port = SC_PORT(sc); 4117 int index; 4118 uint32_t reg_addr; 4119 uint32_t val; 4120 uint32_t aeu_mask; 4121 uint8_t global = FALSE; 4122 4123 /* 4124 * Need to take HW lock because MCP or other port might also 4125 * try to handle this event. 
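 * The lock is taken via bnx2x_acquire_alr() and must be released on every
 * exit path, including the early return taken on a parity attention.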
4126 */ 4127 bnx2x_acquire_alr(sc); 4128 4129 if (bnx2x_chk_parity_attn(sc, &global, TRUE)) { 4130 sc->recovery_state = BNX2X_RECOVERY_INIT; 4131 4132 /* disable HW interrupts */ 4133 bnx2x_int_disable(sc); 4134 bnx2x_release_alr(sc); 4135 return; 4136 } 4137 4138 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4); 4139 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4); 4140 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4); 4141 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4); 4142 if (!CHIP_IS_E1x(sc)) { 4143 attn.sig[4] = 4144 REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port * 4); 4145 } else { 4146 attn.sig[4] = 0; 4147 } 4148 4149 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 4150 if (deasserted & (1 << index)) { 4151 group_mask = &sc->attn_group[index]; 4152 4153 bnx2x_attn_int_deasserted4(sc, 4154 attn. 4155 sig[4] & group_mask->sig[4]); 4156 bnx2x_attn_int_deasserted3(sc, 4157 attn. 4158 sig[3] & group_mask->sig[3]); 4159 bnx2x_attn_int_deasserted1(sc, 4160 attn. 4161 sig[1] & group_mask->sig[1]); 4162 bnx2x_attn_int_deasserted2(sc, 4163 attn. 4164 sig[2] & group_mask->sig[2]); 4165 bnx2x_attn_int_deasserted0(sc, 4166 attn. 4167 sig[0] & group_mask->sig[0]); 4168 } 4169 } 4170 4171 bnx2x_release_alr(sc); 4172 4173 if (sc->devinfo.int_block == INT_BLOCK_HC) { 4174 reg_addr = (HC_REG_COMMAND_REG + port * 32 + 4175 COMMAND_REG_ATTN_BITS_CLR); 4176 } else { 4177 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER * 8); 4178 } 4179 4180 val = ~deasserted; 4181 PMD_DRV_LOG(DEBUG, sc, 4182 "about to mask 0x%08x at %s addr 0x%08x", val, 4183 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", 4184 reg_addr); 4185 REG_WR(sc, reg_addr, val); 4186 4187 if (~sc->attn_state & deasserted) { 4188 PMD_DRV_LOG(ERR, sc, "IGU error"); 4189 } 4190 4191 reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4192 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4193 4194 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4195 4196 aeu_mask = REG_RD(sc, reg_addr); 4197 4198 aeu_mask |= (deasserted & 0x3ff); 4199 4200 REG_WR(sc, reg_addr, aeu_mask); 4201 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4202 4203 sc->attn_state &= ~deasserted; 4204 } 4205 4206 static void bnx2x_attn_int(struct bnx2x_softc *sc) 4207 { 4208 /* read local copy of bits */ 4209 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); 4210 uint32_t attn_ack = 4211 le32toh(sc->def_sb->atten_status_block.attn_bits_ack); 4212 uint32_t attn_state = sc->attn_state; 4213 4214 /* look for changed bits */ 4215 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; 4216 uint32_t deasserted = ~attn_bits & attn_ack & attn_state; 4217 4218 PMD_DRV_LOG(DEBUG, sc, 4219 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x", 4220 attn_bits, attn_ack, asserted, deasserted); 4221 4222 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { 4223 PMD_DRV_LOG(ERR, sc, "BAD attention state"); 4224 } 4225 4226 /* handle bits that were raised */ 4227 if (asserted) { 4228 bnx2x_attn_int_asserted(sc, asserted); 4229 } 4230 4231 if (deasserted) { 4232 bnx2x_attn_int_deasserted(sc, deasserted); 4233 } 4234 } 4235 4236 static uint16_t bnx2x_update_dsb_idx(struct bnx2x_softc *sc) 4237 { 4238 struct host_sp_status_block *def_sb = sc->def_sb; 4239 uint16_t rc = 0; 4240 4241 if (!def_sb) 4242 return 0; 4243 4244 mb(); /* status block is written to by the chip */ 4245 4246 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 4247 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; 4248 rc |= BNX2X_DEF_SB_ATT_IDX; 4249 } 4250 4251 if (sc->def_idx != def_sb->sp_sb.running_index) { 4252 sc->def_idx = def_sb->sp_sb.running_index; 4253 rc |= BNX2X_DEF_SB_IDX; 4254 } 4255 4256 mb(); 4257 4258 return rc; 4259 } 4260 4261 static struct ecore_queue_sp_obj *bnx2x_cid_to_q_obj(struct bnx2x_softc *sc, 4262 uint32_t cid) 4263 { 4264 return &sc->sp_objs[CID_TO_FP(cid, sc)].q_obj; 4265 } 4266 4267 static void bnx2x_handle_mcast_eqe(struct bnx2x_softc *sc) 4268 { 4269 struct ecore_mcast_ramrod_params rparam; 4270 int rc; 4271 4272 memset(&rparam, 0, sizeof(rparam)); 4273 4274 rparam.mcast_obj = &sc->mcast_obj; 4275 4276 /* clear pending state for the last command */ 4277 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); 4278 4279 /* if there are pending mcast commands - send them */ 4280 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { 4281 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 4282 if (rc < 0) { 4283 PMD_DRV_LOG(INFO, sc, 4284 "Failed to send pending mcast commands (%d)", 4285 rc); 4286 } 4287 } 4288 } 4289 4290 static void 4291 bnx2x_handle_classification_eqe(struct bnx2x_softc *sc, union event_ring_elem *elem) 4292 { 4293 uint32_t ramrod_flags = 0; 4294 int rc = 0; 4295 uint32_t cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; 4296 struct ecore_vlan_mac_obj *vlan_mac_obj; 4297 4298 /* always push next commands out, don't wait here */ 4299 rte_bit_relaxed_set32(RAMROD_CONT, &ramrod_flags); 4300 4301 switch (le32toh(elem->message.data.eth_event.echo) >> BNX2X_SWCID_SHIFT) { 4302 case ECORE_FILTER_MAC_PENDING: 4303 PMD_DRV_LOG(DEBUG, sc, "Got SETUP_MAC completions"); 4304 vlan_mac_obj = &sc->sp_objs[cid].mac_obj; 4305 break; 4306 4307 case ECORE_FILTER_MCAST_PENDING: 4308 PMD_DRV_LOG(DEBUG, sc, "Got SETUP_MCAST 
completions"); 4309 bnx2x_handle_mcast_eqe(sc); 4310 return; 4311 4312 default: 4313 PMD_DRV_LOG(NOTICE, sc, "Unsupported classification command: %d", 4314 elem->message.data.eth_event.echo); 4315 return; 4316 } 4317 4318 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); 4319 4320 if (rc < 0) { 4321 PMD_DRV_LOG(NOTICE, sc, 4322 "Failed to schedule new commands (%d)", rc); 4323 } else if (rc > 0) { 4324 PMD_DRV_LOG(DEBUG, sc, "Scheduled next pending commands..."); 4325 } 4326 } 4327 4328 static void bnx2x_handle_rx_mode_eqe(struct bnx2x_softc *sc) 4329 { 4330 rte_bit_relaxed_clear32(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 4331 4332 /* send rx_mode command again if was requested */ 4333 if (rte_bit_relaxed_test_and_clear32(ECORE_FILTER_RX_MODE_SCHED, 4334 &sc->sp_state)) 4335 bnx2x_set_storm_rx_mode(sc); 4336 } 4337 4338 static void bnx2x_update_eq_prod(struct bnx2x_softc *sc, uint16_t prod) 4339 { 4340 storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); 4341 wmb(); /* keep prod updates ordered */ 4342 } 4343 4344 static void bnx2x_eq_int(struct bnx2x_softc *sc) 4345 { 4346 uint16_t hw_cons, sw_cons, sw_prod; 4347 union event_ring_elem *elem; 4348 uint8_t echo; 4349 uint32_t cid; 4350 uint8_t opcode; 4351 int spqe_cnt = 0; 4352 struct ecore_queue_sp_obj *q_obj; 4353 struct ecore_func_sp_obj *f_obj = &sc->func_obj; 4354 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; 4355 4356 hw_cons = le16toh(*sc->eq_cons_sb); 4357 4358 /* 4359 * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. 4360 * when we get to the next-page we need to adjust so the loop 4361 * condition below will be met. The next element is the size of a 4362 * regular element and hence incrementing by 1 4363 */ 4364 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { 4365 hw_cons++; 4366 } 4367 4368 /* 4369 * This function may never run in parallel with itself for a 4370 * specific sc and no need for a read memory barrier here. 
4371 */ 4372 sw_cons = sc->eq_cons; 4373 sw_prod = sc->eq_prod; 4374 4375 for (; 4376 sw_cons != hw_cons; 4377 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 4378 4379 elem = &sc->eq[EQ_DESC(sw_cons)]; 4380 4381 /* elem CID originates from FW, actually LE */ 4382 cid = SW_CID(elem->message.data.cfc_del_event.cid); 4383 opcode = elem->message.opcode; 4384 4385 /* handle eq element */ 4386 switch (opcode) { 4387 case EVENT_RING_OPCODE_STAT_QUERY: 4388 PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "got statistics completion event %d", 4389 sc->stats_comp++); 4390 /* nothing to do with stats comp */ 4391 goto next_spqe; 4392 4393 case EVENT_RING_OPCODE_CFC_DEL: 4394 /* handle according to cid range */ 4395 /* we may want to verify here that the sc state is HALTING */ 4396 PMD_DRV_LOG(DEBUG, sc, "got delete ramrod for MULTI[%d]", 4397 cid); 4398 q_obj = bnx2x_cid_to_q_obj(sc, cid); 4399 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { 4400 break; 4401 } 4402 goto next_spqe; 4403 4404 case EVENT_RING_OPCODE_STOP_TRAFFIC: 4405 PMD_DRV_LOG(DEBUG, sc, "got STOP TRAFFIC"); 4406 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { 4407 break; 4408 } 4409 goto next_spqe; 4410 4411 case EVENT_RING_OPCODE_START_TRAFFIC: 4412 PMD_DRV_LOG(DEBUG, sc, "got START TRAFFIC"); 4413 if (f_obj->complete_cmd 4414 (sc, f_obj, ECORE_F_CMD_TX_START)) { 4415 break; 4416 } 4417 goto next_spqe; 4418 4419 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 4420 echo = elem->message.data.function_update_event.echo; 4421 if (echo == SWITCH_UPDATE) { 4422 PMD_DRV_LOG(DEBUG, sc, 4423 "got FUNC_SWITCH_UPDATE ramrod"); 4424 if (f_obj->complete_cmd(sc, f_obj, 4425 ECORE_F_CMD_SWITCH_UPDATE)) 4426 { 4427 break; 4428 } 4429 } else { 4430 PMD_DRV_LOG(DEBUG, sc, 4431 "AFEX: ramrod completed FUNCTION_UPDATE"); 4432 f_obj->complete_cmd(sc, f_obj, 4433 ECORE_F_CMD_AFEX_UPDATE); 4434 } 4435 goto next_spqe; 4436 4437 case EVENT_RING_OPCODE_FORWARD_SETUP: 4438 q_obj = &bnx2x_fwd_sp_obj(sc, q_obj); 4439 if (q_obj->complete_cmd(sc, q_obj, 4440 ECORE_Q_CMD_SETUP_TX_ONLY)) { 4441 break; 4442 } 4443 goto next_spqe; 4444 4445 case EVENT_RING_OPCODE_FUNCTION_START: 4446 PMD_DRV_LOG(DEBUG, sc, "got FUNC_START ramrod"); 4447 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { 4448 break; 4449 } 4450 goto next_spqe; 4451 4452 case EVENT_RING_OPCODE_FUNCTION_STOP: 4453 PMD_DRV_LOG(DEBUG, sc, "got FUNC_STOP ramrod"); 4454 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { 4455 break; 4456 } 4457 goto next_spqe; 4458 } 4459 4460 switch (opcode | sc->state) { 4461 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPEN): 4462 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPENING_WAITING_PORT): 4463 cid = 4464 elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; 4465 PMD_DRV_LOG(DEBUG, sc, "got RSS_UPDATE ramrod. 
CID %d", 4466 cid); 4467 rss_raw->clear_pending(rss_raw); 4468 break; 4469 4470 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): 4471 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): 4472 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_CLOSING_WAITING_HALT): 4473 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_OPEN): 4474 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_DIAG): 4475 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_CLOSING_WAITING_HALT): 4476 PMD_DRV_LOG(DEBUG, sc, 4477 "got (un)set mac ramrod"); 4478 bnx2x_handle_classification_eqe(sc, elem); 4479 break; 4480 4481 case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_OPEN): 4482 case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_DIAG): 4483 case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_CLOSING_WAITING_HALT): 4484 PMD_DRV_LOG(DEBUG, sc, 4485 "got mcast ramrod"); 4486 bnx2x_handle_mcast_eqe(sc); 4487 break; 4488 4489 case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_OPEN): 4490 case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_DIAG): 4491 case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_CLOSING_WAITING_HALT): 4492 PMD_DRV_LOG(DEBUG, sc, 4493 "got rx_mode ramrod"); 4494 bnx2x_handle_rx_mode_eqe(sc); 4495 break; 4496 4497 default: 4498 /* unknown event log error and continue */ 4499 PMD_DRV_LOG(INFO, sc, "Unknown EQ event %d, sc->state 0x%x", 4500 elem->message.opcode, sc->state); 4501 } 4502 4503 next_spqe: 4504 spqe_cnt++; 4505 } /* for */ 4506 4507 mb(); 4508 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); 4509 4510 sc->eq_cons = sw_cons; 4511 sc->eq_prod = sw_prod; 4512 4513 /* make sure that above mem writes were issued towards the memory */ 4514 wmb(); 4515 4516 /* update producer */ 4517 bnx2x_update_eq_prod(sc, sc->eq_prod); 4518 } 4519 4520 static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc) 4521 { 4522 uint16_t status; 4523 int rc = 0; 4524 4525 PMD_DRV_LOG(DEBUG, sc, "---> SP TASK <---"); 4526 4527 /* what work needs to be performed? */ 4528 status = bnx2x_update_dsb_idx(sc); 4529 4530 PMD_DRV_LOG(DEBUG, sc, "dsb status 0x%04x", status); 4531 4532 /* HW attentions */ 4533 if (status & BNX2X_DEF_SB_ATT_IDX) { 4534 PMD_DRV_LOG(DEBUG, sc, "---> ATTN INTR <---"); 4535 bnx2x_attn_int(sc); 4536 status &= ~BNX2X_DEF_SB_ATT_IDX; 4537 rc = 1; 4538 } 4539 4540 /* SP events: STAT_QUERY and others */ 4541 if (status & BNX2X_DEF_SB_IDX) { 4542 /* handle EQ completions */ 4543 PMD_DRV_LOG(DEBUG, sc, "---> EQ INTR <---"); 4544 bnx2x_eq_int(sc); 4545 bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 4546 le16toh(sc->def_idx), IGU_INT_NOP, 1); 4547 status &= ~BNX2X_DEF_SB_IDX; 4548 } 4549 4550 /* if status is non zero then something went wrong */ 4551 if (unlikely(status)) { 4552 PMD_DRV_LOG(INFO, sc, 4553 "Got an unknown SP interrupt! 
(0x%04x)", status); 4554 } 4555 4556 /* ack status block only if something was actually handled */ 4557 bnx2x_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 4558 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); 4559 4560 return rc; 4561 } 4562 4563 static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp) 4564 { 4565 struct bnx2x_softc *sc = fp->sc; 4566 uint8_t more_rx = FALSE; 4567 4568 /* Make sure FP is initialized */ 4569 if (!fp->sb_running_index) 4570 return; 4571 4572 PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, 4573 "---> FP TASK QUEUE (%d) <--", fp->index); 4574 4575 /* update the fastpath index */ 4576 bnx2x_update_fp_sb_idx(fp); 4577 4578 if (rte_atomic32_read(&sc->scan_fp) == 1) { 4579 if (bnx2x_has_rx_work(fp)) { 4580 more_rx = bnx2x_rxeof(sc, fp); 4581 } 4582 4583 if (more_rx) { 4584 /* still more work to do */ 4585 bnx2x_handle_fp_tq(fp); 4586 return; 4587 } 4588 /* We have completed slow path completion, clear the flag */ 4589 rte_atomic32_set(&sc->scan_fp, 0); 4590 } 4591 4592 bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 4593 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 4594 } 4595 4596 /* 4597 * Legacy interrupt entry point. 4598 * 4599 * Verifies that the controller generated the interrupt and 4600 * then calls a separate routine to handle the various 4601 * interrupt causes: link, RX, and TX. 4602 */ 4603 int bnx2x_intr_legacy(struct bnx2x_softc *sc) 4604 { 4605 struct bnx2x_fastpath *fp; 4606 uint32_t status, mask; 4607 int i, rc = 0; 4608 4609 /* 4610 * 0 for ustorm, 1 for cstorm 4611 * the bits returned from ack_int() are 0-15 4612 * bit 0 = attention status block 4613 * bit 1 = fast path status block 4614 * a mask of 0x2 or more = tx/rx event 4615 * a mask of 1 = slow path event 4616 */ 4617 4618 status = bnx2x_ack_int(sc); 4619 4620 /* the interrupt is not for us */ 4621 if (unlikely(status == 0)) { 4622 return 0; 4623 } 4624 4625 PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "Interrupt status 0x%04x", status); 4626 //bnx2x_dump_status_block(sc); 4627 4628 FOR_EACH_ETH_QUEUE(sc, i) { 4629 fp = &sc->fp[i]; 4630 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); 4631 if (status & mask) { 4632 /* acknowledge and disable further fastpath interrupts */ 4633 bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 4634 0, IGU_INT_DISABLE, 0); 4635 bnx2x_handle_fp_tq(fp); 4636 status &= ~mask; 4637 } 4638 } 4639 4640 if (unlikely(status & 0x1)) { 4641 /* acknowledge and disable further slowpath interrupts */ 4642 bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 4643 0, IGU_INT_DISABLE, 0); 4644 rc = bnx2x_handle_sp_tq(sc); 4645 status &= ~0x1; 4646 } 4647 4648 if (unlikely(status)) { 4649 PMD_DRV_LOG(WARNING, sc, 4650 "Unexpected fastpath status (0x%08x)!", status); 4651 } 4652 4653 return rc; 4654 } 4655 4656 static int bnx2x_init_hw_common_chip(struct bnx2x_softc *sc); 4657 static int bnx2x_init_hw_common(struct bnx2x_softc *sc); 4658 static int bnx2x_init_hw_port(struct bnx2x_softc *sc); 4659 static int bnx2x_init_hw_func(struct bnx2x_softc *sc); 4660 static void bnx2x_reset_common(struct bnx2x_softc *sc); 4661 static void bnx2x_reset_port(struct bnx2x_softc *sc); 4662 static void bnx2x_reset_func(struct bnx2x_softc *sc); 4663 static int bnx2x_init_firmware(struct bnx2x_softc *sc); 4664 static void bnx2x_release_firmware(struct bnx2x_softc *sc); 4665 4666 static struct 4667 ecore_func_sp_drv_ops bnx2x_func_sp_drv = { 4668 .init_hw_cmn_chip = bnx2x_init_hw_common_chip, 4669 .init_hw_cmn = bnx2x_init_hw_common, 4670 .init_hw_port = bnx2x_init_hw_port, 4671 .init_hw_func = bnx2x_init_hw_func, 4672 4673 .reset_hw_cmn = 
bnx2x_reset_common, 4674 .reset_hw_port = bnx2x_reset_port, 4675 .reset_hw_func = bnx2x_reset_func, 4676 4677 .init_fw = bnx2x_init_firmware, 4678 .release_fw = bnx2x_release_firmware, 4679 }; 4680 4681 static void bnx2x_init_func_obj(struct bnx2x_softc *sc) 4682 { 4683 sc->dmae_ready = 0; 4684 4685 PMD_INIT_FUNC_TRACE(sc); 4686 4687 ecore_init_func_obj(sc, 4688 &sc->func_obj, 4689 BNX2X_SP(sc, func_rdata), 4690 (rte_iova_t)BNX2X_SP_MAPPING(sc, func_rdata), 4691 BNX2X_SP(sc, func_afex_rdata), 4692 (rte_iova_t)BNX2X_SP_MAPPING(sc, func_afex_rdata), 4693 &bnx2x_func_sp_drv); 4694 } 4695 4696 static int bnx2x_init_hw(struct bnx2x_softc *sc, uint32_t load_code) 4697 { 4698 struct ecore_func_state_params func_params = { NULL }; 4699 int rc; 4700 4701 PMD_INIT_FUNC_TRACE(sc); 4702 4703 /* prepare the parameters for function state transitions */ 4704 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 4705 4706 func_params.f_obj = &sc->func_obj; 4707 func_params.cmd = ECORE_F_CMD_HW_INIT; 4708 4709 func_params.params.hw_init.load_phase = load_code; 4710 4711 /* 4712 * Via a plethora of function pointers, we will eventually reach 4713 * bnx2x_init_hw_common(), bnx2x_init_hw_port(), or bnx2x_init_hw_func(). 4714 */ 4715 rc = ecore_func_state_change(sc, &func_params); 4716 4717 return rc; 4718 } 4719 4720 static void 4721 bnx2x_fill(struct bnx2x_softc *sc, uint32_t addr, int fill, uint32_t len) 4722 { 4723 uint32_t i; 4724 4725 if (!(len % 4) && !(addr % 4)) { 4726 for (i = 0; i < len; i += 4) { 4727 REG_WR(sc, (addr + i), fill); 4728 } 4729 } else { 4730 for (i = 0; i < len; i++) { 4731 REG_WR8(sc, (addr + i), fill); 4732 } 4733 } 4734 } 4735 4736 /* writes FP SP data to FW - data_size in dwords */ 4737 static void 4738 bnx2x_wr_fp_sb_data(struct bnx2x_softc *sc, int fw_sb_id, uint32_t * sb_data_p, 4739 uint32_t data_size) 4740 { 4741 uint32_t index; 4742 4743 for (index = 0; index < data_size; index++) { 4744 REG_WR(sc, 4745 (BAR_CSTRORM_INTMEM + 4746 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 4747 (sizeof(uint32_t) * index)), *(sb_data_p + index)); 4748 } 4749 } 4750 4751 static void bnx2x_zero_fp_sb(struct bnx2x_softc *sc, int fw_sb_id) 4752 { 4753 struct hc_status_block_data_e2 sb_data_e2; 4754 struct hc_status_block_data_e1x sb_data_e1x; 4755 uint32_t *sb_data_p; 4756 uint32_t data_size = 0; 4757 4758 if (!CHIP_IS_E1x(sc)) { 4759 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 4760 sb_data_e2.common.state = SB_DISABLED; 4761 sb_data_e2.common.p_func.vf_valid = FALSE; 4762 sb_data_p = (uint32_t *) & sb_data_e2; 4763 data_size = (sizeof(struct hc_status_block_data_e2) / 4764 sizeof(uint32_t)); 4765 } else { 4766 memset(&sb_data_e1x, 0, 4767 sizeof(struct hc_status_block_data_e1x)); 4768 sb_data_e1x.common.state = SB_DISABLED; 4769 sb_data_e1x.common.p_func.vf_valid = FALSE; 4770 sb_data_p = (uint32_t *) & sb_data_e1x; 4771 data_size = (sizeof(struct hc_status_block_data_e1x) / 4772 sizeof(uint32_t)); 4773 } 4774 4775 bnx2x_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 4776 4777 bnx2x_fill(sc, 4778 (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 0, 4779 CSTORM_STATUS_BLOCK_SIZE); 4780 bnx2x_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 4781 0, CSTORM_SYNC_BLOCK_SIZE); 4782 } 4783 4784 static void 4785 bnx2x_wr_sp_sb_data(struct bnx2x_softc *sc, 4786 struct hc_sp_status_block_data *sp_sb_data) 4787 { 4788 uint32_t i; 4789 4790 for (i = 0; 4791 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); 4792 i++) { 4793 
REG_WR(sc, 4794 (BAR_CSTRORM_INTMEM + 4795 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + 4796 (i * sizeof(uint32_t))), 4797 *((uint32_t *) sp_sb_data + i)); 4798 } 4799 } 4800 4801 static void bnx2x_zero_sp_sb(struct bnx2x_softc *sc) 4802 { 4803 struct hc_sp_status_block_data sp_sb_data; 4804 4805 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 4806 4807 sp_sb_data.state = SB_DISABLED; 4808 sp_sb_data.p_func.vf_valid = FALSE; 4809 4810 bnx2x_wr_sp_sb_data(sc, &sp_sb_data); 4811 4812 bnx2x_fill(sc, 4813 (BAR_CSTRORM_INTMEM + 4814 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 4815 0, CSTORM_SP_STATUS_BLOCK_SIZE); 4816 bnx2x_fill(sc, 4817 (BAR_CSTRORM_INTMEM + 4818 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 4819 0, CSTORM_SP_SYNC_BLOCK_SIZE); 4820 } 4821 4822 static void 4823 bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, int igu_sb_id, 4824 int igu_seg_id) 4825 { 4826 hc_sm->igu_sb_id = igu_sb_id; 4827 hc_sm->igu_seg_id = igu_seg_id; 4828 hc_sm->timer_value = 0xFF; 4829 hc_sm->time_to_expire = 0xFFFFFFFF; 4830 } 4831 4832 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) 4833 { 4834 /* zero out state machine indices */ 4835 4836 /* rx indices */ 4837 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 4838 4839 /* tx indices */ 4840 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 4841 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 4842 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 4843 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 4844 4845 /* map indices */ 4846 4847 /* rx indices */ 4848 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 4849 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 4850 4851 /* tx indices */ 4852 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 4853 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 4854 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 4855 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 4856 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 4857 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 4858 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 4859 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 4860 } 4861 4862 static void 4863 bnx2x_init_sb(struct bnx2x_softc *sc, rte_iova_t busaddr, int vfid, 4864 uint8_t vf_valid, int fw_sb_id, int igu_sb_id) 4865 { 4866 struct hc_status_block_data_e2 sb_data_e2; 4867 struct hc_status_block_data_e1x sb_data_e1x; 4868 struct hc_status_block_sm *hc_sm_p; 4869 uint32_t *sb_data_p; 4870 int igu_seg_id; 4871 int data_size; 4872 4873 if (CHIP_INT_MODE_IS_BC(sc)) { 4874 igu_seg_id = HC_SEG_ACCESS_NORM; 4875 } else { 4876 igu_seg_id = IGU_SEG_ACCESS_NORM; 4877 } 4878 4879 bnx2x_zero_fp_sb(sc, fw_sb_id); 4880 4881 if (!CHIP_IS_E1x(sc)) { 4882 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 4883 sb_data_e2.common.state = SB_ENABLED; 4884 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); 4885 sb_data_e2.common.p_func.vf_id = vfid; 4886 sb_data_e2.common.p_func.vf_valid = vf_valid; 4887 sb_data_e2.common.p_func.vnic_id = SC_VN(sc); 4888 sb_data_e2.common.same_igu_sb_1b = TRUE; 4889 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); 4890 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); 4891 hc_sm_p = sb_data_e2.common.state_machine; 4892 sb_data_p = (uint32_t *) & sb_data_e2; 4893 data_size = (sizeof(struct hc_status_block_data_e2) / 4894 sizeof(uint32_t)); 4895 bnx2x_map_sb_state_machines(sb_data_e2.index_data); 4896 } else { 4897 memset(&sb_data_e1x, 0, 4898 sizeof(struct 
hc_status_block_data_e1x)); 4899 sb_data_e1x.common.state = SB_ENABLED; 4900 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); 4901 sb_data_e1x.common.p_func.vf_id = 0xff; 4902 sb_data_e1x.common.p_func.vf_valid = FALSE; 4903 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); 4904 sb_data_e1x.common.same_igu_sb_1b = TRUE; 4905 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); 4906 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); 4907 hc_sm_p = sb_data_e1x.common.state_machine; 4908 sb_data_p = (uint32_t *) & sb_data_e1x; 4909 data_size = (sizeof(struct hc_status_block_data_e1x) / 4910 sizeof(uint32_t)); 4911 bnx2x_map_sb_state_machines(sb_data_e1x.index_data); 4912 } 4913 4914 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); 4915 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); 4916 4917 /* write indices to HW - PCI guarantees endianity of regpairs */ 4918 bnx2x_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 4919 } 4920 4921 static uint8_t bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) 4922 { 4923 if (CHIP_IS_E1x(fp->sc)) { 4924 return fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H; 4925 } else { 4926 return fp->cl_id; 4927 } 4928 } 4929 4930 static uint32_t 4931 bnx2x_rx_ustorm_prods_offset(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp) 4932 { 4933 uint32_t offset = BAR_USTRORM_INTMEM; 4934 4935 if (IS_VF(sc)) { 4936 return PXP_VF_ADDR_USDM_QUEUES_START + 4937 (sc->acquire_resp.resc.hw_qid[fp->index] * 4938 sizeof(struct ustorm_queue_zone_data)); 4939 } else if (!CHIP_IS_E1x(sc)) { 4940 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); 4941 } else { 4942 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); 4943 } 4944 4945 return offset; 4946 } 4947 4948 static void bnx2x_init_eth_fp(struct bnx2x_softc *sc, int idx) 4949 { 4950 struct bnx2x_fastpath *fp = &sc->fp[idx]; 4951 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; 4952 uint32_t q_type = 0; 4953 int cos; 4954 4955 fp->sc = sc; 4956 fp->index = idx; 4957 4958 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); 4959 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); 4960 4961 if (CHIP_IS_E1x(sc)) 4962 fp->cl_id = SC_L_ID(sc) + idx; 4963 else 4964 /* want client ID same as IGU SB ID for non-E1 */ 4965 fp->cl_id = fp->igu_sb_id; 4966 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); 4967 4968 /* setup sb indices */ 4969 if (!CHIP_IS_E1x(sc)) { 4970 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; 4971 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; 4972 } else { 4973 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; 4974 fp->sb_running_index = 4975 fp->status_block.e1x_sb->sb.running_index; 4976 } 4977 4978 /* init shortcut */ 4979 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(sc, fp); 4980 4981 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; 4982 4983 for (cos = 0; cos < sc->max_cos; cos++) { 4984 cids[cos] = idx; 4985 } 4986 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]; 4987 4988 /* nothing more for a VF to do */ 4989 if (IS_VF(sc)) { 4990 return; 4991 } 4992 4993 bnx2x_init_sb(sc, fp->sb_dma.paddr, BNX2X_VF_ID_INVALID, FALSE, 4994 fp->fw_sb_id, fp->igu_sb_id); 4995 4996 bnx2x_update_fp_sb_idx(fp); 4997 4998 /* Configure Queue State object */ 4999 rte_bit_relaxed_set32(ECORE_Q_TYPE_HAS_RX, &q_type); 5000 rte_bit_relaxed_set32(ECORE_Q_TYPE_HAS_TX, &q_type); 5001 5002 ecore_init_queue_obj(sc, 5003 &sc->sp_objs[idx].q_obj, 5004 fp->cl_id, 5005 cids, 5006 
			     sc->max_cos,
			     SC_FUNC(sc),
			     BNX2X_SP(sc, q_rdata),
			     (rte_iova_t)BNX2X_SP_MAPPING(sc, q_rdata),
			     q_type);

	/* configure classification DBs */
	ecore_init_mac_obj(sc,
			   &sc->sp_objs[idx].mac_obj,
			   fp->cl_id,
			   idx,
			   SC_FUNC(sc),
			   BNX2X_SP(sc, mac_rdata),
			   (rte_iova_t)BNX2X_SP_MAPPING(sc, mac_rdata),
			   ECORE_FILTER_MAC_PENDING, &sc->sp_state,
			   ECORE_OBJ_TYPE_RX_TX, &sc->macs_pool);
}

static void
bnx2x_update_rx_prod(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
		     uint16_t rx_bd_prod, uint16_t rx_cq_prod)
{
	struct ustorm_eth_rx_producers rx_prods;
	uint32_t i;

	memset(&rx_prods, 0, sizeof(rx_prods));

	/* update producers */
	rx_prods.bd_prod = rx_bd_prod;
	rx_prods.cqe_prod = rx_cq_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers, since the FW might read the BD/SGE right after the
	 * producer is updated.
	 * This is only applicable to weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes that BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
		REG_WR(sc, (fp->ustorm_rx_prods_offset + (i * 4)),
		       ((uint32_t *)&rx_prods)[i]);
	}

	wmb(); /* keep producer updates ordered */
}

static void bnx2x_init_rx_rings(struct bnx2x_softc *sc)
{
	struct bnx2x_fastpath *fp;
	int i;
	struct bnx2x_rx_queue *rxq;

	for (i = 0; i < sc->num_queues; i++) {
		fp = &sc->fp[i];
		rxq = sc->rx_queues[fp->index];
		if (!rxq) {
			PMD_RX_LOG(ERR, "RX queue is NULL");
			return;
		}

		rxq->rx_bd_head = 0;
		rxq->rx_bd_tail = rxq->nb_rx_desc;
		rxq->rx_cq_head = 0;
		rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq);
		*fp->rx_cq_cons_sb = 0;

		/*
		 * Activate the BD ring...
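		 * This publishes the initial rx_bd_tail/rx_cq_tail producers
		 * to the ustorm producer registers via bnx2x_update_rx_prod().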
5077 * Warning, this will generate an interrupt (to the TSTORM) 5078 * so this can only be done after the chip is initialized 5079 */ 5080 bnx2x_update_rx_prod(sc, fp, rxq->rx_bd_tail, rxq->rx_cq_tail); 5081 5082 if (i != 0) { 5083 continue; 5084 } 5085 } 5086 } 5087 5088 static void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp) 5089 { 5090 struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; 5091 5092 fp->tx_db.data.header.header = 1 << DOORBELL_HDR_DB_TYPE_SHIFT; 5093 fp->tx_db.data.zero_fill1 = 0; 5094 fp->tx_db.data.prod = 0; 5095 5096 if (!txq) { 5097 PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); 5098 return; 5099 } 5100 5101 txq->tx_pkt_tail = 0; 5102 txq->tx_pkt_head = 0; 5103 txq->tx_bd_tail = 0; 5104 txq->tx_bd_head = 0; 5105 } 5106 5107 static void bnx2x_init_tx_rings(struct bnx2x_softc *sc) 5108 { 5109 int i; 5110 5111 for (i = 0; i < sc->num_queues; i++) { 5112 bnx2x_init_tx_ring_one(&sc->fp[i]); 5113 } 5114 } 5115 5116 static void bnx2x_init_def_sb(struct bnx2x_softc *sc) 5117 { 5118 struct host_sp_status_block *def_sb = sc->def_sb; 5119 rte_iova_t mapping = sc->def_sb_dma.paddr; 5120 int igu_sp_sb_index; 5121 int igu_seg_id; 5122 int port = SC_PORT(sc); 5123 int func = SC_FUNC(sc); 5124 int reg_offset, reg_offset_en5; 5125 uint64_t section; 5126 int index, sindex; 5127 struct hc_sp_status_block_data sp_sb_data; 5128 5129 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 5130 5131 if (CHIP_INT_MODE_IS_BC(sc)) { 5132 igu_sp_sb_index = DEF_SB_IGU_ID; 5133 igu_seg_id = HC_SEG_ACCESS_DEF; 5134 } else { 5135 igu_sp_sb_index = sc->igu_dsb_id; 5136 igu_seg_id = IGU_SEG_ACCESS_DEF; 5137 } 5138 5139 /* attentions */ 5140 section = ((uint64_t) mapping + 5141 offsetof(struct host_sp_status_block, atten_status_block)); 5142 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; 5143 sc->attn_state = 0; 5144 5145 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 5146 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 5147 5148 reg_offset_en5 = (port) ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 5149 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; 5150 5151 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 5152 /* take care of sig[0]..sig[4] */ 5153 for (sindex = 0; sindex < 4; sindex++) { 5154 sc->attn_group[index].sig[sindex] = 5155 REG_RD(sc, 5156 (reg_offset + (sindex * 0x4) + 5157 (0x10 * index))); 5158 } 5159 5160 if (!CHIP_IS_E1x(sc)) { 5161 /* 5162 * enable5 is separate from the rest of the registers, 5163 * and the address skip is 4 and not 16 between the 5164 * different groups 5165 */ 5166 sc->attn_group[index].sig[4] = 5167 REG_RD(sc, (reg_offset_en5 + (0x4 * index))); 5168 } else { 5169 sc->attn_group[index].sig[4] = 0; 5170 } 5171 } 5172 5173 if (sc->devinfo.int_block == INT_BLOCK_HC) { 5174 reg_offset = 5175 port ? 
HC_REG_ATTN_MSG1_ADDR_L : HC_REG_ATTN_MSG0_ADDR_L; 5176 REG_WR(sc, reg_offset, U64_LO(section)); 5177 REG_WR(sc, (reg_offset + 4), U64_HI(section)); 5178 } else if (!CHIP_IS_E1x(sc)) { 5179 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); 5180 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); 5181 } 5182 5183 section = ((uint64_t) mapping + 5184 offsetof(struct host_sp_status_block, sp_sb)); 5185 5186 bnx2x_zero_sp_sb(sc); 5187 5188 /* PCI guarantees endianity of regpair */ 5189 sp_sb_data.state = SB_ENABLED; 5190 sp_sb_data.host_sb_addr.lo = U64_LO(section); 5191 sp_sb_data.host_sb_addr.hi = U64_HI(section); 5192 sp_sb_data.igu_sb_id = igu_sp_sb_index; 5193 sp_sb_data.igu_seg_id = igu_seg_id; 5194 sp_sb_data.p_func.pf_id = func; 5195 sp_sb_data.p_func.vnic_id = SC_VN(sc); 5196 sp_sb_data.p_func.vf_id = 0xff; 5197 5198 bnx2x_wr_sp_sb_data(sc, &sp_sb_data); 5199 5200 bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 5201 } 5202 5203 static void bnx2x_init_sp_ring(struct bnx2x_softc *sc) 5204 { 5205 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); 5206 sc->spq_prod_idx = 0; 5207 sc->dsb_sp_prod = 5208 &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; 5209 sc->spq_prod_bd = sc->spq; 5210 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); 5211 } 5212 5213 static void bnx2x_init_eq_ring(struct bnx2x_softc *sc) 5214 { 5215 union event_ring_elem *elem; 5216 int i; 5217 5218 for (i = 1; i <= NUM_EQ_PAGES; i++) { 5219 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; 5220 5221 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + 5222 BNX2X_PAGE_SIZE * 5223 (i % NUM_EQ_PAGES))); 5224 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + 5225 BNX2X_PAGE_SIZE * 5226 (i % NUM_EQ_PAGES))); 5227 } 5228 5229 sc->eq_cons = 0; 5230 sc->eq_prod = NUM_EQ_DESC; 5231 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; 5232 5233 atomic_store_rel_long(&sc->eq_spq_left, 5234 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), 5235 NUM_EQ_DESC) - 1)); 5236 } 5237 5238 static void bnx2x_init_internal_common(struct bnx2x_softc *sc) 5239 { 5240 int i; 5241 5242 /* 5243 * Zero this manually as its initialization is currently missing 5244 * in the initTool. 5245 */ 5246 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { 5247 REG_WR(sc, 5248 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), 5249 0); 5250 } 5251 5252 if (!CHIP_IS_E1x(sc)) { 5253 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), 5254 CHIP_INT_MODE_IS_BC(sc) ? 
HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}

static void bnx2x_init_internal(struct bnx2x_softc *sc, uint32_t load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(sc);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		/* nothing to do */
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is initialized inside bnx2x_pf_init */
		break;

	default:
		PMD_DRV_LOG(NOTICE, sc, "Unknown load_code (0x%x) from MCP",
			    load_code);
		break;
	}
}

static void
storm_memset_func_cfg(struct bnx2x_softc *sc,
		      struct tstorm_eth_function_common_config *tcfg,
		      uint16_t abs_fid)
{
	uint32_t addr;
	size_t size;

	addr = (BAR_TSTRORM_INTMEM +
		TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
	size = sizeof(struct tstorm_eth_function_common_config);
	ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
}

static void bnx2x_func_init(struct bnx2x_softc *sc, struct bnx2x_func_init_params *p)
{
	struct tstorm_eth_function_common_config tcfg = { 0 };

	if (CHIP_IS_E1x(sc)) {
		storm_memset_func_cfg(sc, &tcfg, p->func_id);
	}

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
	storm_memset_func_en(sc, p->func_id, 1);

	/* spq */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(sc, p->spq_map, p->func_id);
		REG_WR(sc,
		       (XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PROD_OFFSET(p->func_id)), p->spq_prod);
	}
}

/*
 * Calculate the per-VN min rates needed for further normalization of the
 * min rates.
 * If all of the configured min rates are zero, the fairness algorithm is
 * deactivated.
 * If at least one min rate is non-zero, any VN configured with a zero min
 * rate is given DEF_MIN_RATE instead.
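 * The resulting per-VN rates end up in sc->cmng via ecore_init_cmng() and,
 * on the PMF, are later written to the XSTORM rate-shaping/fairness
 * memories by storm_memset_cmng().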
5326 */ 5327 static void bnx2x_calc_vn_min(struct bnx2x_softc *sc, struct cmng_init_input *input) 5328 { 5329 uint32_t vn_cfg; 5330 uint32_t vn_min_rate; 5331 int all_zero = 1; 5332 int vn; 5333 5334 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 5335 vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 5336 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 5337 FUNC_MF_CFG_MIN_BW_SHIFT) * 100); 5338 5339 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 5340 /* skip hidden VNs */ 5341 vn_min_rate = 0; 5342 } else if (!vn_min_rate) { 5343 /* If min rate is zero - set it to 100 */ 5344 vn_min_rate = DEF_MIN_RATE; 5345 } else { 5346 all_zero = 0; 5347 } 5348 5349 input->vnic_min_rate[vn] = vn_min_rate; 5350 } 5351 5352 /* if ETS or all min rates are zeros - disable fairness */ 5353 if (all_zero) { 5354 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 5355 } else { 5356 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 5357 } 5358 } 5359 5360 static uint16_t 5361 bnx2x_extract_max_cfg(__rte_unused struct bnx2x_softc *sc, uint32_t mf_cfg) 5362 { 5363 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 5364 FUNC_MF_CFG_MAX_BW_SHIFT); 5365 5366 if (!max_cfg) { 5367 PMD_DRV_LOG(DEBUG, sc, 5368 "Max BW configured to 0 - using 100 instead"); 5369 max_cfg = 100; 5370 } 5371 5372 return max_cfg; 5373 } 5374 5375 static void 5376 bnx2x_calc_vn_max(struct bnx2x_softc *sc, int vn, struct cmng_init_input *input) 5377 { 5378 uint16_t vn_max_rate; 5379 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 5380 uint32_t max_cfg; 5381 5382 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 5383 vn_max_rate = 0; 5384 } else { 5385 max_cfg = bnx2x_extract_max_cfg(sc, vn_cfg); 5386 5387 if (IS_MF_SI(sc)) { 5388 /* max_cfg in percents of linkspeed */ 5389 vn_max_rate = 5390 ((sc->link_vars.line_speed * max_cfg) / 100); 5391 } else { /* SD modes */ 5392 /* max_cfg is absolute in 100Mb units */ 5393 vn_max_rate = (max_cfg * 100); 5394 } 5395 } 5396 5397 input->vnic_max_rate[vn] = vn_max_rate; 5398 } 5399 5400 static void 5401 bnx2x_cmng_fns_init(struct bnx2x_softc *sc, uint8_t read_cfg, uint8_t cmng_type) 5402 { 5403 struct cmng_init_input input; 5404 int vn; 5405 5406 memset(&input, 0, sizeof(struct cmng_init_input)); 5407 5408 input.port_rate = sc->link_vars.line_speed; 5409 5410 if (cmng_type == CMNG_FNS_MINMAX) { 5411 /* read mf conf from shmem */ 5412 if (read_cfg) { 5413 bnx2x_read_mf_cfg(sc); 5414 } 5415 5416 /* get VN min rate and enable fairness if not 0 */ 5417 bnx2x_calc_vn_min(sc, &input); 5418 5419 /* get VN max rate */ 5420 if (sc->port.pmf) { 5421 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 5422 bnx2x_calc_vn_max(sc, vn, &input); 5423 } 5424 } 5425 5426 /* always enable rate shaping and fairness */ 5427 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 5428 5429 ecore_init_cmng(&input, &sc->cmng); 5430 return; 5431 } 5432 } 5433 5434 static int bnx2x_get_cmng_fns_mode(struct bnx2x_softc *sc) 5435 { 5436 if (CHIP_REV_IS_SLOW(sc)) { 5437 return CMNG_FNS_NONE; 5438 } 5439 5440 if (IS_MF(sc)) { 5441 return CMNG_FNS_MINMAX; 5442 } 5443 5444 return CMNG_FNS_NONE; 5445 } 5446 5447 static void 5448 storm_memset_cmng(struct bnx2x_softc *sc, struct cmng_init *cmng, uint8_t port) 5449 { 5450 int vn; 5451 int func; 5452 uint32_t addr; 5453 size_t size; 5454 5455 addr = (BAR_XSTRORM_INTMEM + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port)); 5456 size = sizeof(struct cmng_struct_per_port); 5457 ecore_storm_memset_struct(sc, addr, size, (uint32_t *) & cmng->port); 5458 5459 for (vn = VN_0; vn < 
SC_MAX_VN_NUM(sc); vn++) {
		func = func_by_vn(sc, vn);

		addr = (BAR_XSTRORM_INTMEM +
			XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
		size = sizeof(struct rate_shaping_vars_per_vn);
		ecore_storm_memset_struct(sc, addr, size,
					  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);

		addr = (BAR_XSTRORM_INTMEM +
			XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
		size = sizeof(struct fairness_vars_per_vn);
		ecore_storm_memset_struct(sc, addr, size,
					  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
	}
}

static void bnx2x_pf_init(struct bnx2x_softc *sc)
{
	struct bnx2x_func_init_params func_init;
	struct event_ring_data eq_data;
	uint16_t flags;

	memset(&eq_data, 0, sizeof(struct event_ring_data));
	memset(&func_init, 0, sizeof(struct bnx2x_func_init_params));

	if (!CHIP_IS_E1x(sc)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(sc,
		       (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			(BNX2X_IGU_STAS_MSG_VF_CNT * 4) +
			((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) *
			 4)), 0);
		/* ATTN */
		REG_WR(sc,
		       (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			(BNX2X_IGU_STAS_MSG_VF_CNT * 4) +
			(BNX2X_IGU_STAS_MSG_PF_CNT * 4) +
			((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) *
			 4)), 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	func_init.func_flgs = flags;
	func_init.pf_id = SC_FUNC(sc);
	func_init.func_id = SC_FUNC(sc);
	func_init.spq_map = sc->spq_dma.paddr;
	func_init.spq_prod = sc->spq_prod_idx;

	bnx2x_func_init(sc, &func_init);

	memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));

	/*
	 * Congestion management values depend on the link rate.
	 * There is no active link, so the initial link rate is set to 10Gbps.
	 * When the link comes up, the congestion management values are
	 * re-calculated according to the actual link rate.
	 */
	sc->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(sc, TRUE, bnx2x_get_cmng_fns_mode(sc));

	/* Only the PMF sets the HW */
	if (sc->port.pmf) {
		storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
	}

	/* init Event Queue - PCI bus guarantees correct endianity */
	eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
	eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
	eq_data.producer = sc->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
}

static void bnx2x_hc_int_enable(struct bnx2x_softc *sc)
{
	int port = SC_PORT(sc);
	uint32_t addr = (port) ?
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 5544 uint32_t val = REG_RD(sc, addr); 5545 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) 5546 || (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); 5547 uint8_t single_msix = (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); 5548 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI); 5549 5550 if (msix) { 5551 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 5552 HC_CONFIG_0_REG_INT_LINE_EN_0); 5553 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 5554 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 5555 if (single_msix) { 5556 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; 5557 } 5558 } else if (msi) { 5559 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 5560 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 5561 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 5562 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 5563 } else { 5564 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 5565 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 5566 HC_CONFIG_0_REG_INT_LINE_EN_0 | 5567 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 5568 5569 REG_WR(sc, addr, val); 5570 5571 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 5572 } 5573 5574 REG_WR(sc, addr, val); 5575 5576 /* ensure that HC_CONFIG is written before leading/trailing edge config */ 5577 mb(); 5578 5579 /* init leading/trailing edge */ 5580 if (IS_MF(sc)) { 5581 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 5582 if (sc->port.pmf) { 5583 /* enable nig and gpio3 attention */ 5584 val |= 0x1100; 5585 } 5586 } else { 5587 val = 0xffff; 5588 } 5589 5590 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port * 8), val); 5591 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port * 8), val); 5592 5593 /* make sure that interrupts are indeed enabled from here on */ 5594 mb(); 5595 } 5596 5597 static void bnx2x_igu_int_enable(struct bnx2x_softc *sc) 5598 { 5599 uint32_t val; 5600 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) 5601 || (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); 5602 uint8_t single_msix = (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); 5603 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI); 5604 5605 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 5606 5607 if (msix) { 5608 val &= ~(IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_SINGLE_ISR_EN); 5609 val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN); 5610 if (single_msix) { 5611 val |= IGU_PF_CONF_SINGLE_ISR_EN; 5612 } 5613 } else if (msi) { 5614 val &= ~IGU_PF_CONF_INT_LINE_EN; 5615 val |= (IGU_PF_CONF_MSI_MSIX_EN | 5616 IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); 5617 } else { 5618 val &= ~IGU_PF_CONF_MSI_MSIX_EN; 5619 val |= (IGU_PF_CONF_INT_LINE_EN | 5620 IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); 5621 } 5622 5623 /* clean previous status - need to configure igu prior to ack */ 5624 if ((!msix) || single_msix) { 5625 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 5626 bnx2x_ack_int(sc); 5627 } 5628 5629 val |= IGU_PF_CONF_FUNC_EN; 5630 5631 PMD_DRV_LOG(DEBUG, sc, "write 0x%x to IGU mode %s", 5632 val, ((msix) ? "MSI-X" : ((msi) ? 
"MSI" : "INTx"))); 5633 5634 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 5635 5636 mb(); 5637 5638 /* init leading/trailing edge */ 5639 if (IS_MF(sc)) { 5640 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 5641 if (sc->port.pmf) { 5642 /* enable nig and gpio3 attention */ 5643 val |= 0x1100; 5644 } 5645 } else { 5646 val = 0xffff; 5647 } 5648 5649 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 5650 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 5651 5652 /* make sure that interrupts are indeed enabled from here on */ 5653 mb(); 5654 } 5655 5656 static void bnx2x_int_enable(struct bnx2x_softc *sc) 5657 { 5658 if (sc->devinfo.int_block == INT_BLOCK_HC) { 5659 bnx2x_hc_int_enable(sc); 5660 } else { 5661 bnx2x_igu_int_enable(sc); 5662 } 5663 } 5664 5665 static void bnx2x_hc_int_disable(struct bnx2x_softc *sc) 5666 { 5667 int port = SC_PORT(sc); 5668 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 5669 uint32_t val = REG_RD(sc, addr); 5670 5671 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 5672 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 5673 HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); 5674 /* flush all outstanding writes */ 5675 mb(); 5676 5677 REG_WR(sc, addr, val); 5678 if (REG_RD(sc, addr) != val) { 5679 PMD_DRV_LOG(ERR, sc, "proper val not read from HC IGU!"); 5680 } 5681 } 5682 5683 static void bnx2x_igu_int_disable(struct bnx2x_softc *sc) 5684 { 5685 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 5686 5687 val &= ~(IGU_PF_CONF_MSI_MSIX_EN | 5688 IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN); 5689 5690 PMD_DRV_LOG(DEBUG, sc, "write %x to IGU", val); 5691 5692 /* flush all outstanding writes */ 5693 mb(); 5694 5695 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 5696 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { 5697 PMD_DRV_LOG(ERR, sc, "proper val not read from IGU!"); 5698 } 5699 } 5700 5701 static void bnx2x_int_disable(struct bnx2x_softc *sc) 5702 { 5703 if (sc->devinfo.int_block == INT_BLOCK_HC) { 5704 bnx2x_hc_int_disable(sc); 5705 } else { 5706 bnx2x_igu_int_disable(sc); 5707 } 5708 } 5709 5710 static void bnx2x_nic_init(struct bnx2x_softc *sc, int load_code) 5711 { 5712 int i; 5713 5714 PMD_INIT_FUNC_TRACE(sc); 5715 5716 for (i = 0; i < sc->num_queues; i++) { 5717 bnx2x_init_eth_fp(sc, i); 5718 } 5719 5720 rmb(); /* ensure status block indices were read */ 5721 5722 bnx2x_init_rx_rings(sc); 5723 bnx2x_init_tx_rings(sc); 5724 5725 if (IS_VF(sc)) { 5726 bnx2x_memset_stats(sc); 5727 return; 5728 } 5729 5730 /* initialize MOD_ABS interrupts */ 5731 elink_init_mod_abs_int(sc, &sc->link_vars, 5732 sc->devinfo.chip_id, 5733 sc->devinfo.shmem_base, 5734 sc->devinfo.shmem2_base, SC_PORT(sc)); 5735 5736 bnx2x_init_def_sb(sc); 5737 bnx2x_update_dsb_idx(sc); 5738 bnx2x_init_sp_ring(sc); 5739 bnx2x_init_eq_ring(sc); 5740 bnx2x_init_internal(sc, load_code); 5741 bnx2x_pf_init(sc); 5742 bnx2x_stats_init(sc); 5743 5744 /* flush all before enabling interrupts */ 5745 mb(); 5746 5747 bnx2x_int_enable(sc); 5748 5749 /* check for SPIO5 */ 5750 bnx2x_attn_int_deasserted0(sc, 5751 REG_RD(sc, 5752 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 5753 SC_PORT(sc) * 4)) & 5754 AEU_INPUTS_ATTN_BITS_SPIO5); 5755 } 5756 5757 static void bnx2x_init_objs(struct bnx2x_softc *sc) 5758 { 5759 /* mcast rules must be added to tx if tx switching is enabled */ 5760 ecore_obj_type o_type; 5761 if (sc->flags & BNX2X_TX_SWITCHING) 5762 o_type = ECORE_OBJ_TYPE_RX_TX; 5763 else 5764 o_type = ECORE_OBJ_TYPE_RX; 5765 5766 /* RX_MODE controlling object */ 5767 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); 5768 
5769 /* multicast configuration controlling object */ 5770 ecore_init_mcast_obj(sc, 5771 &sc->mcast_obj, 5772 sc->fp[0].cl_id, 5773 sc->fp[0].index, 5774 SC_FUNC(sc), 5775 SC_FUNC(sc), 5776 BNX2X_SP(sc, mcast_rdata), 5777 (rte_iova_t)BNX2X_SP_MAPPING(sc, mcast_rdata), 5778 ECORE_FILTER_MCAST_PENDING, 5779 &sc->sp_state, o_type); 5780 5781 /* Setup CAM credit pools */ 5782 ecore_init_mac_credit_pool(sc, 5783 &sc->macs_pool, 5784 SC_FUNC(sc), 5785 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 5786 VNICS_PER_PATH(sc)); 5787 5788 ecore_init_vlan_credit_pool(sc, 5789 &sc->vlans_pool, 5790 SC_ABS_FUNC(sc) >> 1, 5791 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 5792 VNICS_PER_PATH(sc)); 5793 5794 /* RSS configuration object */ 5795 ecore_init_rss_config_obj(sc, &sc->rss_conf_obj, sc->fp->cl_id, 5796 sc->fp->index, SC_FUNC(sc), SC_FUNC(sc), 5797 BNX2X_SP(sc, rss_rdata), 5798 (rte_iova_t)BNX2X_SP_MAPPING(sc, rss_rdata), 5799 ECORE_FILTER_RSS_CONF_PENDING, &sc->sp_state, 5800 ECORE_OBJ_TYPE_RX); 5801 } 5802 5803 /* 5804 * Initialize the function. This must be called before sending CLIENT_SETUP 5805 * for the first client. 5806 */ 5807 static int bnx2x_func_start(struct bnx2x_softc *sc) 5808 { 5809 struct ecore_func_state_params func_params = { NULL }; 5810 struct ecore_func_start_params *start_params = 5811 &func_params.params.start; 5812 5813 /* Prepare parameters for function state transitions */ 5814 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 5815 5816 func_params.f_obj = &sc->func_obj; 5817 func_params.cmd = ECORE_F_CMD_START; 5818 5819 /* Function parameters */ 5820 start_params->mf_mode = sc->devinfo.mf_info.mf_mode; 5821 start_params->sd_vlan_tag = OVLAN(sc); 5822 5823 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { 5824 start_params->network_cos_mode = STATIC_COS; 5825 } else { /* CHIP_IS_E1X */ 5826 start_params->network_cos_mode = FW_WRR; 5827 } 5828 5829 return ecore_func_state_change(sc, &func_params); 5830 } 5831 5832 static int bnx2x_set_power_state(struct bnx2x_softc *sc, uint8_t state) 5833 { 5834 uint16_t pmcsr; 5835 5836 /* If there is no power capability, silently succeed */ 5837 if (!(sc->devinfo.pcie_cap_flags & BNX2X_PM_CAPABLE_FLAG)) { 5838 PMD_DRV_LOG(INFO, sc, "No power capability"); 5839 return 0; 5840 } 5841 5842 pci_read(sc, (sc->devinfo.pcie_pm_cap_reg + RTE_PCI_PM_CTRL), &pmcsr, 5843 2); 5844 5845 switch (state) { 5846 case PCI_PM_D0: 5847 pci_write_word(sc, (sc->devinfo.pcie_pm_cap_reg + RTE_PCI_PM_CTRL), 5848 ((pmcsr & ~RTE_PCI_PM_CTRL_STATE_MASK) | RTE_PCI_PM_CTRL_PME_STATUS)); 5849 5850 if (pmcsr & RTE_PCI_PM_CTRL_STATE_MASK) { 5851 /* delay required during transition out of D3hot */ 5852 DELAY(20000); 5853 } 5854 5855 break; 5856 5857 case PCI_PM_D3hot: 5858 /* don't shut down the power for emulation and FPGA */ 5859 if (CHIP_REV_IS_SLOW(sc)) { 5860 return 0; 5861 } 5862 5863 pmcsr &= ~RTE_PCI_PM_CTRL_STATE_MASK; 5864 /* D3 power state */ 5865 pmcsr |= 0x3; 5866 5867 if (sc->wol) { 5868 pmcsr |= RTE_PCI_PM_CTRL_PME_ENABLE; 5869 } 5870 5871 pci_write_long(sc, 5872 (sc->devinfo.pcie_pm_cap_reg + 5873 RTE_PCI_PM_CTRL), pmcsr); 5874 5875 /* 5876 * No more memory access after this point until device is brought back 5877 * to D0 state. 
5878 */ 5879 break; 5880 5881 default: 5882 PMD_DRV_LOG(NOTICE, sc, "Can't support PCI power state = %d", 5883 state); 5884 return -1; 5885 } 5886 5887 return 0; 5888 } 5889 5890 /* return true if succeeded to acquire the lock */ 5891 static uint8_t bnx2x_trylock_hw_lock(struct bnx2x_softc *sc, uint32_t resource) 5892 { 5893 uint32_t lock_status; 5894 uint32_t resource_bit = (1 << resource); 5895 int func = SC_FUNC(sc); 5896 uint32_t hw_lock_control_reg; 5897 5898 /* Validating that the resource is within range */ 5899 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 5900 PMD_DRV_LOG(INFO, sc, 5901 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)", 5902 resource, HW_LOCK_MAX_RESOURCE_VALUE); 5903 return FALSE; 5904 } 5905 5906 if (func <= 5) { 5907 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func * 8); 5908 } else { 5909 hw_lock_control_reg = 5910 (MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8); 5911 } 5912 5913 /* try to acquire the lock */ 5914 REG_WR(sc, hw_lock_control_reg + 4, resource_bit); 5915 lock_status = REG_RD(sc, hw_lock_control_reg); 5916 if (lock_status & resource_bit) { 5917 return TRUE; 5918 } 5919 5920 PMD_DRV_LOG(NOTICE, sc, "Failed to get a resource lock 0x%x", resource); 5921 5922 return FALSE; 5923 } 5924 5925 /* 5926 * Get the recovery leader resource id according to the engine this function 5927 * belongs to. Currently only 2 engines are supported. 5928 */ 5929 static int bnx2x_get_leader_lock_resource(struct bnx2x_softc *sc) 5930 { 5931 if (SC_PATH(sc)) { 5932 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; 5933 } else { 5934 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0; 5935 } 5936 } 5937 5938 /* try to acquire a leader lock for current engine */ 5939 static uint8_t bnx2x_trylock_leader_lock(struct bnx2x_softc *sc) 5940 { 5941 return bnx2x_trylock_hw_lock(sc, bnx2x_get_leader_lock_resource(sc)); 5942 } 5943 5944 static int bnx2x_release_leader_lock(struct bnx2x_softc *sc) 5945 { 5946 return bnx2x_release_hw_lock(sc, bnx2x_get_leader_lock_resource(sc)); 5947 } 5948 5949 /* close gates #2, #3 and #4 */ 5950 static void bnx2x_set_234_gates(struct bnx2x_softc *sc, uint8_t close) 5951 { 5952 uint32_t val; 5953 5954 /* gates #2 and #4a are closed/opened */ 5955 /* #4 */ 5956 REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, ! !close); 5957 /* #2 */ 5958 REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, ! 
!close); 5959 5960 /* #3 */ 5961 if (CHIP_IS_E1x(sc)) { 5962 /* prevent interrupts from HC on both ports */ 5963 val = REG_RD(sc, HC_REG_CONFIG_1); 5964 if (close) 5965 REG_WR(sc, HC_REG_CONFIG_1, (val & ~(uint32_t) 5966 HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 5967 else 5968 REG_WR(sc, HC_REG_CONFIG_1, 5969 (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 5970 5971 val = REG_RD(sc, HC_REG_CONFIG_0); 5972 if (close) 5973 REG_WR(sc, HC_REG_CONFIG_0, (val & ~(uint32_t) 5974 HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 5975 else 5976 REG_WR(sc, HC_REG_CONFIG_0, 5977 (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 5978 5979 } else { 5980 /* Prevent incoming interrupts in IGU */ 5981 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); 5982 5983 if (close) 5984 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, 5985 (val & ~(uint32_t) 5986 IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 5987 else 5988 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, 5989 (val | 5990 IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 5991 } 5992 5993 wmb(); 5994 } 5995 5996 /* poll for pending writes bit, it should get cleared in no more than 1s */ 5997 static int bnx2x_er_poll_igu_vq(struct bnx2x_softc *sc) 5998 { 5999 uint32_t cnt = 1000; 6000 uint32_t pend_bits = 0; 6001 6002 do { 6003 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); 6004 6005 if (pend_bits == 0) { 6006 break; 6007 } 6008 6009 DELAY(1000); 6010 } while (cnt-- > 0); 6011 6012 if (cnt <= 0) { 6013 PMD_DRV_LOG(NOTICE, sc, "Still pending IGU requests bits=0x%08x!", 6014 pend_bits); 6015 return -1; 6016 } 6017 6018 return 0; 6019 } 6020 6021 #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ 6022 6023 static void bnx2x_clp_reset_prep(struct bnx2x_softc *sc, uint32_t * magic_val) 6024 { 6025 /* Do some magic... */ 6026 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 6027 *magic_val = val & SHARED_MF_CLP_MAGIC; 6028 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 6029 } 6030 6031 /* restore the value of the 'magic' bit */ 6032 static void bnx2x_clp_reset_done(struct bnx2x_softc *sc, uint32_t magic_val) 6033 { 6034 /* Restore the 'magic' bit value... 
*/ 6035 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 6036 MFCFG_WR(sc, shared_mf_config.clp_mb, 6037 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 6038 } 6039 6040 /* prepare for MCP reset, takes care of CLP configurations */ 6041 static void bnx2x_reset_mcp_prep(struct bnx2x_softc *sc, uint32_t * magic_val) 6042 { 6043 uint32_t shmem; 6044 uint32_t validity_offset; 6045 6046 /* set `magic' bit in order to save MF config */ 6047 bnx2x_clp_reset_prep(sc, magic_val); 6048 6049 /* get shmem offset */ 6050 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 6051 validity_offset = 6052 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); 6053 6054 /* Clear validity map flags */ 6055 if (shmem > 0) { 6056 REG_WR(sc, shmem + validity_offset, 0); 6057 } 6058 } 6059 6060 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 6061 #define MCP_ONE_TIMEOUT 100 /* 100 ms */ 6062 6063 static void bnx2x_mcp_wait_one(struct bnx2x_softc *sc) 6064 { 6065 /* special handling for emulation and FPGA (10 times longer) */ 6066 if (CHIP_REV_IS_SLOW(sc)) { 6067 DELAY((MCP_ONE_TIMEOUT * 10) * 1000); 6068 } else { 6069 DELAY((MCP_ONE_TIMEOUT) * 1000); 6070 } 6071 } 6072 6073 /* initialize shmem_base and waits for validity signature to appear */ 6074 static int bnx2x_init_shmem(struct bnx2x_softc *sc) 6075 { 6076 int cnt = 0; 6077 uint32_t val = 0; 6078 6079 do { 6080 sc->devinfo.shmem_base = 6081 sc->link_params.shmem_base = 6082 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 6083 6084 if (sc->devinfo.shmem_base) { 6085 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 6086 if (val & SHR_MEM_VALIDITY_MB) 6087 return 0; 6088 } 6089 6090 bnx2x_mcp_wait_one(sc); 6091 6092 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 6093 6094 PMD_DRV_LOG(NOTICE, sc, "BAD MCP validity signature"); 6095 6096 return -1; 6097 } 6098 6099 static int bnx2x_reset_mcp_comp(struct bnx2x_softc *sc, uint32_t magic_val) 6100 { 6101 int rc = bnx2x_init_shmem(sc); 6102 6103 /* Restore the `magic' bit value */ 6104 bnx2x_clp_reset_done(sc, magic_val); 6105 6106 return rc; 6107 } 6108 6109 static void bnx2x_pxp_prep(struct bnx2x_softc *sc) 6110 { 6111 REG_WR(sc, PXP2_REG_RD_START_INIT, 0); 6112 REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0); 6113 wmb(); 6114 } 6115 6116 /* 6117 * Reset the whole chip except for: 6118 * - PCIE core 6119 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit) 6120 * - IGU 6121 * - MISC (including AEU) 6122 * - GRC 6123 * - RBCN, RBCP 6124 */ 6125 static void bnx2x_process_kill_chip_reset(struct bnx2x_softc *sc, uint8_t global) 6126 { 6127 uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; 6128 uint32_t global_bits2, stay_reset2; 6129 6130 /* 6131 * Bits that have to be set in reset_mask2 if we want to reset 'global' 6132 * (per chip) blocks. 6133 */ 6134 global_bits2 = 6135 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | 6136 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; 6137 6138 /* 6139 * Don't reset the following blocks. 6140 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be 6141 * reset, as in 4 port device they might still be owned 6142 * by the MCP (there is only one leader per path). 
6143 */ 6144 not_reset_mask1 = 6145 MISC_REGISTERS_RESET_REG_1_RST_HC | 6146 MISC_REGISTERS_RESET_REG_1_RST_PXPV | 6147 MISC_REGISTERS_RESET_REG_1_RST_PXP; 6148 6149 not_reset_mask2 = 6150 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | 6151 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | 6152 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | 6153 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | 6154 MISC_REGISTERS_RESET_REG_2_RST_RBCN | 6155 MISC_REGISTERS_RESET_REG_2_RST_GRC | 6156 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | 6157 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | 6158 MISC_REGISTERS_RESET_REG_2_RST_ATC | 6159 MISC_REGISTERS_RESET_REG_2_PGLC | 6160 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | 6161 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | 6162 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | 6163 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | 6164 MISC_REGISTERS_RESET_REG_2_UMAC0 | MISC_REGISTERS_RESET_REG_2_UMAC1; 6165 6166 /* 6167 * Keep the following blocks in reset: 6168 * - all xxMACs are handled by the elink code. 6169 */ 6170 stay_reset2 = 6171 MISC_REGISTERS_RESET_REG_2_XMAC | 6172 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; 6173 6174 /* Full reset masks according to the chip */ 6175 reset_mask1 = 0xffffffff; 6176 6177 if (CHIP_IS_E1H(sc)) 6178 reset_mask2 = 0x1ffff; 6179 else if (CHIP_IS_E2(sc)) 6180 reset_mask2 = 0xfffff; 6181 else /* CHIP_IS_E3 */ 6182 reset_mask2 = 0x3ffffff; 6183 6184 /* Don't reset global blocks unless we need to */ 6185 if (!global) 6186 reset_mask2 &= ~global_bits2; 6187 6188 /* 6189 * In case of attention in the QM, we need to reset PXP 6190 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM 6191 * because otherwise QM reset would release 'close the gates' shortly 6192 * before resetting the PXP, then the PSWRQ would send a write 6193 * request to PGLUE. Then when PXP is reset, PGLUE would try to 6194 * read the payload data from PSWWR, but PSWWR would not 6195 * respond. The write queue in PGLUE would get stuck and dmae commands 6196 * would not return. Therefore it's important to reset the second 6197 * reset register (containing the 6198 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the 6199 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM 6200 * bit).
6201 */ 6202 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 6203 reset_mask2 & (~not_reset_mask2)); 6204 6205 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6206 reset_mask1 & (~not_reset_mask1)); 6207 6208 mb(); 6209 wmb(); 6210 6211 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 6212 reset_mask2 & (~stay_reset2)); 6213 6214 mb(); 6215 wmb(); 6216 6217 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); 6218 wmb(); 6219 } 6220 6221 static int bnx2x_process_kill(struct bnx2x_softc *sc, uint8_t global) 6222 { 6223 int cnt = 1000; 6224 uint32_t val = 0; 6225 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 6226 uint32_t tags_63_32 = 0; 6227 6228 /* Empty the Tetris buffer, wait for 1s */ 6229 do { 6230 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); 6231 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); 6232 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); 6233 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); 6234 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); 6235 if (CHIP_IS_E3(sc)) { 6236 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); 6237 } 6238 6239 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 6240 ((port_is_idle_0 & 0x1) == 0x1) && 6241 ((port_is_idle_1 & 0x1) == 0x1) && 6242 (pgl_exp_rom2 == 0xffffffff) && 6243 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) 6244 break; 6245 DELAY(1000); 6246 } while (cnt-- > 0); 6247 6248 if (cnt <= 0) { 6249 PMD_DRV_LOG(NOTICE, sc, 6250 "ERROR: Tetris buffer didn't get empty or there " 6251 "are still outstanding read requests after 1s! " 6252 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " 6253 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x", 6254 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, 6255 pgl_exp_rom2); 6256 return -1; 6257 } 6258 6259 mb(); 6260 6261 /* Close gates #2, #3 and #4 */ 6262 bnx2x_set_234_gates(sc, TRUE); 6263 6264 /* Poll for IGU VQs for 57712 and newer chips */ 6265 if (!CHIP_IS_E1x(sc) && bnx2x_er_poll_igu_vq(sc)) { 6266 return -1; 6267 } 6268 6269 /* clear "unprepared" bit */ 6270 REG_WR(sc, MISC_REG_UNPREPARED, 0); 6271 mb(); 6272 6273 /* Make sure all is written to the chip before the reset */ 6274 wmb(); 6275 6276 /* 6277 * Wait for 1ms to empty GLUE and PCI-E core queues, 6278 * PSWHST, GRC and PSWRD Tetris buffer. 6279 */ 6280 DELAY(1000); 6281 6282 /* Prepare to chip reset: */ 6283 /* MCP */ 6284 if (global) { 6285 bnx2x_reset_mcp_prep(sc, &val); 6286 } 6287 6288 /* PXP */ 6289 bnx2x_pxp_prep(sc); 6290 mb(); 6291 6292 /* reset the chip */ 6293 bnx2x_process_kill_chip_reset(sc, global); 6294 mb(); 6295 6296 /* Recover after reset: */ 6297 /* MCP */ 6298 if (global && bnx2x_reset_mcp_comp(sc, val)) { 6299 return -1; 6300 } 6301 6302 /* Open the gates #2, #3 and #4 */ 6303 bnx2x_set_234_gates(sc, FALSE); 6304 6305 return 0; 6306 } 6307 6308 static int bnx2x_leader_reset(struct bnx2x_softc *sc) 6309 { 6310 int rc = 0; 6311 uint8_t global = bnx2x_reset_is_global(sc); 6312 uint32_t load_code; 6313 6314 /* 6315 * If not going to reset MCP, load "fake" driver to reset HW while 6316 * driver is owner of the HW. 
6317 */ 6318 if (!global && !BNX2X_NOMCP(sc)) { 6319 load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 6320 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 6321 if (!load_code) { 6322 PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); 6323 rc = -1; 6324 goto exit_leader_reset; 6325 } 6326 6327 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 6328 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 6329 PMD_DRV_LOG(NOTICE, sc, 6330 "MCP unexpected response, aborting"); 6331 rc = -1; 6332 goto exit_leader_reset2; 6333 } 6334 6335 load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 6336 if (!load_code) { 6337 PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); 6338 rc = -1; 6339 goto exit_leader_reset2; 6340 } 6341 } 6342 6343 /* try to recover after the failure */ 6344 if (bnx2x_process_kill(sc, global)) { 6345 PMD_DRV_LOG(NOTICE, sc, "Something bad occurred on engine %d!", 6346 SC_PATH(sc)); 6347 rc = -1; 6348 goto exit_leader_reset2; 6349 } 6350 6351 /* 6352 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver 6353 * state. 6354 */ 6355 bnx2x_set_reset_done(sc); 6356 if (global) { 6357 bnx2x_clear_reset_global(sc); 6358 } 6359 6360 exit_leader_reset2: 6361 6362 /* unload "fake driver" if it was loaded */ 6363 if (!global &&!BNX2X_NOMCP(sc)) { 6364 bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 6365 bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 6366 } 6367 6368 exit_leader_reset: 6369 6370 sc->is_leader = 0; 6371 bnx2x_release_leader_lock(sc); 6372 6373 mb(); 6374 return rc; 6375 } 6376 6377 /* 6378 * prepare INIT transition, parameters configured: 6379 * - HC configuration 6380 * - Queue's CDU context 6381 */ 6382 static void 6383 bnx2x_pf_q_prep_init(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, 6384 struct ecore_queue_init_params *init_params) 6385 { 6386 uint8_t cos; 6387 int cxt_index, cxt_offset; 6388 6389 rte_bit_relaxed_set32(ECORE_Q_FLG_HC, &init_params->rx.flags); 6390 rte_bit_relaxed_set32(ECORE_Q_FLG_HC, &init_params->tx.flags); 6391 6392 rte_bit_relaxed_set32(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); 6393 rte_bit_relaxed_set32(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); 6394 6395 /* HC rate */ 6396 init_params->rx.hc_rate = 6397 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; 6398 init_params->tx.hc_rate = 6399 sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0; 6400 6401 /* FW SB ID */ 6402 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; 6403 6404 /* CQ index among the SB indices */ 6405 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 6406 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 6407 6408 /* set maximum number of COSs supported by this queue */ 6409 init_params->max_cos = sc->max_cos; 6410 6411 /* set the context pointers queue object */ 6412 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 6413 cxt_index = fp->index / ILT_PAGE_CIDS; 6414 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); 6415 init_params->cxts[cos] = 6416 &sc->context[cxt_index].vcxt[cxt_offset].eth; 6417 } 6418 } 6419 6420 /* set flags that are common for the Tx-only and not normal connections */ 6421 static unsigned long 6422 bnx2x_get_common_flags(struct bnx2x_softc *sc, uint8_t zero_stats) 6423 { 6424 uint32_t flags = 0; 6425 6426 /* PF driver will always initialize the Queue to an ACTIVE state */ 6427 rte_bit_relaxed_set32(ECORE_Q_FLG_ACTIVE, &flags); 6428 6429 /* 6430 * tx only connections collect statistics (on the same index as the 6431 * parent connection). 
The statistics are zeroed when the parent 6432 * connection is initialized. 6433 */ 6434 6435 rte_bit_relaxed_set32(ECORE_Q_FLG_STATS, &flags); 6436 if (zero_stats) { 6437 rte_bit_relaxed_set32(ECORE_Q_FLG_ZERO_STATS, &flags); 6438 } 6439 6440 /* 6441 * tx only connections can support tx-switching, though their 6442 * CoS-ness doesn't survive the loopback 6443 */ 6444 if (sc->flags & BNX2X_TX_SWITCHING) { 6445 rte_bit_relaxed_set32(ECORE_Q_FLG_TX_SWITCH, &flags); 6446 } 6447 6448 rte_bit_relaxed_set32(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); 6449 6450 return flags; 6451 } 6452 6453 static unsigned long bnx2x_get_q_flags(struct bnx2x_softc *sc, uint8_t leading) 6454 { 6455 uint32_t flags = 0; 6456 6457 if (IS_MF_SD(sc)) { 6458 rte_bit_relaxed_set32(ECORE_Q_FLG_OV, &flags); 6459 } 6460 6461 if (leading) { 6462 rte_bit_relaxed_set32(ECORE_Q_FLG_LEADING_RSS, &flags); 6463 rte_bit_relaxed_set32(ECORE_Q_FLG_MCAST, &flags); 6464 } 6465 6466 rte_bit_relaxed_set32(ECORE_Q_FLG_VLAN, &flags); 6467 6468 /* merge with common flags */ 6469 return flags | bnx2x_get_common_flags(sc, TRUE); 6470 } 6471 6472 static void 6473 bnx2x_pf_q_prep_general(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, 6474 struct ecore_general_setup_params *gen_init, uint8_t cos) 6475 { 6476 gen_init->stat_id = bnx2x_stats_id(fp); 6477 gen_init->spcl_id = fp->cl_id; 6478 gen_init->mtu = sc->mtu; 6479 gen_init->cos = cos; 6480 } 6481 6482 static void 6483 bnx2x_pf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, 6484 struct rxq_pause_params *pause, 6485 struct ecore_rxq_setup_params *rxq_init) 6486 { 6487 struct bnx2x_rx_queue *rxq; 6488 6489 rxq = sc->rx_queues[fp->index]; 6490 if (!rxq) { 6491 PMD_RX_LOG(ERR, "RX queue is NULL"); 6492 return; 6493 } 6494 /* pause */ 6495 pause->bd_th_lo = BD_TH_LO(sc); 6496 pause->bd_th_hi = BD_TH_HI(sc); 6497 6498 pause->rcq_th_lo = RCQ_TH_LO(sc); 6499 pause->rcq_th_hi = RCQ_TH_HI(sc); 6500 6501 /* validate rings have enough entries to cross high thresholds */ 6502 if (sc->dropless_fc && 6503 pause->bd_th_hi + FW_PREFETCH_CNT > sc->rx_ring_size) { 6504 PMD_DRV_LOG(WARNING, sc, "rx bd ring threshold limit"); 6505 } 6506 6507 if (sc->dropless_fc && 6508 pause->rcq_th_hi + FW_PREFETCH_CNT > USABLE_RCQ_ENTRIES(rxq)) { 6509 PMD_DRV_LOG(WARNING, sc, "rcq ring threshold limit"); 6510 } 6511 6512 pause->pri_map = 1; 6513 6514 /* rxq setup */ 6515 rxq_init->dscr_map = (rte_iova_t)rxq->rx_ring_phys_addr; 6516 rxq_init->rcq_map = (rte_iova_t)rxq->cq_ring_phys_addr; 6517 rxq_init->rcq_np_map = (rte_iova_t)(rxq->cq_ring_phys_addr + 6518 BNX2X_PAGE_SIZE); 6519 6520 /* 6521 * This should be a maximum number of data bytes that may be 6522 * placed on the BD (not including paddings). 
6523 */ 6524 rxq_init->buf_sz = (fp->rx_buf_size - IP_HEADER_ALIGNMENT_PADDING); 6525 6526 rxq_init->cl_qzone_id = fp->cl_qzone_id; 6527 rxq_init->rss_engine_id = SC_FUNC(sc); 6528 rxq_init->mcast_engine_id = SC_FUNC(sc); 6529 6530 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; 6531 rxq_init->fw_sb_id = fp->fw_sb_id; 6532 6533 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 6534 6535 /* 6536 * configure silent vlan removal 6537 * if multi function mode is afex, then mask default vlan 6538 */ 6539 if (IS_MF_AFEX(sc)) { 6540 rxq_init->silent_removal_value = 6541 sc->devinfo.mf_info.afex_def_vlan_tag; 6542 rxq_init->silent_removal_mask = EVL_VLID_MASK; 6543 } 6544 } 6545 6546 static void 6547 bnx2x_pf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, 6548 struct ecore_txq_setup_params *txq_init, uint8_t cos) 6549 { 6550 struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; 6551 6552 if (!txq) { 6553 PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); 6554 return; 6555 } 6556 txq_init->dscr_map = (rte_iova_t)txq->tx_ring_phys_addr; 6557 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 6558 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 6559 txq_init->fw_sb_id = fp->fw_sb_id; 6560 6561 /* 6562 * set the TSS leading client id for Tx classification to the 6563 * leading RSS client id 6564 */ 6565 txq_init->tss_leading_cl_id = BNX2X_FP(sc, 0, cl_id); 6566 } 6567 6568 /* 6569 * This function performs 2 steps in a queue state machine: 6570 * 1) RESET->INIT 6571 * 2) INIT->SETUP 6572 */ 6573 static int 6574 bnx2x_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, uint8_t leading) 6575 { 6576 struct ecore_queue_state_params q_params = { NULL }; 6577 struct ecore_queue_setup_params *setup_params = &q_params.params.setup; 6578 int rc; 6579 6580 PMD_DRV_LOG(DEBUG, sc, "setting up queue %d", fp->index); 6581 6582 bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 6583 6584 q_params.q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj; 6585 6586 /* we want to wait for completion in this context */ 6587 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 6588 6589 /* prepare the INIT parameters */ 6590 bnx2x_pf_q_prep_init(sc, fp, &q_params.params.init); 6591 6592 /* Set the command */ 6593 q_params.cmd = ECORE_Q_CMD_INIT; 6594 6595 /* Change the state to INIT */ 6596 rc = ecore_queue_state_change(sc, &q_params); 6597 if (rc) { 6598 PMD_DRV_LOG(NOTICE, sc, "Queue(%d) INIT failed", fp->index); 6599 return rc; 6600 } 6601 6602 PMD_DRV_LOG(DEBUG, sc, "init complete"); 6603 6604 /* now move the Queue to the SETUP state */ 6605 memset(setup_params, 0, sizeof(*setup_params)); 6606 6607 /* set Queue flags */ 6608 setup_params->flags = bnx2x_get_q_flags(sc, leading); 6609 6610 /* set general SETUP parameters */ 6611 bnx2x_pf_q_prep_general(sc, fp, &setup_params->gen_params, 6612 FIRST_TX_COS_INDEX); 6613 6614 bnx2x_pf_rx_q_prep(sc, fp, 6615 &setup_params->pause_params, 6616 &setup_params->rxq_params); 6617 6618 bnx2x_pf_tx_q_prep(sc, fp, &setup_params->txq_params, FIRST_TX_COS_INDEX); 6619 6620 /* Set the command */ 6621 q_params.cmd = ECORE_Q_CMD_SETUP; 6622 6623 /* change the state to SETUP */ 6624 rc = ecore_queue_state_change(sc, &q_params); 6625 if (rc) { 6626 PMD_DRV_LOG(NOTICE, sc, "Queue(%d) SETUP failed", fp->index); 6627 return rc; 6628 } 6629 6630 return rc; 6631 } 6632 6633 static int bnx2x_setup_leading(struct bnx2x_softc *sc) 6634 { 6635 if (IS_PF(sc)) 6636 return bnx2x_setup_queue(sc, &sc->fp[0], TRUE); 6637 else /* VF */ 6638 return 
bnx2x_vf_setup_queue(sc, &sc->fp[0], TRUE); 6639 } 6640 6641 static int 6642 bnx2x_config_rss_pf(struct bnx2x_softc *sc, struct ecore_rss_config_obj *rss_obj, 6643 uint8_t config_hash) 6644 { 6645 struct ecore_config_rss_params params = { NULL }; 6646 uint32_t i; 6647 6648 /* 6649 * Although RSS is meaningless when there is a single HW queue we 6650 * still need it enabled in order to have HW Rx hash generated. 6651 */ 6652 6653 params.rss_obj = rss_obj; 6654 6655 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &params.ramrod_flags); 6656 6657 rte_bit_relaxed_set32(ECORE_RSS_MODE_REGULAR, &params.rss_flags); 6658 6659 /* RSS configuration */ 6660 rte_bit_relaxed_set32(ECORE_RSS_IPV4, &params.rss_flags); 6661 rte_bit_relaxed_set32(ECORE_RSS_IPV4_TCP, &params.rss_flags); 6662 rte_bit_relaxed_set32(ECORE_RSS_IPV6, &params.rss_flags); 6663 rte_bit_relaxed_set32(ECORE_RSS_IPV6_TCP, &params.rss_flags); 6664 if (rss_obj->udp_rss_v4) { 6665 rte_bit_relaxed_set32(ECORE_RSS_IPV4_UDP, &params.rss_flags); 6666 } 6667 if (rss_obj->udp_rss_v6) { 6668 rte_bit_relaxed_set32(ECORE_RSS_IPV6_UDP, &params.rss_flags); 6669 } 6670 6671 /* Hash bits */ 6672 params.rss_result_mask = MULTI_MASK; 6673 6674 rte_memcpy(params.ind_table, rss_obj->ind_table, 6675 sizeof(params.ind_table)); 6676 6677 if (config_hash) { 6678 /* RSS keys */ 6679 for (i = 0; i < sizeof(params.rss_key) / 4; i++) { 6680 params.rss_key[i] = (uint32_t) rte_rand(); 6681 } 6682 6683 rte_bit_relaxed_set32(ECORE_RSS_SET_SRCH, &params.rss_flags); 6684 } 6685 6686 if (IS_PF(sc)) 6687 return ecore_config_rss(sc, &params); 6688 else 6689 return bnx2x_vf_config_rss(sc, &params); 6690 } 6691 6692 static int bnx2x_config_rss_eth(struct bnx2x_softc *sc, uint8_t config_hash) 6693 { 6694 return bnx2x_config_rss_pf(sc, &sc->rss_conf_obj, config_hash); 6695 } 6696 6697 static int bnx2x_init_rss_pf(struct bnx2x_softc *sc) 6698 { 6699 uint8_t num_eth_queues = BNX2X_NUM_ETH_QUEUES(sc); 6700 uint32_t i; 6701 6702 /* 6703 * Prepare the initial contents of the indirection table if 6704 * RSS is enabled 6705 */ 6706 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { 6707 sc->rss_conf_obj.ind_table[i] = 6708 (sc->fp->cl_id + (i % num_eth_queues)); 6709 } 6710 6711 if (sc->udp_rss) { 6712 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; 6713 } 6714 6715 /* 6716 * For 57711 SEARCHER configuration (rss_keys) is 6717 * per-port, so if explicit configuration is needed, do it only 6718 * for a PMF. 6719 * 6720 * For 57712 and newer it's a per-function configuration. 6721 */ 6722 return bnx2x_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)); 6723 } 6724 6725 static int 6726 bnx2x_set_mac_one(struct bnx2x_softc *sc, uint8_t * mac, 6727 struct ecore_vlan_mac_obj *obj, uint8_t set, int mac_type, 6728 uint32_t *ramrod_flags) 6729 { 6730 struct ecore_vlan_mac_ramrod_params ramrod_param; 6731 int rc; 6732 6733 memset(&ramrod_param, 0, sizeof(ramrod_param)); 6734 6735 /* fill in general parameters */ 6736 ramrod_param.vlan_mac_obj = obj; 6737 ramrod_param.ramrod_flags = *ramrod_flags; 6738 6739 /* fill a user request section if needed */ 6740 if (!rte_bit_relaxed_get32(RAMROD_CONT, ramrod_flags)) { 6741 rte_memcpy(ramrod_param.user_req.u.mac.mac, mac, 6742 ETH_ALEN); 6743 6744 rte_bit_relaxed_set32(mac_type, 6745 &ramrod_param.user_req.vlan_mac_flags); 6746 6747 /* Set the command: ADD or DEL */ 6748 ramrod_param.user_req.cmd = (set) ?
ECORE_VLAN_MAC_ADD : 6749 ECORE_VLAN_MAC_DEL; 6750 } 6751 6752 rc = ecore_config_vlan_mac(sc, &ramrod_param); 6753 6754 if (rc == ECORE_EXISTS) { 6755 PMD_DRV_LOG(INFO, sc, "Failed to schedule ADD operations (EEXIST)"); 6756 /* do not treat adding same MAC as error */ 6757 rc = 0; 6758 } else if (rc < 0) { 6759 PMD_DRV_LOG(ERR, sc, 6760 "%s MAC failed (%d)", (set ? "Set" : "Delete"), rc); 6761 } 6762 6763 return rc; 6764 } 6765 6766 static int bnx2x_set_eth_mac(struct bnx2x_softc *sc, uint8_t set) 6767 { 6768 uint32_t ramrod_flags = 0; 6769 6770 PMD_DRV_LOG(DEBUG, sc, "Adding Ethernet MAC"); 6771 6772 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &ramrod_flags); 6773 6774 /* Eth MAC is set on RSS leading client (fp[0]) */ 6775 return bnx2x_set_mac_one(sc, sc->link_params.mac_addr, 6776 &sc->sp_objs->mac_obj, 6777 set, ECORE_ETH_MAC, &ramrod_flags); 6778 } 6779 6780 static int bnx2x_get_cur_phy_idx(struct bnx2x_softc *sc) 6781 { 6782 uint32_t sel_phy_idx = 0; 6783 6784 if (sc->link_params.num_phys <= 1) { 6785 return ELINK_INT_PHY; 6786 } 6787 6788 if (sc->link_vars.link_up) { 6789 sel_phy_idx = ELINK_EXT_PHY1; 6790 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ 6791 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && 6792 (sc->link_params.phy[ELINK_EXT_PHY2].supported & 6793 ELINK_SUPPORTED_FIBRE)) 6794 sel_phy_idx = ELINK_EXT_PHY2; 6795 } else { 6796 switch (elink_phy_selection(&sc->link_params)) { 6797 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 6798 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: 6799 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 6800 sel_phy_idx = ELINK_EXT_PHY1; 6801 break; 6802 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: 6803 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 6804 sel_phy_idx = ELINK_EXT_PHY2; 6805 break; 6806 } 6807 } 6808 6809 return sel_phy_idx; 6810 } 6811 6812 static int bnx2x_get_link_cfg_idx(struct bnx2x_softc *sc) 6813 { 6814 uint32_t sel_phy_idx = bnx2x_get_cur_phy_idx(sc); 6815 6816 /* 6817 * The selected activated PHY is always after swapping (in case PHY 6818 * swapping is enabled). 
So when swapping is enabled, we need to reverse 6819 * the configuration 6820 */ 6821 6822 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 6823 if (sel_phy_idx == ELINK_EXT_PHY1) 6824 sel_phy_idx = ELINK_EXT_PHY2; 6825 else if (sel_phy_idx == ELINK_EXT_PHY2) 6826 sel_phy_idx = ELINK_EXT_PHY1; 6827 } 6828 6829 return ELINK_LINK_CONFIG_IDX(sel_phy_idx); 6830 } 6831 6832 static void bnx2x_set_requested_fc(struct bnx2x_softc *sc) 6833 { 6834 /* 6835 * Initialize link parameters structure variables 6836 * It is recommended to turn off RX FC for jumbo frames 6837 * for better performance 6838 */ 6839 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { 6840 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; 6841 } else { 6842 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; 6843 } 6844 } 6845 6846 static void bnx2x_calc_fc_adv(struct bnx2x_softc *sc) 6847 { 6848 uint8_t cfg_idx = bnx2x_get_link_cfg_idx(sc); 6849 switch (sc->link_vars.ieee_fc & 6850 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 6851 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 6852 default: 6853 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 6854 ADVERTISED_Pause); 6855 break; 6856 6857 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 6858 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 6859 ADVERTISED_Pause); 6860 break; 6861 6862 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 6863 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 6864 break; 6865 } 6866 } 6867 6868 static uint16_t bnx2x_get_mf_speed(struct bnx2x_softc *sc) 6869 { 6870 uint16_t line_speed = sc->link_vars.line_speed; 6871 if (IS_MF(sc)) { 6872 uint16_t maxCfg = bnx2x_extract_max_cfg(sc, 6873 sc->devinfo. 6874 mf_info.mf_config[SC_VN 6875 (sc)]); 6876 6877 /* calculate the current MAX line speed limit for the MF devices */ 6878 if (IS_MF_SI(sc)) { 6879 line_speed = (line_speed * maxCfg) / 100; 6880 } else { /* SD mode */ 6881 uint16_t vn_max_rate = maxCfg * 100; 6882 6883 if (vn_max_rate < line_speed) { 6884 line_speed = vn_max_rate; 6885 } 6886 } 6887 } 6888 6889 return line_speed; 6890 } 6891 6892 static void 6893 bnx2x_fill_report_data(struct bnx2x_softc *sc, struct bnx2x_link_report_data *data) 6894 { 6895 uint16_t line_speed = bnx2x_get_mf_speed(sc); 6896 6897 memset(data, 0, sizeof(*data)); 6898 6899 /* fill the report data with the effective line speed */ 6900 data->line_speed = line_speed; 6901 6902 /* Link is down */ 6903 if (!sc->link_vars.link_up || (sc->flags & BNX2X_MF_FUNC_DIS)) { 6904 rte_bit_relaxed_set32(BNX2X_LINK_REPORT_LINK_DOWN, 6905 &data->link_report_flags); 6906 } 6907 6908 /* Full DUPLEX */ 6909 if (sc->link_vars.duplex == DUPLEX_FULL) { 6910 rte_bit_relaxed_set32(BNX2X_LINK_REPORT_FULL_DUPLEX, 6911 &data->link_report_flags); 6912 } 6913 6914 /* Rx Flow Control is ON */ 6915 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { 6916 rte_bit_relaxed_set32(BNX2X_LINK_REPORT_RX_FC_ON, 6917 &data->link_report_flags); 6918 } 6919 6920 /* Tx Flow Control is ON */ 6921 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 6922 rte_bit_relaxed_set32(BNX2X_LINK_REPORT_TX_FC_ON, 6923 &data->link_report_flags); 6924 } 6925 } 6926 6927 /* report link status to OS, should be called under phy_lock */ 6928 static void bnx2x_link_report_locked(struct bnx2x_softc *sc) 6929 { 6930 struct bnx2x_link_report_data cur_data; 6931 6932 /* reread mf_cfg */ 6933 if (IS_PF(sc)) { 6934 bnx2x_read_mf_cfg(sc); 6935 } 6936 6937 /* Read the current link report info */ 6938 bnx2x_fill_report_data(sc, &cur_data); 6939 
6940 /* Don't report link down or exactly the same link status twice */ 6941 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || 6942 (rte_bit_relaxed_get32(BNX2X_LINK_REPORT_LINK_DOWN, 6943 &sc->last_reported_link.link_report_flags) && 6944 rte_bit_relaxed_get32(BNX2X_LINK_REPORT_LINK_DOWN, 6945 &cur_data.link_report_flags))) { 6946 return; 6947 } 6948 6949 ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x", 6950 cur_data.link_report_flags, 6951 sc->last_reported_link.link_report_flags); 6952 6953 sc->link_cnt++; 6954 6955 ELINK_DEBUG_P1(sc, "link status change count = %x", sc->link_cnt); 6956 /* report new link params and remember the state for the next time */ 6957 rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); 6958 6959 if (rte_bit_relaxed_get32(BNX2X_LINK_REPORT_LINK_DOWN, 6960 &cur_data.link_report_flags)) { 6961 ELINK_DEBUG_P0(sc, "NIC Link is Down"); 6962 } else { 6963 __rte_unused const char *duplex; 6964 __rte_unused const char *flow; 6965 6966 if (rte_bit_relaxed_test_and_clear32 6967 (BNX2X_LINK_REPORT_FULL_DUPLEX, 6968 &cur_data.link_report_flags)) { 6969 duplex = "full"; 6970 ELINK_DEBUG_P0(sc, "link set to full duplex"); 6971 } else { 6972 duplex = "half"; 6973 ELINK_DEBUG_P0(sc, "link set to half duplex"); 6974 } 6975 6976 /* 6977 * Handle the FC at the end so that only these flags would be 6978 * possibly set. This way we may easily check if there is no FC 6979 * enabled. 6980 */ 6981 if (cur_data.link_report_flags) { 6982 if (rte_bit_relaxed_get32 6983 (BNX2X_LINK_REPORT_RX_FC_ON, 6984 &cur_data.link_report_flags) && 6985 rte_bit_relaxed_get32(BNX2X_LINK_REPORT_TX_FC_ON, 6986 &cur_data.link_report_flags)) { 6987 flow = "ON - receive & transmit"; 6988 } else if (rte_bit_relaxed_get32 6989 (BNX2X_LINK_REPORT_RX_FC_ON, 6990 &cur_data.link_report_flags) && 6991 !rte_bit_relaxed_get32 6992 (BNX2X_LINK_REPORT_TX_FC_ON, 6993 &cur_data.link_report_flags)) { 6994 flow = "ON - receive"; 6995 } else if (!rte_bit_relaxed_get32 6996 (BNX2X_LINK_REPORT_RX_FC_ON, 6997 &cur_data.link_report_flags) && 6998 rte_bit_relaxed_get32 6999 (BNX2X_LINK_REPORT_TX_FC_ON, 7000 &cur_data.link_report_flags)) { 7001 flow = "ON - transmit"; 7002 } else { 7003 flow = "none"; /* possible? 
*/ 7004 } 7005 } else { 7006 flow = "none"; 7007 } 7008 7009 PMD_DRV_LOG(INFO, sc, 7010 "NIC Link is Up, %d Mbps %s duplex, Flow control: %s", 7011 cur_data.line_speed, duplex, flow); 7012 } 7013 } 7014 7015 static void 7016 bnx2x_link_report(struct bnx2x_softc *sc) 7017 { 7018 bnx2x_acquire_phy_lock(sc); 7019 bnx2x_link_report_locked(sc); 7020 bnx2x_release_phy_lock(sc); 7021 } 7022 7023 void bnx2x_link_status_update(struct bnx2x_softc *sc) 7024 { 7025 if (sc->state != BNX2X_STATE_OPEN) { 7026 return; 7027 } 7028 7029 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { 7030 elink_link_status_update(&sc->link_params, &sc->link_vars); 7031 } else { 7032 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | 7033 ELINK_SUPPORTED_10baseT_Full | 7034 ELINK_SUPPORTED_100baseT_Half | 7035 ELINK_SUPPORTED_100baseT_Full | 7036 ELINK_SUPPORTED_1000baseT_Full | 7037 ELINK_SUPPORTED_2500baseX_Full | 7038 ELINK_SUPPORTED_10000baseT_Full | 7039 ELINK_SUPPORTED_TP | 7040 ELINK_SUPPORTED_FIBRE | 7041 ELINK_SUPPORTED_Autoneg | 7042 ELINK_SUPPORTED_Pause | 7043 ELINK_SUPPORTED_Asym_Pause); 7044 sc->port.advertising[0] = sc->port.supported[0]; 7045 7046 sc->link_params.sc = sc; 7047 sc->link_params.port = SC_PORT(sc); 7048 sc->link_params.req_duplex[0] = DUPLEX_FULL; 7049 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; 7050 sc->link_params.req_line_speed[0] = SPEED_10000; 7051 sc->link_params.speed_cap_mask[0] = 0x7f0000; 7052 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; 7053 7054 if (CHIP_REV_IS_FPGA(sc)) { 7055 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; 7056 sc->link_vars.line_speed = ELINK_SPEED_1000; 7057 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 7058 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); 7059 } else { 7060 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; 7061 sc->link_vars.line_speed = ELINK_SPEED_10000; 7062 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 7063 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 7064 } 7065 7066 sc->link_vars.link_up = 1; 7067 7068 sc->link_vars.duplex = DUPLEX_FULL; 7069 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; 7070 7071 if (IS_PF(sc)) { 7072 REG_WR(sc, 7073 NIG_REG_EGRESS_DRAIN0_MODE + 7074 sc->link_params.port * 4, 0); 7075 bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); 7076 bnx2x_link_report(sc); 7077 } 7078 } 7079 7080 if (IS_PF(sc)) { 7081 if (sc->link_vars.link_up) { 7082 bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); 7083 } else { 7084 bnx2x_stats_handle(sc, STATS_EVENT_STOP); 7085 } 7086 bnx2x_link_report(sc); 7087 } else { 7088 bnx2x_link_report_locked(sc); 7089 bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); 7090 } 7091 } 7092 7093 static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode) 7094 { 7095 int rc, cfg_idx = bnx2x_get_link_cfg_idx(sc); 7096 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; 7097 struct elink_params *lp = &sc->link_params; 7098 7099 bnx2x_set_requested_fc(sc); 7100 7101 bnx2x_acquire_phy_lock(sc); 7102 7103 if (load_mode == LOAD_DIAG) { 7104 lp->loopback_mode = ELINK_LOOPBACK_XGXS; 7105 /* Prefer doing PHY loopback at 10G speed, if possible */ 7106 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { 7107 if (lp->speed_cap_mask[cfg_idx] & 7108 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 7109 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; 7110 } else { 7111 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; 7112 } 7113 } 7114 } 7115 7116 if (load_mode == LOAD_LOOPBACK_EXT) { 7117 lp->loopback_mode = ELINK_LOOPBACK_EXT; 7118 } 7119 7120 rc = elink_phy_init(&sc->link_params, &sc->link_vars); 7121 7122 
bnx2x_release_phy_lock(sc); 7123 7124 bnx2x_calc_fc_adv(sc); 7125 7126 if (sc->link_vars.link_up) { 7127 bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); 7128 bnx2x_link_report(sc); 7129 } 7130 7131 sc->link_params.req_line_speed[cfg_idx] = req_line_speed; 7132 return rc; 7133 } 7134 7135 /* update flags in shmem */ 7136 static void 7137 bnx2x_update_drv_flags(struct bnx2x_softc *sc, uint32_t flags, uint32_t set) 7138 { 7139 uint32_t drv_flags; 7140 7141 if (SHMEM2_HAS(sc, drv_flags)) { 7142 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 7143 drv_flags = SHMEM2_RD(sc, drv_flags); 7144 7145 if (set) { 7146 drv_flags |= flags; 7147 } else { 7148 drv_flags &= ~flags; 7149 } 7150 7151 SHMEM2_WR(sc, drv_flags, drv_flags); 7152 7153 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 7154 } 7155 } 7156 7157 /* periodic timer callout routine, only runs when the interface is up */ 7158 void bnx2x_periodic_callout(struct bnx2x_softc *sc) 7159 { 7160 if ((sc->state != BNX2X_STATE_OPEN) || 7161 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { 7162 PMD_DRV_LOG(DEBUG, sc, "periodic callout exit (state=0x%x)", 7163 sc->state); 7164 return; 7165 } 7166 if (!CHIP_REV_IS_SLOW(sc)) { 7167 /* 7168 * This barrier is needed to ensure the ordering between the writing 7169 * to the sc->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and 7170 * the reading here. 7171 */ 7172 mb(); 7173 if (sc->port.pmf) { 7174 bnx2x_acquire_phy_lock(sc); 7175 elink_period_func(&sc->link_params, &sc->link_vars); 7176 bnx2x_release_phy_lock(sc); 7177 } 7178 } 7179 #ifdef BNX2X_PULSE 7180 if (IS_PF(sc) && !BNX2X_NOMCP(sc)) { 7181 int mb_idx = SC_FW_MB_IDX(sc); 7182 uint32_t drv_pulse; 7183 uint32_t mcp_pulse; 7184 7185 ++sc->fw_drv_pulse_wr_seq; 7186 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 7187 7188 drv_pulse = sc->fw_drv_pulse_wr_seq; 7189 bnx2x_drv_pulse(sc); 7190 7191 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & 7192 MCP_PULSE_SEQ_MASK); 7193 7194 /* 7195 * The delta between driver pulse and mcp response should 7196 * be 1 (before mcp response) or 0 (after mcp response). 7197 */ 7198 if ((drv_pulse != mcp_pulse) && 7199 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 7200 /* someone lost a heartbeat... 
*/ 7201 PMD_DRV_LOG(ERR, sc, 7202 "drv_pulse (0x%x) != mcp_pulse (0x%x)", 7203 drv_pulse, mcp_pulse); 7204 } 7205 } 7206 #endif 7207 } 7208 7209 /* start the controller */ 7210 static __rte_noinline 7211 int bnx2x_nic_load(struct bnx2x_softc *sc) 7212 { 7213 uint32_t val; 7214 uint32_t load_code = 0; 7215 int i, rc = 0; 7216 7217 PMD_INIT_FUNC_TRACE(sc); 7218 7219 sc->state = BNX2X_STATE_OPENING_WAITING_LOAD; 7220 7221 if (IS_PF(sc)) { 7222 /* must be called before memory allocation and HW init */ 7223 bnx2x_ilt_set_info(sc); 7224 } 7225 7226 bnx2x_set_fp_rx_buf_size(sc); 7227 7228 if (IS_PF(sc)) { 7229 if (bnx2x_alloc_mem(sc) != 0) { 7230 sc->state = BNX2X_STATE_CLOSED; 7231 rc = -ENOMEM; 7232 goto bnx2x_nic_load_error0; 7233 } 7234 } 7235 7236 /* allocate the host hardware/software hsi structures */ 7237 if (bnx2x_alloc_hsi_mem(sc) != 0) { 7238 PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_hsi_mem was failed"); 7239 sc->state = BNX2X_STATE_CLOSED; 7240 rc = -ENOMEM; 7241 goto bnx2x_nic_load_error0; 7242 } 7243 7244 if (bnx2x_alloc_fw_stats_mem(sc) != 0) { 7245 sc->state = BNX2X_STATE_CLOSED; 7246 rc = -ENOMEM; 7247 goto bnx2x_nic_load_error0; 7248 } 7249 7250 if (IS_VF(sc)) { 7251 rc = bnx2x_vf_init(sc); 7252 if (rc) { 7253 sc->state = BNX2X_STATE_ERROR; 7254 goto bnx2x_nic_load_error0; 7255 } 7256 } 7257 7258 if (IS_PF(sc)) { 7259 /* set pf load just before approaching the MCP */ 7260 bnx2x_set_pf_load(sc); 7261 7262 /* if MCP exists send load request and analyze response */ 7263 if (!BNX2X_NOMCP(sc)) { 7264 /* attempt to load pf */ 7265 if (bnx2x_nic_load_request(sc, &load_code) != 0) { 7266 sc->state = BNX2X_STATE_CLOSED; 7267 rc = -ENXIO; 7268 goto bnx2x_nic_load_error1; 7269 } 7270 7271 /* what did the MCP say? */ 7272 if (bnx2x_nic_load_analyze_req(sc, load_code) != 0) { 7273 bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 7274 sc->state = BNX2X_STATE_CLOSED; 7275 rc = -ENXIO; 7276 goto bnx2x_nic_load_error2; 7277 } 7278 } else { 7279 PMD_DRV_LOG(INFO, sc, "Device has no MCP!"); 7280 load_code = bnx2x_nic_load_no_mcp(sc); 7281 } 7282 7283 /* mark PMF if applicable */ 7284 bnx2x_nic_load_pmf(sc, load_code); 7285 7286 /* Init Function state controlling object */ 7287 bnx2x_init_func_obj(sc); 7288 7289 /* Initialize HW */ 7290 if (bnx2x_init_hw(sc, load_code) != 0) { 7291 PMD_DRV_LOG(NOTICE, sc, "HW init failed"); 7292 bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 7293 sc->state = BNX2X_STATE_CLOSED; 7294 rc = -ENXIO; 7295 goto bnx2x_nic_load_error2; 7296 } 7297 } 7298 7299 bnx2x_nic_init(sc, load_code); 7300 7301 /* Init per-function objects */ 7302 if (IS_PF(sc)) { 7303 bnx2x_init_objs(sc); 7304 7305 /* set AFEX default VLAN tag to an invalid value */ 7306 sc->devinfo.mf_info.afex_def_vlan_tag = -1; 7307 7308 sc->state = BNX2X_STATE_OPENING_WAITING_PORT; 7309 rc = bnx2x_func_start(sc); 7310 if (rc) { 7311 PMD_DRV_LOG(NOTICE, sc, "Function start failed!"); 7312 bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 7313 sc->state = BNX2X_STATE_ERROR; 7314 goto bnx2x_nic_load_error3; 7315 } 7316 7317 /* send LOAD_DONE command to MCP */ 7318 if (!BNX2X_NOMCP(sc)) { 7319 load_code = 7320 bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 7321 if (!load_code) { 7322 PMD_DRV_LOG(NOTICE, sc, 7323 "MCP response failure, aborting"); 7324 sc->state = BNX2X_STATE_ERROR; 7325 rc = -ENXIO; 7326 goto bnx2x_nic_load_error3; 7327 } 7328 } 7329 } 7330 7331 rc = bnx2x_setup_leading(sc); 7332 if (rc) { 7333 PMD_DRV_LOG(NOTICE, sc, "Setup leading failed!"); 7334 sc->state = BNX2X_STATE_ERROR; 7335 goto 
bnx2x_nic_load_error3; 7336 } 7337 7338 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { 7339 if (IS_PF(sc)) 7340 rc = bnx2x_setup_queue(sc, &sc->fp[i], FALSE); 7341 else /* IS_VF(sc) */ 7342 rc = bnx2x_vf_setup_queue(sc, &sc->fp[i], FALSE); 7343 7344 if (rc) { 7345 PMD_DRV_LOG(NOTICE, sc, "Queue(%d) setup failed", i); 7346 sc->state = BNX2X_STATE_ERROR; 7347 goto bnx2x_nic_load_error3; 7348 } 7349 } 7350 7351 rc = bnx2x_init_rss_pf(sc); 7352 if (rc) { 7353 PMD_DRV_LOG(NOTICE, sc, "PF RSS init failed"); 7354 sc->state = BNX2X_STATE_ERROR; 7355 goto bnx2x_nic_load_error3; 7356 } 7357 7358 /* now when Clients are configured we are ready to work */ 7359 sc->state = BNX2X_STATE_OPEN; 7360 7361 /* Configure a ucast MAC */ 7362 if (IS_PF(sc)) { 7363 rc = bnx2x_set_eth_mac(sc, TRUE); 7364 } else { /* IS_VF(sc) */ 7365 rc = bnx2x_vf_set_mac(sc, TRUE); 7366 } 7367 7368 if (rc) { 7369 PMD_DRV_LOG(NOTICE, sc, "Setting Ethernet MAC failed"); 7370 sc->state = BNX2X_STATE_ERROR; 7371 goto bnx2x_nic_load_error3; 7372 } 7373 7374 if (sc->port.pmf) { 7375 rc = bnx2x_initial_phy_init(sc, LOAD_OPEN); 7376 if (rc) { 7377 sc->state = BNX2X_STATE_ERROR; 7378 goto bnx2x_nic_load_error3; 7379 } 7380 } 7381 7382 sc->link_params.feature_config_flags &= 7383 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; 7384 7385 /* start the Tx */ 7386 switch (LOAD_OPEN) { 7387 case LOAD_NORMAL: 7388 case LOAD_OPEN: 7389 break; 7390 7391 case LOAD_DIAG: 7392 case LOAD_LOOPBACK_EXT: 7393 sc->state = BNX2X_STATE_DIAG; 7394 break; 7395 7396 default: 7397 break; 7398 } 7399 7400 if (sc->port.pmf) { 7401 bnx2x_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); 7402 } else { 7403 bnx2x_link_status_update(sc); 7404 } 7405 7406 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 7407 /* mark driver is loaded in shmem2 */ 7408 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 7409 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 7410 (val | 7411 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | 7412 DRV_FLAGS_CAPABILITIES_LOADED_L2)); 7413 } 7414 7415 /* start fast path */ 7416 /* Initialize Rx filter */ 7417 bnx2x_set_rx_mode(sc); 7418 7419 /* wait for all pending SP commands to complete */ 7420 if (IS_PF(sc) && !bnx2x_wait_sp_comp(sc, ~0x0U)) { 7421 PMD_DRV_LOG(NOTICE, sc, "Timeout waiting for all SPs to complete!"); 7422 bnx2x_periodic_stop(sc); 7423 bnx2x_nic_unload(sc, UNLOAD_CLOSE, FALSE); 7424 return -ENXIO; 7425 } 7426 7427 PMD_DRV_LOG(DEBUG, sc, "NIC successfully loaded"); 7428 7429 return 0; 7430 7431 bnx2x_nic_load_error3: 7432 7433 if (IS_PF(sc)) { 7434 bnx2x_int_disable_sync(sc, 1); 7435 7436 /* clean out queued objects */ 7437 bnx2x_squeeze_objects(sc); 7438 } 7439 7440 bnx2x_nic_load_error2: 7441 7442 if (IS_PF(sc) && !BNX2X_NOMCP(sc)) { 7443 bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 7444 bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 7445 } 7446 7447 sc->port.pmf = 0; 7448 7449 bnx2x_nic_load_error1: 7450 7451 /* clear pf_load status, as it was already set */ 7452 if (IS_PF(sc)) { 7453 bnx2x_clear_pf_load(sc); 7454 } 7455 7456 bnx2x_nic_load_error0: 7457 7458 bnx2x_free_fw_stats_mem(sc); 7459 bnx2x_free_hsi_mem(sc); 7460 bnx2x_free_mem(sc); 7461 7462 return rc; 7463 } 7464 7465 /* 7466 * Handles controller initialization. 7467 */ 7468 int bnx2x_init(struct bnx2x_softc *sc) 7469 { 7470 int other_engine = SC_PATH(sc) ? 0 : 1; 7471 uint8_t other_load_status, load_status; 7472 uint8_t global = FALSE; 7473 int rc; 7474 7475 /* Check if the driver is still running and bail out if it is. 
*/ 7476 if (sc->state != BNX2X_STATE_CLOSED) { 7477 PMD_DRV_LOG(DEBUG, sc, "Init called while driver is running!"); 7478 rc = 0; 7479 goto bnx2x_init_done; 7480 } 7481 7482 bnx2x_set_power_state(sc, PCI_PM_D0); 7483 7484 /* 7485 * If parity occurred during the unload, then attentions and/or 7486 * RECOVERY_IN_PROGRESS may still be set. If so we want the first function 7487 * loaded on the current engine to complete the recovery. Parity recovery 7488 * is only relevant for PF driver. 7489 */ 7490 if (IS_PF(sc)) { 7491 other_load_status = bnx2x_get_load_status(sc, other_engine); 7492 load_status = bnx2x_get_load_status(sc, SC_PATH(sc)); 7493 7494 if (!bnx2x_reset_is_done(sc, SC_PATH(sc)) || 7495 bnx2x_chk_parity_attn(sc, &global, TRUE)) { 7496 do { 7497 /* 7498 * If there are attentions and they are in global blocks, set 7499 * the GLOBAL_RESET bit regardless whether it will be this 7500 * function that will complete the recovery or not. 7501 */ 7502 if (global) { 7503 bnx2x_set_reset_global(sc); 7504 } 7505 7506 /* 7507 * Only the first function on the current engine should try 7508 * to recover in open. In case of attentions in global blocks 7509 * only the first in the chip should try to recover. 7510 */ 7511 if ((!load_status 7512 && (!global ||!other_load_status)) 7513 && bnx2x_trylock_leader_lock(sc) 7514 && !bnx2x_leader_reset(sc)) { 7515 PMD_DRV_LOG(INFO, sc, 7516 "Recovered during init"); 7517 break; 7518 } 7519 7520 /* recovery has failed... */ 7521 bnx2x_set_power_state(sc, PCI_PM_D3hot); 7522 7523 sc->recovery_state = BNX2X_RECOVERY_FAILED; 7524 7525 PMD_DRV_LOG(NOTICE, sc, 7526 "Recovery flow hasn't properly " 7527 "completed yet, try again later. " 7528 "If you still see this message after a " 7529 "few retries then power cycle is required."); 7530 7531 rc = -ENXIO; 7532 goto bnx2x_init_done; 7533 } while (0); 7534 } 7535 } 7536 7537 sc->recovery_state = BNX2X_RECOVERY_DONE; 7538 7539 rc = bnx2x_nic_load(sc); 7540 7541 bnx2x_init_done: 7542 7543 if (rc) { 7544 PMD_DRV_LOG(NOTICE, sc, "Initialization failed, " 7545 "stack notified driver is NOT running!"); 7546 } 7547 7548 return rc; 7549 } 7550 7551 static void bnx2x_get_function_num(struct bnx2x_softc *sc) 7552 { 7553 uint32_t val = 0; 7554 7555 /* 7556 * Read the ME register to get the function number. The ME register 7557 * holds the relative-function number and absolute-function number. The 7558 * absolute-function number appears only in E2 and above. Before that 7559 * these bits always contained zero, therefore we cannot blindly use them. 
7560 */ 7561 7562 val = REG_RD(sc, BAR_ME_REGISTER); 7563 7564 sc->pfunc_rel = 7565 (uint8_t) ((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); 7566 sc->path_id = 7567 (uint8_t) ((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 7568 1; 7569 7570 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 7571 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id); 7572 } else { 7573 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id); 7574 } 7575 7576 PMD_DRV_LOG(DEBUG, sc, 7577 "Relative function %d, Absolute function %d, Path %d", 7578 sc->pfunc_rel, sc->pfunc_abs, sc->path_id); 7579 } 7580 7581 static uint32_t bnx2x_get_shmem_mf_cfg_base(struct bnx2x_softc *sc) 7582 { 7583 uint32_t shmem2_size; 7584 uint32_t offset; 7585 uint32_t mf_cfg_offset_value; 7586 7587 /* Non 57712 */ 7588 offset = (SHMEM_ADDR(sc, func_mb) + 7589 (MAX_FUNC_NUM * sizeof(struct drv_func_mb))); 7590 7591 /* 57712 plus */ 7592 if (sc->devinfo.shmem2_base != 0) { 7593 shmem2_size = SHMEM2_RD(sc, size); 7594 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) { 7595 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr); 7596 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) { 7597 offset = mf_cfg_offset_value; 7598 } 7599 } 7600 } 7601 7602 return offset; 7603 } 7604 7605 static uint32_t bnx2x_pcie_capability_read(struct bnx2x_softc *sc, int reg) 7606 { 7607 uint32_t ret; 7608 struct bnx2x_pci_cap *caps; 7609 7610 /* ensure PCIe capability is enabled */ 7611 caps = pci_find_cap(sc, RTE_PCI_CAP_ID_EXP, BNX2X_PCI_CAP); 7612 if (NULL != caps) { 7613 PMD_DRV_LOG(DEBUG, sc, "Found PCIe capability: " 7614 "id=0x%04X type=0x%04X addr=0x%08X", 7615 caps->id, caps->type, caps->addr); 7616 pci_read(sc, (caps->addr + reg), &ret, 2); 7617 return ret; 7618 } 7619 7620 PMD_DRV_LOG(WARNING, sc, "PCIe capability NOT FOUND!!!"); 7621 7622 return 0; 7623 } 7624 7625 static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc) 7626 { 7627 return bnx2x_pcie_capability_read(sc, RTE_PCI_EXP_TYPE_RC_EC) & 7628 RTE_PCI_EXP_DEVSTA_TRPND; 7629 } 7630 7631 /* 7632 * Walk the PCI capabilities list for the device to find what features are 7633 * supported. These capabilities may be enabled/disabled by firmware so it's 7634 * best to walk the list rather than make assumptions. 
7635 */ 7636 static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc) 7637 { 7638 PMD_INIT_FUNC_TRACE(sc); 7639 7640 struct bnx2x_pci_cap *caps; 7641 uint16_t link_status; 7642 int reg = 0; 7643 7644 /* check if PCI Power Management is enabled */ 7645 caps = pci_find_cap(sc, RTE_PCI_CAP_ID_PM, BNX2X_PCI_CAP); 7646 if (NULL != caps) { 7647 PMD_DRV_LOG(DEBUG, sc, "Found PM capability: " 7648 "id=0x%04X type=0x%04X addr=0x%08X", 7649 caps->id, caps->type, caps->addr); 7650 7651 sc->devinfo.pcie_cap_flags |= BNX2X_PM_CAPABLE_FLAG; 7652 sc->devinfo.pcie_pm_cap_reg = caps->addr; 7653 } 7654 7655 link_status = bnx2x_pcie_capability_read(sc, RTE_PCI_EXP_LNKSTA); 7656 7657 sc->devinfo.pcie_link_speed = (link_status & RTE_PCI_EXP_LNKSTA_CLS); 7658 sc->devinfo.pcie_link_width = 7659 ((link_status & RTE_PCI_EXP_LNKSTA_NLW) >> 4); 7660 7661 PMD_DRV_LOG(DEBUG, sc, "PCIe link speed=%d width=%d", 7662 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); 7663 7664 sc->devinfo.pcie_cap_flags |= BNX2X_PCIE_CAPABLE_FLAG; 7665 7666 /* check if MSI capability is enabled */ 7667 caps = pci_find_cap(sc, RTE_PCI_CAP_ID_MSI, BNX2X_PCI_CAP); 7668 if (NULL != caps) { 7669 PMD_DRV_LOG(DEBUG, sc, "Found MSI capability at 0x%04x", reg); 7670 7671 sc->devinfo.pcie_cap_flags |= BNX2X_MSI_CAPABLE_FLAG; 7672 sc->devinfo.pcie_msi_cap_reg = caps->addr; 7673 } 7674 7675 /* check if MSI-X capability is enabled */ 7676 caps = pci_find_cap(sc, RTE_PCI_CAP_ID_MSIX, BNX2X_PCI_CAP); 7677 if (NULL != caps) { 7678 PMD_DRV_LOG(DEBUG, sc, "Found MSI-X capability at 0x%04x", reg); 7679 7680 sc->devinfo.pcie_cap_flags |= BNX2X_MSIX_CAPABLE_FLAG; 7681 sc->devinfo.pcie_msix_cap_reg = caps->addr; 7682 } 7683 } 7684 7685 static int bnx2x_get_shmem_mf_cfg_info_sd(struct bnx2x_softc *sc) 7686 { 7687 struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; 7688 uint32_t val; 7689 7690 /* get the outer vlan if we're in switch-dependent mode */ 7691 7692 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 7693 mf_info->ext_id = (uint16_t) val; 7694 7695 mf_info->multi_vnics_mode = 1; 7696 7697 if (!VALID_OVLAN(mf_info->ext_id)) { 7698 PMD_DRV_LOG(NOTICE, sc, "Invalid VLAN (%d)", mf_info->ext_id); 7699 return 1; 7700 } 7701 7702 /* get the capabilities */ 7703 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 7704 FUNC_MF_CFG_PROTOCOL_ISCSI) { 7705 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI; 7706 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) 7707 == FUNC_MF_CFG_PROTOCOL_FCOE) { 7708 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE; 7709 } else { 7710 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET; 7711 } 7712 7713 mf_info->vnics_per_port = 7714 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 
2 : 4; 7715 7716 return 0; 7717 } 7718 7719 static uint32_t bnx2x_get_shmem_ext_proto_support_flags(struct bnx2x_softc *sc) 7720 { 7721 uint32_t retval = 0; 7722 uint32_t val; 7723 7724 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 7725 7726 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { 7727 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { 7728 retval |= MF_PROTO_SUPPORT_ETHERNET; 7729 } 7730 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 7731 retval |= MF_PROTO_SUPPORT_ISCSI; 7732 } 7733 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 7734 retval |= MF_PROTO_SUPPORT_FCOE; 7735 } 7736 } 7737 7738 return retval; 7739 } 7740 7741 static int bnx2x_get_shmem_mf_cfg_info_si(struct bnx2x_softc *sc) 7742 { 7743 struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; 7744 uint32_t val; 7745 7746 /* 7747 * There is no outer vlan if we're in switch-independent mode. 7748 * If the mac is valid then assume multi-function. 7749 */ 7750 7751 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 7752 7753 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); 7754 7755 mf_info->mf_protos_supported = 7756 bnx2x_get_shmem_ext_proto_support_flags(sc); 7757 7758 mf_info->vnics_per_port = 7759 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 7760 7761 return 0; 7762 } 7763 7764 static int bnx2x_get_shmem_mf_cfg_info_niv(struct bnx2x_softc *sc) 7765 { 7766 struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; 7767 uint32_t e1hov_tag; 7768 uint32_t func_config; 7769 uint32_t niv_config; 7770 7771 mf_info->multi_vnics_mode = 1; 7772 7773 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 7774 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 7775 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); 7776 7777 mf_info->ext_id = 7778 (uint16_t) ((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> 7779 FUNC_MF_CFG_E1HOV_TAG_SHIFT); 7780 7781 mf_info->default_vlan = 7782 (uint16_t) ((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> 7783 FUNC_MF_CFG_AFEX_VLAN_SHIFT); 7784 7785 mf_info->niv_allowed_priorities = 7786 (uint8_t) ((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 7787 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); 7788 7789 mf_info->niv_default_cos = 7790 (uint8_t) ((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 7791 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); 7792 7793 mf_info->afex_vlan_mode = 7794 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 7795 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); 7796 7797 mf_info->niv_mba_enabled = 7798 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> 7799 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); 7800 7801 mf_info->mf_protos_supported = 7802 bnx2x_get_shmem_ext_proto_support_flags(sc); 7803 7804 mf_info->vnics_per_port = 7805 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 7806 7807 return 0; 7808 } 7809 7810 static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc) 7811 { 7812 struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; 7813 uint32_t mf_cfg1; 7814 uint32_t mf_cfg2; 7815 uint32_t ovlan1; 7816 uint32_t ovlan2; 7817 uint8_t i, j; 7818 7819 /* various MF mode sanity checks... 
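 * (function not hidden, vnics_per_port consistent with multi_vnics_mode, and,
 * for switch-dependent mode, non-hidden functions must have outer VLANs whose
 * validity matches multi_vnics_mode and which are unique per port)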
*/ 7820 7821 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) { 7822 PMD_DRV_LOG(NOTICE, sc, 7823 "Enumerated function %d is marked as hidden", 7824 SC_PORT(sc)); 7825 return 1; 7826 } 7827 7828 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) { 7829 PMD_DRV_LOG(NOTICE, sc, "vnics_per_port=%d multi_vnics_mode=%d", 7830 mf_info->vnics_per_port, mf_info->multi_vnics_mode); 7831 return 1; 7832 } 7833 7834 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 7835 /* vnic id > 0 must have valid ovlan in switch-dependent mode */ 7836 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) { 7837 PMD_DRV_LOG(NOTICE, sc, "mf_mode=SD vnic_id=%d ovlan=%d", 7838 SC_VN(sc), OVLAN(sc)); 7839 return 1; 7840 } 7841 7842 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) { 7843 PMD_DRV_LOG(NOTICE, sc, 7844 "mf_mode=SD multi_vnics_mode=%d ovlan=%d", 7845 mf_info->multi_vnics_mode, OVLAN(sc)); 7846 return 1; 7847 } 7848 7849 /* 7850 * Verify all functions are either MF or SF mode. If MF, make sure 7851 * sure that all non-hidden functions have a valid ovlan. If SF, 7852 * make sure that all non-hidden functions have an invalid ovlan. 7853 */ 7854 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 7855 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 7856 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 7857 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 7858 (((mf_info->multi_vnics_mode) 7859 && !VALID_OVLAN(ovlan1)) 7860 || ((!mf_info->multi_vnics_mode) 7861 && VALID_OVLAN(ovlan1)))) { 7862 PMD_DRV_LOG(NOTICE, sc, 7863 "mf_mode=SD function %d MF config " 7864 "mismatch, multi_vnics_mode=%d ovlan=%d", 7865 i, mf_info->multi_vnics_mode, 7866 ovlan1); 7867 return 1; 7868 } 7869 } 7870 7871 /* Verify all funcs on the same port each have a different ovlan. */ 7872 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 7873 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 7874 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 7875 /* iterate from the next function on the port to the max func */ 7876 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { 7877 mf_cfg2 = 7878 MFCFG_RD(sc, func_mf_config[j].config); 7879 ovlan2 = 7880 MFCFG_RD(sc, func_mf_config[j].e1hov_tag); 7881 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) 7882 && VALID_OVLAN(ovlan1) 7883 && !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) 7884 && VALID_OVLAN(ovlan2) 7885 && (ovlan1 == ovlan2)) { 7886 PMD_DRV_LOG(NOTICE, sc, 7887 "mf_mode=SD functions %d and %d " 7888 "have the same ovlan (%d)", 7889 i, j, ovlan1); 7890 return 1; 7891 } 7892 } 7893 } 7894 } 7895 /* MULTI_FUNCTION_SD */ 7896 return 0; 7897 } 7898 7899 static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc) 7900 { 7901 struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; 7902 uint32_t val, mac_upper; 7903 uint8_t i, vnic; 7904 7905 /* initialize mf_info defaults */ 7906 mf_info->vnics_per_port = 1; 7907 mf_info->multi_vnics_mode = FALSE; 7908 mf_info->path_has_ovlan = FALSE; 7909 mf_info->mf_mode = SINGLE_FUNCTION; 7910 7911 if (!CHIP_IS_MF_CAP(sc)) { 7912 return 0; 7913 } 7914 7915 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { 7916 PMD_DRV_LOG(NOTICE, sc, "Invalid mf_cfg_base!"); 7917 return 1; 7918 } 7919 7920 /* get the MF mode (switch dependent / independent / single-function) */ 7921 7922 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 7923 7924 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) { 7925 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 7926 7927 mac_upper = 7928 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 7929 7930 /* check for legal upper mac bytes */ 7931 if (mac_upper != 
FUNC_MF_CFG_UPPERMAC_DEFAULT) { 7932 mf_info->mf_mode = MULTI_FUNCTION_SI; 7933 } else { 7934 PMD_DRV_LOG(NOTICE, sc, 7935 "Invalid config for Switch Independent mode"); 7936 } 7937 7938 break; 7939 7940 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 7941 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: 7942 7943 /* get outer vlan configuration */ 7944 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 7945 7946 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != 7947 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 7948 mf_info->mf_mode = MULTI_FUNCTION_SD; 7949 } else { 7950 PMD_DRV_LOG(NOTICE, sc, 7951 "Invalid config for Switch Dependent mode"); 7952 } 7953 7954 break; 7955 7956 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: 7957 7958 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ 7959 return 0; 7960 7961 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 7962 7963 /* 7964 * Mark MF mode as NIV if MCP version includes NPAR-SD support 7965 * and the MAC address is valid. 7966 */ 7967 mac_upper = 7968 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 7969 7970 if ((SHMEM2_HAS(sc, afex_driver_support)) && 7971 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { 7972 mf_info->mf_mode = MULTI_FUNCTION_AFEX; 7973 } else { 7974 PMD_DRV_LOG(NOTICE, sc, "Invalid config for AFEX mode"); 7975 } 7976 7977 break; 7978 7979 default: 7980 7981 PMD_DRV_LOG(NOTICE, sc, "Unknown MF mode (0x%08x)", 7982 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); 7983 7984 return 1; 7985 } 7986 7987 /* set path mf_mode (which could be different than function mf_mode) */ 7988 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 7989 mf_info->path_has_ovlan = TRUE; 7990 } else if (mf_info->mf_mode == SINGLE_FUNCTION) { 7991 /* 7992 * Decide on path multi vnics mode. If we're not in MF mode and in 7993 * 4-port mode, this is good enough to check vnic-0 of the other port 7994 * on the same path 7995 */ 7996 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 7997 uint8_t other_port = !(PORT_ID(sc) & 1); 7998 uint8_t abs_func_other_port = 7999 (SC_PATH(sc) + (2 * other_port)); 8000 8001 val = 8002 MFCFG_RD(sc, 8003 func_mf_config 8004 [abs_func_other_port].e1hov_tag); 8005 8006 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t) val); 8007 } 8008 } 8009 8010 if (mf_info->mf_mode == SINGLE_FUNCTION) { 8011 /* invalid MF config */ 8012 if (SC_VN(sc) >= 1) { 8013 PMD_DRV_LOG(NOTICE, sc, "VNIC ID >= 1 in SF mode"); 8014 return 1; 8015 } 8016 8017 return 0; 8018 } 8019 8020 /* get the MF configuration */ 8021 mf_info->mf_config[SC_VN(sc)] = 8022 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 8023 8024 switch (mf_info->mf_mode) { 8025 case MULTI_FUNCTION_SD: 8026 8027 bnx2x_get_shmem_mf_cfg_info_sd(sc); 8028 break; 8029 8030 case MULTI_FUNCTION_SI: 8031 8032 bnx2x_get_shmem_mf_cfg_info_si(sc); 8033 break; 8034 8035 case MULTI_FUNCTION_AFEX: 8036 8037 bnx2x_get_shmem_mf_cfg_info_niv(sc); 8038 break; 8039 8040 default: 8041 8042 PMD_DRV_LOG(NOTICE, sc, "Get MF config failed (mf_mode=0x%08x)", 8043 mf_info->mf_mode); 8044 return 1; 8045 } 8046 8047 /* get the congestion management parameters */ 8048 8049 vnic = 0; 8050 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 8051 /* get min/max bw */ 8052 val = MFCFG_RD(sc, func_mf_config[i].config); 8053 mf_info->min_bw[vnic] = 8054 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> 8055 FUNC_MF_CFG_MIN_BW_SHIFT); 8056 mf_info->max_bw[vnic] = 8057 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> 8058 FUNC_MF_CFG_MAX_BW_SHIFT); 8059 vnic++; 8060 } 8061 8062 return bnx2x_check_valid_mf_cfg(sc); 8063 } 8064 8065 static int bnx2x_get_shmem_info(struct 
bnx2x_softc *sc) 8066 { 8067 int port; 8068 uint32_t mac_hi, mac_lo, val; 8069 8070 PMD_INIT_FUNC_TRACE(sc); 8071 8072 port = SC_PORT(sc); 8073 mac_hi = mac_lo = 0; 8074 8075 sc->link_params.sc = sc; 8076 sc->link_params.port = port; 8077 8078 /* get the hardware config info */ 8079 sc->devinfo.hw_config = SHMEM_RD(sc, dev_info.shared_hw_config.config); 8080 sc->devinfo.hw_config2 = 8081 SHMEM_RD(sc, dev_info.shared_hw_config.config2); 8082 8083 sc->link_params.hw_led_mode = 8084 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> 8085 SHARED_HW_CFG_LED_MODE_SHIFT); 8086 8087 /* get the port feature config */ 8088 sc->port.config = 8089 SHMEM_RD(sc, dev_info.port_feature_config[port].config); 8090 8091 /* get the link params */ 8092 sc->link_params.speed_cap_mask[ELINK_INT_PHY] = 8093 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask) 8094 & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 8095 sc->link_params.speed_cap_mask[ELINK_EXT_PHY1] = 8096 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2) 8097 & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 8098 8099 /* get the lane config */ 8100 sc->link_params.lane_config = 8101 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); 8102 8103 /* get the link config */ 8104 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); 8105 sc->port.link_config[ELINK_INT_PHY] = val; 8106 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); 8107 sc->port.link_config[ELINK_EXT_PHY1] = 8108 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); 8109 8110 /* get the override preemphasis flag and enable it or turn it off */ 8111 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 8112 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { 8113 sc->link_params.feature_config_flags |= 8114 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 8115 } else { 8116 sc->link_params.feature_config_flags &= 8117 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 8118 } 8119 8120 val = sc->devinfo.bc_ver >> 8; 8121 if (val < BNX2X_BC_VER) { 8122 /* for now only warn later we might need to enforce this */ 8123 PMD_DRV_LOG(NOTICE, sc, "This driver needs bc_ver %X but found %X, please upgrade BC", 8124 BNX2X_BC_VER, val); 8125 } 8126 sc->link_params.feature_config_flags |= 8127 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? 8128 ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 8129 0; 8130 8131 sc->link_params.feature_config_flags |= 8132 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 8133 ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 8134 sc->link_params.feature_config_flags |= 8135 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ? 8136 ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0; 8137 sc->link_params.feature_config_flags |= 8138 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 
8139 ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; 8140 8141 /* get the initial value of the link params */ 8142 sc->link_params.multi_phy_config = 8143 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); 8144 8145 /* get external phy info */ 8146 sc->port.ext_phy_config = 8147 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); 8148 8149 /* get the multifunction configuration */ 8150 bnx2x_get_mf_cfg_info(sc); 8151 8152 /* get the mac address */ 8153 if (IS_MF(sc)) { 8154 mac_hi = 8155 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 8156 mac_lo = 8157 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); 8158 } else { 8159 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); 8160 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); 8161 } 8162 8163 if ((mac_lo == 0) && (mac_hi == 0)) { 8164 *sc->mac_addr_str = 0; 8165 PMD_DRV_LOG(NOTICE, sc, "No Ethernet address programmed!"); 8166 } else { 8167 sc->link_params.mac_addr[0] = (uint8_t) (mac_hi >> 8); 8168 sc->link_params.mac_addr[1] = (uint8_t) (mac_hi); 8169 sc->link_params.mac_addr[2] = (uint8_t) (mac_lo >> 24); 8170 sc->link_params.mac_addr[3] = (uint8_t) (mac_lo >> 16); 8171 sc->link_params.mac_addr[4] = (uint8_t) (mac_lo >> 8); 8172 sc->link_params.mac_addr[5] = (uint8_t) (mac_lo); 8173 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), 8174 RTE_ETHER_ADDR_PRT_FMT, 8175 sc->link_params.mac_addr[0], 8176 sc->link_params.mac_addr[1], 8177 sc->link_params.mac_addr[2], 8178 sc->link_params.mac_addr[3], 8179 sc->link_params.mac_addr[4], 8180 sc->link_params.mac_addr[5]); 8181 PMD_DRV_LOG(DEBUG, sc, 8182 "Ethernet address: %s", sc->mac_addr_str); 8183 } 8184 8185 return 0; 8186 } 8187 8188 static void bnx2x_media_detect(struct bnx2x_softc *sc) 8189 { 8190 uint32_t phy_idx = bnx2x_get_cur_phy_idx(sc); 8191 switch (sc->link_params.phy[phy_idx].media_type) { 8192 case ELINK_ETH_PHY_SFPP_10G_FIBER: 8193 case ELINK_ETH_PHY_SFP_1G_FIBER: 8194 case ELINK_ETH_PHY_XFP_FIBER: 8195 case ELINK_ETH_PHY_KR: 8196 case ELINK_ETH_PHY_CX4: 8197 PMD_DRV_LOG(INFO, sc, "Found 10GBase-CX4 media."); 8198 sc->media = IFM_10G_CX4; 8199 break; 8200 case ELINK_ETH_PHY_DA_TWINAX: 8201 PMD_DRV_LOG(INFO, sc, "Found 10Gb Twinax media."); 8202 sc->media = IFM_10G_TWINAX; 8203 break; 8204 case ELINK_ETH_PHY_BASE_T: 8205 PMD_DRV_LOG(INFO, sc, "Found 10GBase-T media."); 8206 sc->media = IFM_10G_T; 8207 break; 8208 case ELINK_ETH_PHY_NOT_PRESENT: 8209 PMD_DRV_LOG(INFO, sc, "Media not present."); 8210 sc->media = 0; 8211 break; 8212 case ELINK_ETH_PHY_UNSPECIFIED: 8213 default: 8214 PMD_DRV_LOG(INFO, sc, "Unknown media!"); 8215 sc->media = 0; 8216 break; 8217 } 8218 } 8219 8220 #define GET_FIELD(value, fname) \ 8221 (((value) & (fname##_MASK)) >> (fname##_SHIFT)) 8222 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 8223 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 8224 8225 static int bnx2x_get_igu_cam_info(struct bnx2x_softc *sc) 8226 { 8227 int pfid = SC_FUNC(sc); 8228 int igu_sb_id; 8229 uint32_t val; 8230 uint8_t fid, igu_sb_cnt = 0; 8231 8232 sc->igu_base_sb = 0xff; 8233 8234 if (CHIP_INT_MODE_IS_BC(sc)) { 8235 int vn = SC_VN(sc); 8236 igu_sb_cnt = sc->igu_sb_cnt; 8237 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * 8238 FP_SB_MAX_E1x); 8239 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + 8240 (CHIP_IS_MODE_4_PORT(sc) ? 
pfid : vn)); 8241 return 0; 8242 } 8243 8244 /* IGU in normal mode - read CAM */ 8245 for (igu_sb_id = 0; 8246 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_id++) { 8247 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); 8248 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) { 8249 continue; 8250 } 8251 fid = IGU_FID(val); 8252 if (fid & IGU_FID_ENCODE_IS_PF) { 8253 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) { 8254 continue; 8255 } 8256 if (IGU_VEC(val) == 0) { 8257 /* default status block */ 8258 sc->igu_dsb_id = igu_sb_id; 8259 } else { 8260 if (sc->igu_base_sb == 0xff) { 8261 sc->igu_base_sb = igu_sb_id; 8262 } 8263 igu_sb_cnt++; 8264 } 8265 } 8266 } 8267 8268 /* 8269 * Due to new PF resource allocation by MFW T7.4 and above, it's optional 8270 * that number of CAM entries will not be equal to the value advertised in 8271 * PCI. Driver should use the minimal value of both as the actual status 8272 * block count 8273 */ 8274 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt); 8275 8276 if (igu_sb_cnt == 0) { 8277 PMD_DRV_LOG(ERR, sc, "CAM configuration error"); 8278 return -1; 8279 } 8280 8281 return 0; 8282 } 8283 8284 /* 8285 * Gather various information from the device config space, the device itself, 8286 * shmem, and the user input. 8287 */ 8288 static int bnx2x_get_device_info(struct bnx2x_softc *sc) 8289 { 8290 uint32_t val; 8291 int rc; 8292 8293 /* get the chip revision (chip metal comes from pci config space) */ 8294 sc->devinfo.chip_id = sc->link_params.chip_id = 8295 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | 8296 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | 8297 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | 8298 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); 8299 8300 /* force 57811 according to MISC register */ 8301 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 8302 if (CHIP_IS_57810(sc)) { 8303 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | 8304 (sc-> 8305 devinfo.chip_id & 0x0000ffff)); 8306 } else if (CHIP_IS_57810_MF(sc)) { 8307 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | 8308 (sc-> 8309 devinfo.chip_id & 0x0000ffff)); 8310 } 8311 sc->devinfo.chip_id |= 0x1; 8312 } 8313 8314 PMD_DRV_LOG(DEBUG, sc, 8315 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)", 8316 sc->devinfo.chip_id, 8317 ((sc->devinfo.chip_id >> 16) & 0xffff), 8318 ((sc->devinfo.chip_id >> 12) & 0xf), 8319 ((sc->devinfo.chip_id >> 4) & 0xff), 8320 ((sc->devinfo.chip_id >> 0) & 0xf)); 8321 8322 val = (REG_RD(sc, 0x2874) & 0x55); 8323 if ((sc->devinfo.chip_id & 0x1) || (CHIP_IS_E1H(sc) && (val == 0x55))) { 8324 sc->flags |= BNX2X_ONE_PORT_FLAG; 8325 PMD_DRV_LOG(DEBUG, sc, "single port device"); 8326 } 8327 8328 /* set the doorbell size */ 8329 sc->doorbell_size = (1 << BNX2X_DB_SHIFT); 8330 8331 /* determine whether the device is in 2 port or 4 port mode */ 8332 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1h */ 8333 if (CHIP_IS_E2E3(sc)) { 8334 /* 8335 * Read port4mode_en_ovwr[0]: 8336 * If 1, four port mode is in port4mode_en_ovwr[1]. 8337 * If 0, four port mode is in port4mode_en[0]. 8338 */ 8339 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); 8340 if (val & 1) { 8341 val = ((val >> 1) & 1); 8342 } else { 8343 val = REG_RD(sc, MISC_REG_PORT4MODE_EN); 8344 } 8345 8346 sc->devinfo.chip_port_mode = 8347 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; 8348 8349 PMD_DRV_LOG(DEBUG, sc, "Port mode = %s", (val) ? 
"4" : "2"); 8350 } 8351 8352 /* get the function and path info for the device */ 8353 bnx2x_get_function_num(sc); 8354 8355 /* get the shared memory base address */ 8356 sc->devinfo.shmem_base = 8357 sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 8358 sc->devinfo.shmem2_base = 8359 REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 : 8360 MISC_REG_GENERIC_CR_0)); 8361 8362 if (!sc->devinfo.shmem_base) { 8363 /* this should ONLY prevent upcoming shmem reads */ 8364 PMD_DRV_LOG(INFO, sc, "MCP not active"); 8365 sc->flags |= BNX2X_NO_MCP_FLAG; 8366 return 0; 8367 } 8368 8369 /* make sure the shared memory contents are valid */ 8370 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 8371 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != 8372 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { 8373 PMD_DRV_LOG(NOTICE, sc, "Invalid SHMEM validity signature: 0x%08x", 8374 val); 8375 return 0; 8376 } 8377 8378 /* get the bootcode version */ 8379 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev); 8380 snprintf(sc->devinfo.bc_ver_str, 8381 sizeof(sc->devinfo.bc_ver_str), 8382 "%d.%d.%d", 8383 ((sc->devinfo.bc_ver >> 24) & 0xff), 8384 ((sc->devinfo.bc_ver >> 16) & 0xff), 8385 ((sc->devinfo.bc_ver >> 8) & 0xff)); 8386 PMD_DRV_LOG(DEBUG, sc, "Bootcode version: %s", sc->devinfo.bc_ver_str); 8387 8388 /* get the bootcode shmem address */ 8389 sc->devinfo.mf_cfg_base = bnx2x_get_shmem_mf_cfg_base(sc); 8390 8391 /* clean indirect addresses as they're not used */ 8392 pci_write_long(sc, PCICFG_GRC_ADDRESS, 0); 8393 if (IS_PF(sc)) { 8394 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0); 8395 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0); 8396 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0); 8397 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0); 8398 if (CHIP_IS_E1x(sc)) { 8399 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0); 8400 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0); 8401 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); 8402 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); 8403 } 8404 } 8405 8406 /* get the nvram size */ 8407 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4); 8408 sc->devinfo.flash_size = 8409 (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); 8410 8411 bnx2x_set_power_state(sc, PCI_PM_D0); 8412 /* get various configuration parameters from shmem */ 8413 bnx2x_get_shmem_info(sc); 8414 8415 /* initialize IGU parameters */ 8416 if (CHIP_IS_E1x(sc)) { 8417 sc->devinfo.int_block = INT_BLOCK_HC; 8418 sc->igu_dsb_id = DEF_SB_IGU_ID; 8419 sc->igu_base_sb = 0; 8420 } else { 8421 sc->devinfo.int_block = INT_BLOCK_IGU; 8422 8423 /* do not allow device reset during IGU info processing */ 8424 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 8425 8426 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); 8427 8428 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 8429 int tout = 5000; 8430 8431 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); 8432 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val); 8433 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f); 8434 8435 while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) { 8436 tout--; 8437 DELAY(1000); 8438 } 8439 8440 if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) { 8441 PMD_DRV_LOG(NOTICE, sc, 8442 "FORCING IGU Normal Mode failed!!!"); 8443 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 8444 return -1; 8445 } 8446 } 8447 8448 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 8449 PMD_DRV_LOG(DEBUG, sc, "IGU Backward Compatible Mode"); 8450 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP; 8451 } else { 8452 PMD_DRV_LOG(DEBUG, sc, "IGU Normal Mode"); 8453 } 8454 8455 rc = 
bnx2x_get_igu_cam_info(sc); 8456 8457 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 8458 8459 if (rc) { 8460 return rc; 8461 } 8462 } 8463 8464 /* 8465 * Get base FW non-default (fast path) status block ID. This value is 8466 * used to initialize the fw_sb_id saved on the fp/queue structure to 8467 * determine the id used by the FW. 8468 */ 8469 if (CHIP_IS_E1x(sc)) { 8470 sc->base_fw_ndsb = 8471 ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc)); 8472 } else { 8473 /* 8474 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of 8475 * the same queue are indicated on the same IGU SB). So we prefer 8476 * FW and IGU SBs to be the same value. 8477 */ 8478 sc->base_fw_ndsb = sc->igu_base_sb; 8479 } 8480 8481 elink_phy_probe(&sc->link_params); 8482 8483 return 0; 8484 } 8485 8486 static void 8487 bnx2x_link_settings_supported(struct bnx2x_softc *sc, uint32_t switch_cfg) 8488 { 8489 uint32_t cfg_size = 0; 8490 uint32_t idx; 8491 uint8_t port = SC_PORT(sc); 8492 8493 /* aggregation of supported attributes of all external phys */ 8494 sc->port.supported[0] = 0; 8495 sc->port.supported[1] = 0; 8496 8497 switch (sc->link_params.num_phys) { 8498 case 1: 8499 sc->port.supported[0] = 8500 sc->link_params.phy[ELINK_INT_PHY].supported; 8501 cfg_size = 1; 8502 break; 8503 case 2: 8504 sc->port.supported[0] = 8505 sc->link_params.phy[ELINK_EXT_PHY1].supported; 8506 cfg_size = 1; 8507 break; 8508 case 3: 8509 if (sc->link_params.multi_phy_config & 8510 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 8511 sc->port.supported[1] = 8512 sc->link_params.phy[ELINK_EXT_PHY1].supported; 8513 sc->port.supported[0] = 8514 sc->link_params.phy[ELINK_EXT_PHY2].supported; 8515 } else { 8516 sc->port.supported[0] = 8517 sc->link_params.phy[ELINK_EXT_PHY1].supported; 8518 sc->port.supported[1] = 8519 sc->link_params.phy[ELINK_EXT_PHY2].supported; 8520 } 8521 cfg_size = 2; 8522 break; 8523 } 8524 8525 if (!(sc->port.supported[0] || sc->port.supported[1])) { 8526 PMD_DRV_LOG(ERR, sc, 8527 "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)", 8528 SHMEM_RD(sc, 8529 dev_info.port_hw_config 8530 [port].external_phy_config), 8531 SHMEM_RD(sc, 8532 dev_info.port_hw_config 8533 [port].external_phy_config2)); 8534 return; 8535 } 8536 8537 if (CHIP_IS_E3(sc)) 8538 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); 8539 else { 8540 switch (switch_cfg) { 8541 case ELINK_SWITCH_CFG_1G: 8542 sc->port.phy_addr = 8543 REG_RD(sc, 8544 NIG_REG_SERDES0_CTRL_PHY_ADDR + port * 0x10); 8545 break; 8546 case ELINK_SWITCH_CFG_10G: 8547 sc->port.phy_addr = 8548 REG_RD(sc, 8549 NIG_REG_XGXS0_CTRL_PHY_ADDR + port * 0x18); 8550 break; 8551 default: 8552 PMD_DRV_LOG(ERR, sc, 8553 "Invalid switch config in" 8554 "link_config=0x%08x", 8555 sc->port.link_config[0]); 8556 return; 8557 } 8558 } 8559 8560 PMD_DRV_LOG(INFO, sc, "PHY addr 0x%08x", sc->port.phy_addr); 8561 8562 /* mask what we support according to speed_cap_mask per configuration */ 8563 for (idx = 0; idx < cfg_size; idx++) { 8564 if (!(sc->link_params.speed_cap_mask[idx] & 8565 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { 8566 sc->port.supported[idx] &= 8567 ~ELINK_SUPPORTED_10baseT_Half; 8568 } 8569 8570 if (!(sc->link_params.speed_cap_mask[idx] & 8571 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { 8572 sc->port.supported[idx] &= 8573 ~ELINK_SUPPORTED_10baseT_Full; 8574 } 8575 8576 if (!(sc->link_params.speed_cap_mask[idx] & 8577 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { 8578 sc->port.supported[idx] &= 8579 ~ELINK_SUPPORTED_100baseT_Half; 8580 } 8581 8582 if 
(!(sc->link_params.speed_cap_mask[idx] & 8583 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { 8584 sc->port.supported[idx] &= 8585 ~ELINK_SUPPORTED_100baseT_Full; 8586 } 8587 8588 if (!(sc->link_params.speed_cap_mask[idx] & 8589 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { 8590 sc->port.supported[idx] &= 8591 ~ELINK_SUPPORTED_1000baseT_Full; 8592 } 8593 8594 if (!(sc->link_params.speed_cap_mask[idx] & 8595 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { 8596 sc->port.supported[idx] &= 8597 ~ELINK_SUPPORTED_2500baseX_Full; 8598 } 8599 8600 if (!(sc->link_params.speed_cap_mask[idx] & 8601 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 8602 sc->port.supported[idx] &= 8603 ~ELINK_SUPPORTED_10000baseT_Full; 8604 } 8605 8606 if (!(sc->link_params.speed_cap_mask[idx] & 8607 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { 8608 sc->port.supported[idx] &= 8609 ~ELINK_SUPPORTED_20000baseKR2_Full; 8610 } 8611 } 8612 8613 PMD_DRV_LOG(INFO, sc, "PHY supported 0=0x%08x 1=0x%08x", 8614 sc->port.supported[0], sc->port.supported[1]); 8615 } 8616 8617 static void bnx2x_link_settings_requested(struct bnx2x_softc *sc) 8618 { 8619 uint32_t link_config; 8620 uint32_t idx; 8621 uint32_t cfg_size = 0; 8622 8623 sc->port.advertising[0] = 0; 8624 sc->port.advertising[1] = 0; 8625 8626 switch (sc->link_params.num_phys) { 8627 case 1: 8628 case 2: 8629 cfg_size = 1; 8630 break; 8631 case 3: 8632 cfg_size = 2; 8633 break; 8634 } 8635 8636 for (idx = 0; idx < cfg_size; idx++) { 8637 sc->link_params.req_duplex[idx] = DUPLEX_FULL; 8638 link_config = sc->port.link_config[idx]; 8639 8640 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 8641 case PORT_FEATURE_LINK_SPEED_AUTO: 8642 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { 8643 sc->link_params.req_line_speed[idx] = 8644 ELINK_SPEED_AUTO_NEG; 8645 sc->port.advertising[idx] |= 8646 sc->port.supported[idx]; 8647 if (sc->link_params.phy[ELINK_EXT_PHY1].type == 8648 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) 8649 sc->port.advertising[idx] |= 8650 (ELINK_SUPPORTED_100baseT_Half | 8651 ELINK_SUPPORTED_100baseT_Full); 8652 } else { 8653 /* force 10G, no AN */ 8654 sc->link_params.req_line_speed[idx] = 8655 ELINK_SPEED_10000; 8656 sc->port.advertising[idx] |= 8657 (ADVERTISED_10000baseT_Full | 8658 ADVERTISED_FIBRE); 8659 continue; 8660 } 8661 break; 8662 8663 case PORT_FEATURE_LINK_SPEED_10M_FULL: 8664 if (sc-> 8665 port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) 8666 { 8667 sc->link_params.req_line_speed[idx] = 8668 ELINK_SPEED_10; 8669 sc->port.advertising[idx] |= 8670 (ADVERTISED_10baseT_Full | ADVERTISED_TP); 8671 } else { 8672 PMD_DRV_LOG(ERR, sc, 8673 "Invalid NVRAM config link_config=0x%08x " 8674 "speed_cap_mask=0x%08x", 8675 link_config, 8676 sc-> 8677 link_params.speed_cap_mask[idx]); 8678 return; 8679 } 8680 break; 8681 8682 case PORT_FEATURE_LINK_SPEED_10M_HALF: 8683 if (sc-> 8684 port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) 8685 { 8686 sc->link_params.req_line_speed[idx] = 8687 ELINK_SPEED_10; 8688 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 8689 sc->port.advertising[idx] |= 8690 (ADVERTISED_10baseT_Half | ADVERTISED_TP); 8691 } else { 8692 PMD_DRV_LOG(ERR, sc, 8693 "Invalid NVRAM config link_config=0x%08x " 8694 "speed_cap_mask=0x%08x", 8695 link_config, 8696 sc-> 8697 link_params.speed_cap_mask[idx]); 8698 return; 8699 } 8700 break; 8701 8702 case PORT_FEATURE_LINK_SPEED_100M_FULL: 8703 if (sc-> 8704 port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) 8705 { 8706 sc->link_params.req_line_speed[idx] = 8707 ELINK_SPEED_100; 8708 sc->port.advertising[idx] |= 8709 
(ADVERTISED_100baseT_Full | ADVERTISED_TP); 8710 } else { 8711 PMD_DRV_LOG(ERR, sc, 8712 "Invalid NVRAM config link_config=0x%08x " 8713 "speed_cap_mask=0x%08x", 8714 link_config, 8715 sc-> 8716 link_params.speed_cap_mask[idx]); 8717 return; 8718 } 8719 break; 8720 8721 case PORT_FEATURE_LINK_SPEED_100M_HALF: 8722 if (sc-> 8723 port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) 8724 { 8725 sc->link_params.req_line_speed[idx] = 8726 ELINK_SPEED_100; 8727 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 8728 sc->port.advertising[idx] |= 8729 (ADVERTISED_100baseT_Half | ADVERTISED_TP); 8730 } else { 8731 PMD_DRV_LOG(ERR, sc, 8732 "Invalid NVRAM config link_config=0x%08x " 8733 "speed_cap_mask=0x%08x", 8734 link_config, 8735 sc-> 8736 link_params.speed_cap_mask[idx]); 8737 return; 8738 } 8739 break; 8740 8741 case PORT_FEATURE_LINK_SPEED_1G: 8742 if (sc->port.supported[idx] & 8743 ELINK_SUPPORTED_1000baseT_Full) { 8744 sc->link_params.req_line_speed[idx] = 8745 ELINK_SPEED_1000; 8746 sc->port.advertising[idx] |= 8747 (ADVERTISED_1000baseT_Full | ADVERTISED_TP); 8748 } else { 8749 PMD_DRV_LOG(ERR, sc, 8750 "Invalid NVRAM config link_config=0x%08x " 8751 "speed_cap_mask=0x%08x", 8752 link_config, 8753 sc-> 8754 link_params.speed_cap_mask[idx]); 8755 return; 8756 } 8757 break; 8758 8759 case PORT_FEATURE_LINK_SPEED_2_5G: 8760 if (sc->port.supported[idx] & 8761 ELINK_SUPPORTED_2500baseX_Full) { 8762 sc->link_params.req_line_speed[idx] = 8763 ELINK_SPEED_2500; 8764 sc->port.advertising[idx] |= 8765 (ADVERTISED_2500baseX_Full | ADVERTISED_TP); 8766 } else { 8767 PMD_DRV_LOG(ERR, sc, 8768 "Invalid NVRAM config link_config=0x%08x " 8769 "speed_cap_mask=0x%08x", 8770 link_config, 8771 sc-> 8772 link_params.speed_cap_mask[idx]); 8773 return; 8774 } 8775 break; 8776 8777 case PORT_FEATURE_LINK_SPEED_10G_CX4: 8778 if (sc->port.supported[idx] & 8779 ELINK_SUPPORTED_10000baseT_Full) { 8780 sc->link_params.req_line_speed[idx] = 8781 ELINK_SPEED_10000; 8782 sc->port.advertising[idx] |= 8783 (ADVERTISED_10000baseT_Full | 8784 ADVERTISED_FIBRE); 8785 } else { 8786 PMD_DRV_LOG(ERR, sc, 8787 "Invalid NVRAM config link_config=0x%08x " 8788 "speed_cap_mask=0x%08x", 8789 link_config, 8790 sc-> 8791 link_params.speed_cap_mask[idx]); 8792 return; 8793 } 8794 break; 8795 8796 case PORT_FEATURE_LINK_SPEED_20G: 8797 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; 8798 break; 8799 8800 default: 8801 PMD_DRV_LOG(ERR, sc, 8802 "Invalid NVRAM config link_config=0x%08x " 8803 "speed_cap_mask=0x%08x", link_config, 8804 sc->link_params.speed_cap_mask[idx]); 8805 sc->link_params.req_line_speed[idx] = 8806 ELINK_SPEED_AUTO_NEG; 8807 sc->port.advertising[idx] = sc->port.supported[idx]; 8808 break; 8809 } 8810 8811 sc->link_params.req_flow_ctrl[idx] = 8812 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); 8813 8814 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { 8815 if (! 
8816 (sc-> 8817 port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { 8818 sc->link_params.req_flow_ctrl[idx] = 8819 ELINK_FLOW_CTRL_NONE; 8820 } else { 8821 bnx2x_set_requested_fc(sc); 8822 } 8823 } 8824 } 8825 } 8826 8827 static void bnx2x_get_phy_info(struct bnx2x_softc *sc) 8828 { 8829 uint8_t port = SC_PORT(sc); 8830 uint32_t eee_mode; 8831 8832 PMD_INIT_FUNC_TRACE(sc); 8833 8834 /* shmem data already read in bnx2x_get_shmem_info() */ 8835 8836 bnx2x_link_settings_supported(sc, sc->link_params.switch_cfg); 8837 bnx2x_link_settings_requested(sc); 8838 8839 /* configure link feature according to nvram value */ 8840 eee_mode = 8841 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) 8842 & PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 8843 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 8844 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { 8845 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI | 8846 ELINK_EEE_MODE_ENABLE_LPI | 8847 ELINK_EEE_MODE_OUTPUT_TIME); 8848 } else { 8849 sc->link_params.eee_mode = 0; 8850 } 8851 8852 /* get the media type */ 8853 bnx2x_media_detect(sc); 8854 } 8855 8856 static void bnx2x_set_modes_bitmap(struct bnx2x_softc *sc) 8857 { 8858 uint32_t flags = MODE_ASIC | MODE_PORT2; 8859 8860 if (CHIP_IS_E2(sc)) { 8861 flags |= MODE_E2; 8862 } else if (CHIP_IS_E3(sc)) { 8863 flags |= MODE_E3; 8864 if (CHIP_REV(sc) == CHIP_REV_Ax) { 8865 flags |= MODE_E3_A0; 8866 } else { /*if (CHIP_REV(sc) == CHIP_REV_Bx) */ 8867 8868 flags |= MODE_E3_B0 | MODE_COS3; 8869 } 8870 } 8871 8872 if (IS_MF(sc)) { 8873 flags |= MODE_MF; 8874 switch (sc->devinfo.mf_info.mf_mode) { 8875 case MULTI_FUNCTION_SD: 8876 flags |= MODE_MF_SD; 8877 break; 8878 case MULTI_FUNCTION_SI: 8879 flags |= MODE_MF_SI; 8880 break; 8881 case MULTI_FUNCTION_AFEX: 8882 flags |= MODE_MF_AFEX; 8883 break; 8884 } 8885 } else { 8886 flags |= MODE_SF; 8887 } 8888 8889 #if defined(__LITTLE_ENDIAN) 8890 flags |= MODE_LITTLE_ENDIAN; 8891 #else /* __BIG_ENDIAN */ 8892 flags |= MODE_BIG_ENDIAN; 8893 #endif 8894 8895 INIT_MODE_FLAGS(sc) = flags; 8896 } 8897 8898 int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc) 8899 { 8900 struct bnx2x_fastpath *fp; 8901 char buf[32]; 8902 uint32_t i; 8903 8904 if (IS_PF(sc)) { 8905 /************************/ 8906 /* DEFAULT STATUS BLOCK */ 8907 /************************/ 8908 8909 if (bnx2x_dma_alloc(sc, sizeof(struct host_sp_status_block), 8910 &sc->def_sb_dma, "def_sb", 8911 RTE_CACHE_LINE_SIZE) != 0) { 8912 return -1; 8913 } 8914 8915 sc->def_sb = 8916 (struct host_sp_status_block *)sc->def_sb_dma.vaddr; 8917 /***************/ 8918 /* EVENT QUEUE */ 8919 /***************/ 8920 8921 if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE, 8922 &sc->eq_dma, "ev_queue", 8923 RTE_CACHE_LINE_SIZE) != 0) { 8924 sc->def_sb = NULL; 8925 return -1; 8926 } 8927 8928 sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr; 8929 8930 /*************/ 8931 /* SLOW PATH */ 8932 /*************/ 8933 8934 if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_slowpath), 8935 &sc->sp_dma, "sp", 8936 RTE_CACHE_LINE_SIZE) != 0) { 8937 sc->eq = NULL; 8938 sc->def_sb = NULL; 8939 return -1; 8940 } 8941 8942 sc->sp = (struct bnx2x_slowpath *)sc->sp_dma.vaddr; 8943 8944 /*******************/ 8945 /* SLOW PATH QUEUE */ 8946 /*******************/ 8947 8948 if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE, 8949 &sc->spq_dma, "sp_queue", 8950 RTE_CACHE_LINE_SIZE) != 0) { 8951 sc->sp = NULL; 8952 sc->eq = NULL; 8953 sc->def_sb = NULL; 8954 return -1; 8955 } 8956 8957 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; 8958 8959 /***************************/ 
8960 /* FW DECOMPRESSION BUFFER */ 8961 /***************************/ 8962 8963 if (bnx2x_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, 8964 "fw_buf", RTE_CACHE_LINE_SIZE) != 0) { 8965 sc->spq = NULL; 8966 sc->sp = NULL; 8967 sc->eq = NULL; 8968 sc->def_sb = NULL; 8969 return -1; 8970 } 8971 8972 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; 8973 } 8974 8975 /*************/ 8976 /* FASTPATHS */ 8977 /*************/ 8978 8979 /* allocate DMA memory for each fastpath structure */ 8980 for (i = 0; i < sc->num_queues; i++) { 8981 fp = &sc->fp[i]; 8982 fp->sc = sc; 8983 fp->index = i; 8984 8985 /*******************/ 8986 /* FP STATUS BLOCK */ 8987 /*******************/ 8988 8989 snprintf(buf, sizeof(buf), "fp_%d_sb", i); 8990 if (bnx2x_dma_alloc(sc, sizeof(union bnx2x_host_hc_status_block), 8991 &fp->sb_dma, buf, RTE_CACHE_LINE_SIZE) != 0) { 8992 PMD_DRV_LOG(NOTICE, sc, "Failed to alloc %s", buf); 8993 return -1; 8994 } else { 8995 if (CHIP_IS_E2E3(sc)) { 8996 fp->status_block.e2_sb = 8997 (struct host_hc_status_block_e2 *) 8998 fp->sb_dma.vaddr; 8999 } else { 9000 fp->status_block.e1x_sb = 9001 (struct host_hc_status_block_e1x *) 9002 fp->sb_dma.vaddr; 9003 } 9004 } 9005 } 9006 9007 return 0; 9008 } 9009 9010 void bnx2x_free_hsi_mem(struct bnx2x_softc *sc) 9011 { 9012 struct bnx2x_fastpath *fp; 9013 int i; 9014 9015 for (i = 0; i < sc->num_queues; i++) { 9016 fp = &sc->fp[i]; 9017 9018 /*******************/ 9019 /* FP STATUS BLOCK */ 9020 /*******************/ 9021 9022 memset(&fp->status_block, 0, sizeof(fp->status_block)); 9023 bnx2x_dma_free(&fp->sb_dma); 9024 } 9025 9026 if (IS_PF(sc)) { 9027 /***************************/ 9028 /* FW DECOMPRESSION BUFFER */ 9029 /***************************/ 9030 9031 bnx2x_dma_free(&sc->gz_buf_dma); 9032 sc->gz_buf = NULL; 9033 9034 /*******************/ 9035 /* SLOW PATH QUEUE */ 9036 /*******************/ 9037 9038 bnx2x_dma_free(&sc->spq_dma); 9039 sc->spq = NULL; 9040 9041 /*************/ 9042 /* SLOW PATH */ 9043 /*************/ 9044 9045 bnx2x_dma_free(&sc->sp_dma); 9046 sc->sp = NULL; 9047 9048 /***************/ 9049 /* EVENT QUEUE */ 9050 /***************/ 9051 9052 bnx2x_dma_free(&sc->eq_dma); 9053 sc->eq = NULL; 9054 9055 /************************/ 9056 /* DEFAULT STATUS BLOCK */ 9057 /************************/ 9058 9059 bnx2x_dma_free(&sc->def_sb_dma); 9060 sc->def_sb = NULL; 9061 } 9062 } 9063 9064 /* 9065 * Previous driver DMAE transaction may have occurred when pre-boot stage 9066 * ended and boot began. This would invalidate the addresses of the 9067 * transaction, resulting in was-error bit set in the PCI causing all 9068 * hw-to-host PCIe transactions to timeout. 
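 * (the condition is latched as the WAS_ERROR attention in the PGLUE_B
 * interrupt status register).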
If this happened we want to clear 9069 * the interrupt which detected this from the pglueb and the was-done bit 9070 */ 9071 static void bnx2x_prev_interrupted_dmae(struct bnx2x_softc *sc) 9072 { 9073 uint32_t val; 9074 9075 if (!CHIP_IS_E1x(sc)) { 9076 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); 9077 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { 9078 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 9079 1 << SC_FUNC(sc)); 9080 } 9081 } 9082 } 9083 9084 static int bnx2x_prev_mcp_done(struct bnx2x_softc *sc) 9085 { 9086 uint32_t rc = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 9087 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 9088 if (!rc) { 9089 PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); 9090 return -1; 9091 } 9092 9093 return 0; 9094 } 9095 9096 static struct bnx2x_prev_list_node *bnx2x_prev_path_get_entry(struct bnx2x_softc *sc) 9097 { 9098 struct bnx2x_prev_list_node *tmp; 9099 9100 LIST_FOREACH(tmp, &bnx2x_prev_list, node) { 9101 if ((sc->pcie_bus == tmp->bus) && 9102 (sc->pcie_device == tmp->slot) && 9103 (SC_PATH(sc) == tmp->path)) { 9104 return tmp; 9105 } 9106 } 9107 9108 return NULL; 9109 } 9110 9111 static uint8_t bnx2x_prev_is_path_marked(struct bnx2x_softc *sc) 9112 { 9113 struct bnx2x_prev_list_node *tmp; 9114 int rc = FALSE; 9115 9116 rte_spinlock_lock(&bnx2x_prev_mtx); 9117 9118 tmp = bnx2x_prev_path_get_entry(sc); 9119 if (tmp) { 9120 if (tmp->aer) { 9121 PMD_DRV_LOG(DEBUG, sc, 9122 "Path %d/%d/%d was marked by AER", 9123 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 9124 } else { 9125 rc = TRUE; 9126 PMD_DRV_LOG(DEBUG, sc, 9127 "Path %d/%d/%d was already cleaned from previous drivers", 9128 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 9129 } 9130 } 9131 9132 rte_spinlock_unlock(&bnx2x_prev_mtx); 9133 9134 return rc; 9135 } 9136 9137 static int bnx2x_prev_mark_path(struct bnx2x_softc *sc, uint8_t after_undi) 9138 { 9139 struct bnx2x_prev_list_node *tmp; 9140 9141 rte_spinlock_lock(&bnx2x_prev_mtx); 9142 9143 /* Check whether the entry for this path already exists */ 9144 tmp = bnx2x_prev_path_get_entry(sc); 9145 if (tmp) { 9146 if (!tmp->aer) { 9147 PMD_DRV_LOG(DEBUG, sc, 9148 "Re-marking AER in path %d/%d/%d", 9149 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 9150 } else { 9151 PMD_DRV_LOG(DEBUG, sc, 9152 "Removing AER indication from path %d/%d/%d", 9153 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 9154 tmp->aer = 0; 9155 } 9156 9157 rte_spinlock_unlock(&bnx2x_prev_mtx); 9158 return 0; 9159 } 9160 9161 rte_spinlock_unlock(&bnx2x_prev_mtx); 9162 9163 /* Create an entry for this path and add it */ 9164 tmp = rte_malloc("", sizeof(struct bnx2x_prev_list_node), 9165 RTE_CACHE_LINE_SIZE); 9166 if (!tmp) { 9167 PMD_DRV_LOG(NOTICE, sc, "Failed to allocate 'bnx2x_prev_list_node'"); 9168 return -1; 9169 } 9170 9171 tmp->bus = sc->pcie_bus; 9172 tmp->slot = sc->pcie_device; 9173 tmp->path = SC_PATH(sc); 9174 tmp->aer = 0; 9175 tmp->undi = after_undi ? 
(1 << SC_PORT(sc)) : 0; 9176 9177 rte_spinlock_lock(&bnx2x_prev_mtx); 9178 9179 LIST_INSERT_HEAD(&bnx2x_prev_list, tmp, node); 9180 9181 rte_spinlock_unlock(&bnx2x_prev_mtx); 9182 9183 return 0; 9184 } 9185 9186 static int bnx2x_do_flr(struct bnx2x_softc *sc) 9187 { 9188 int i; 9189 9190 /* only E2 and onwards support FLR */ 9191 if (CHIP_IS_E1x(sc)) { 9192 PMD_DRV_LOG(WARNING, sc, "FLR not supported in E1H"); 9193 return -1; 9194 } 9195 9196 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 9197 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 9198 PMD_DRV_LOG(WARNING, sc, 9199 "FLR not supported by BC_VER: 0x%08x", 9200 sc->devinfo.bc_ver); 9201 return -1; 9202 } 9203 9204 /* Wait for Transaction Pending bit clean */ 9205 for (i = 0; i < 4; i++) { 9206 if (i) { 9207 DELAY(((1 << (i - 1)) * 100) * 1000); 9208 } 9209 9210 if (!bnx2x_is_pcie_pending(sc)) { 9211 goto clear; 9212 } 9213 } 9214 9215 PMD_DRV_LOG(NOTICE, sc, "PCIE transaction is not cleared, " 9216 "proceeding with reset anyway"); 9217 9218 clear: 9219 bnx2x_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); 9220 9221 return 0; 9222 } 9223 9224 struct bnx2x_mac_vals { 9225 uint32_t xmac_addr; 9226 uint32_t xmac_val; 9227 uint32_t emac_addr; 9228 uint32_t emac_val; 9229 uint32_t umac_addr; 9230 uint32_t umac_val; 9231 uint32_t bmac_addr; 9232 uint32_t bmac_val[2]; 9233 }; 9234 9235 static void 9236 bnx2x_prev_unload_close_mac(struct bnx2x_softc *sc, struct bnx2x_mac_vals *vals) 9237 { 9238 uint32_t val, base_addr, offset, mask, reset_reg; 9239 uint8_t mac_stopped = FALSE; 9240 uint8_t port = SC_PORT(sc); 9241 uint32_t wb_data[2]; 9242 9243 /* reset addresses as they also mark which values were changed */ 9244 vals->bmac_addr = 0; 9245 vals->umac_addr = 0; 9246 vals->xmac_addr = 0; 9247 vals->emac_addr = 0; 9248 9249 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); 9250 9251 if (!CHIP_IS_E3(sc)) { 9252 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 9253 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 9254 if ((mask & reset_reg) && val) { 9255 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM 9256 : NIG_REG_INGRESS_BMAC0_MEM; 9257 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL 9258 : BIGMAC_REGISTER_BMAC_CONTROL; 9259 9260 /* 9261 * use rd/wr since we cannot use dmae. This is safe 9262 * since MCP won't access the bus due to the request 9263 * to unload, and no function on the path can be 9264 * loaded at this time. 9265 */ 9266 wb_data[0] = REG_RD(sc, base_addr + offset); 9267 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); 9268 vals->bmac_addr = base_addr + offset; 9269 vals->bmac_val[0] = wb_data[0]; 9270 vals->bmac_val[1] = wb_data[1]; 9271 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; 9272 REG_WR(sc, vals->bmac_addr, wb_data[0]); 9273 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); 9274 } 9275 9276 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc) * 4; 9277 vals->emac_val = REG_RD(sc, vals->emac_addr); 9278 REG_WR(sc, vals->emac_addr, 0); 9279 mac_stopped = TRUE; 9280 } else { 9281 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 9282 base_addr = SC_PORT(sc) ? 
GRCBASE_XMAC1 : GRCBASE_XMAC0; 9283 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); 9284 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, 9285 val & ~(1 << 1)); 9286 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, 9287 val | (1 << 1)); 9288 vals->xmac_addr = base_addr + XMAC_REG_CTRL; 9289 vals->xmac_val = REG_RD(sc, vals->xmac_addr); 9290 REG_WR(sc, vals->xmac_addr, 0); 9291 mac_stopped = TRUE; 9292 } 9293 9294 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 9295 if (mask & reset_reg) { 9296 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 9297 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; 9298 vals->umac_val = REG_RD(sc, vals->umac_addr); 9299 REG_WR(sc, vals->umac_addr, 0); 9300 mac_stopped = TRUE; 9301 } 9302 } 9303 9304 if (mac_stopped) { 9305 DELAY(20000); 9306 } 9307 } 9308 9309 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 9310 #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) 9311 #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 9312 #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 9313 9314 static void 9315 bnx2x_prev_unload_undi_inc(struct bnx2x_softc *sc, uint8_t port, uint8_t inc) 9316 { 9317 uint16_t rcq, bd; 9318 uint32_t tmp_reg = REG_RD(sc, BNX2X_PREV_UNDI_PROD_ADDR(port)); 9319 9320 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; 9321 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; 9322 9323 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); 9324 REG_WR(sc, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg); 9325 } 9326 9327 static int bnx2x_prev_unload_common(struct bnx2x_softc *sc) 9328 { 9329 uint32_t reset_reg, tmp_reg = 0, rc; 9330 uint8_t prev_undi = FALSE; 9331 struct bnx2x_mac_vals mac_vals; 9332 uint32_t timer_count = 1000; 9333 uint32_t prev_brb; 9334 9335 /* 9336 * It is possible a previous function received 'common' answer, 9337 * but hasn't loaded yet, therefore creating a scenario of 9338 * multiple functions receiving 'common' on the same path. 9339 */ 9340 memset(&mac_vals, 0, sizeof(mac_vals)); 9341 9342 if (bnx2x_prev_is_path_marked(sc)) { 9343 return bnx2x_prev_mcp_done(sc); 9344 } 9345 9346 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); 9347 9348 /* Reset should be performed after BRB is emptied */ 9349 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 9350 /* Close the MAC Rx to prevent BRB from filling up */ 9351 bnx2x_prev_unload_close_mac(sc, &mac_vals); 9352 9353 /* close LLH filters towards the BRB */ 9354 elink_set_rx_filter(&sc->link_params, 0); 9355 9356 /* 9357 * Check if the UNDI driver was previously loaded. 
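 * (if it was, the BRB drain loop below must also keep advancing the UNDI
 * Rx/BD producers via bnx2x_prev_unload_undi_inc() so the BRB can actually
 * empty).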
9358 * UNDI driver initializes CID offset for normal bell to 0x7 9359 */ 9360 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 9361 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); 9362 if (tmp_reg == 0x7) { 9363 PMD_DRV_LOG(DEBUG, sc, "UNDI previously loaded"); 9364 prev_undi = TRUE; 9365 /* clear the UNDI indication */ 9366 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); 9367 /* clear possible idle check errors */ 9368 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); 9369 } 9370 } 9371 9372 /* wait until BRB is empty */ 9373 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 9374 while (timer_count) { 9375 prev_brb = tmp_reg; 9376 9377 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 9378 if (!tmp_reg) { 9379 break; 9380 } 9381 9382 PMD_DRV_LOG(DEBUG, sc, "BRB still has 0x%08x", tmp_reg); 9383 9384 /* reset timer as long as BRB actually gets emptied */ 9385 if (prev_brb > tmp_reg) { 9386 timer_count = 1000; 9387 } else { 9388 timer_count--; 9389 } 9390 9391 /* If UNDI resides in memory, manually increment it */ 9392 if (prev_undi) { 9393 bnx2x_prev_unload_undi_inc(sc, SC_PORT(sc), 1); 9394 } 9395 9396 DELAY(10); 9397 } 9398 9399 if (!timer_count) { 9400 PMD_DRV_LOG(NOTICE, sc, "Failed to empty BRB"); 9401 } 9402 } 9403 9404 /* No packets are in the pipeline, path is ready for reset */ 9405 bnx2x_reset_common(sc); 9406 9407 if (mac_vals.xmac_addr) { 9408 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); 9409 } 9410 if (mac_vals.umac_addr) { 9411 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); 9412 } 9413 if (mac_vals.emac_addr) { 9414 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); 9415 } 9416 if (mac_vals.bmac_addr) { 9417 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); 9418 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); 9419 } 9420 9421 rc = bnx2x_prev_mark_path(sc, prev_undi); 9422 if (rc) { 9423 bnx2x_prev_mcp_done(sc); 9424 return rc; 9425 } 9426 9427 return bnx2x_prev_mcp_done(sc); 9428 } 9429 9430 static int bnx2x_prev_unload_uncommon(struct bnx2x_softc *sc) 9431 { 9432 int rc; 9433 9434 /* Test if previous unload process was already finished for this path */ 9435 if (bnx2x_prev_is_path_marked(sc)) { 9436 return bnx2x_prev_mcp_done(sc); 9437 } 9438 9439 /* 9440 * If function has FLR capabilities, and existing FW version matches 9441 * the one required, then FLR will be sufficient to clean any residue 9442 * left by previous driver 9443 */ 9444 rc = bnx2x_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); 9445 if (!rc) { 9446 /* fw version is good */ 9447 rc = bnx2x_do_flr(sc); 9448 } 9449 9450 if (!rc) { 9451 /* FLR was performed */ 9452 return 0; 9453 } 9454 9455 PMD_DRV_LOG(INFO, sc, "Could not FLR"); 9456 9457 /* Close the MCP request, return failure */ 9458 rc = bnx2x_prev_mcp_done(sc); 9459 if (!rc) { 9460 rc = BNX2X_PREV_WAIT_NEEDED; 9461 } 9462 9463 return rc; 9464 } 9465 9466 static int bnx2x_prev_unload(struct bnx2x_softc *sc) 9467 { 9468 int time_counter = 10; 9469 uint32_t fw, hw_lock_reg, hw_lock_val; 9470 uint32_t rc = 0; 9471 9472 PMD_INIT_FUNC_TRACE(sc); 9473 9474 /* 9475 * Clear HW from errors which may have resulted from an interrupted 9476 * DMAE transaction. 9477 */ 9478 bnx2x_prev_interrupted_dmae(sc); 9479 9480 /* Release previously held locks */ 9481 hw_lock_reg = (SC_FUNC(sc) <= 5) ? 
9482 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : 9483 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); 9484 9485 hw_lock_val = (REG_RD(sc, hw_lock_reg)); 9486 if (hw_lock_val) { 9487 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 9488 PMD_DRV_LOG(DEBUG, sc, "Releasing previously held NVRAM lock"); 9489 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, 9490 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); 9491 } 9492 PMD_DRV_LOG(DEBUG, sc, "Releasing previously held HW lock"); 9493 REG_WR(sc, hw_lock_reg, 0xffffffff); 9494 } 9495 9496 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { 9497 PMD_DRV_LOG(DEBUG, sc, "Releasing previously held ALR"); 9498 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); 9499 } 9500 9501 do { 9502 /* Lock MCP using an unload request */ 9503 fw = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 9504 if (!fw) { 9505 PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); 9506 rc = -1; 9507 break; 9508 } 9509 9510 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { 9511 rc = bnx2x_prev_unload_common(sc); 9512 break; 9513 } 9514 9515 /* non-common reply from MCP might require looping */ 9516 rc = bnx2x_prev_unload_uncommon(sc); 9517 if (rc != BNX2X_PREV_WAIT_NEEDED) { 9518 break; 9519 } 9520 9521 DELAY(20000); 9522 } while (--time_counter); 9523 9524 if (!time_counter || rc) { 9525 PMD_DRV_LOG(NOTICE, sc, "Failed to unload previous driver!"); 9526 rc = -1; 9527 } 9528 9529 return rc; 9530 } 9531 9532 static void 9533 bnx2x_dcbx_set_state(struct bnx2x_softc *sc, uint8_t dcb_on, uint32_t dcbx_enabled) 9534 { 9535 if (!CHIP_IS_E1x(sc)) { 9536 sc->dcb_state = dcb_on; 9537 sc->dcbx_enabled = dcbx_enabled; 9538 } else { 9539 sc->dcb_state = FALSE; 9540 sc->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID; 9541 } 9542 PMD_DRV_LOG(DEBUG, sc, 9543 "DCB state [%s:%s]", 9544 dcb_on ? "ON" : "OFF", 9545 (dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ? "user-mode" : 9546 (dcbx_enabled == 9547 BNX2X_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" 9548 : (dcbx_enabled == 9549 BNX2X_DCBX_ENABLED_ON_NEG_ON) ? 
9550 "on-chip with negotiation" : "invalid"); 9551 } 9552 9553 static int bnx2x_set_qm_cid_count(struct bnx2x_softc *sc) 9554 { 9555 int cid_count = BNX2X_L2_MAX_CID(sc); 9556 9557 if (CNIC_SUPPORT(sc)) { 9558 cid_count += CNIC_CID_MAX; 9559 } 9560 9561 return roundup(cid_count, QM_CID_ROUND); 9562 } 9563 9564 static void bnx2x_init_multi_cos(struct bnx2x_softc *sc) 9565 { 9566 int pri, cos; 9567 9568 uint32_t pri_map = 0; 9569 9570 for (pri = 0; pri < BNX2X_MAX_PRIORITY; pri++) { 9571 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); 9572 if (cos < sc->max_cos) { 9573 sc->prio_to_cos[pri] = cos; 9574 } else { 9575 PMD_DRV_LOG(WARNING, sc, 9576 "Invalid COS %d for priority %d " 9577 "(max COS is %d), setting to 0", cos, pri, 9578 (sc->max_cos - 1)); 9579 sc->prio_to_cos[pri] = 0; 9580 } 9581 } 9582 } 9583 9584 static uint8_t bnx2x_pci_capabilities[] = { 9585 RTE_PCI_CAP_ID_EXP, 9586 RTE_PCI_CAP_ID_PM, 9587 RTE_PCI_CAP_ID_MSI, 9588 RTE_PCI_CAP_ID_MSIX, 9589 }; 9590 9591 static int bnx2x_pci_get_caps(struct bnx2x_softc *sc) 9592 { 9593 struct bnx2x_pci_cap *cap; 9594 unsigned int i; 9595 9596 cap = sc->pci_caps = rte_zmalloc("caps", sizeof(struct bnx2x_pci_cap), 9597 RTE_CACHE_LINE_SIZE); 9598 if (!cap) { 9599 PMD_DRV_LOG(NOTICE, sc, "Failed to allocate memory"); 9600 return -ENOMEM; 9601 } 9602 9603 if (!rte_pci_has_capability_list(sc->pci_dev)) { 9604 PMD_DRV_LOG(NOTICE, sc, "PCIe capability reading failed"); 9605 return -1; 9606 } 9607 9608 for (i = 0; i < RTE_DIM(bnx2x_pci_capabilities); i++) { 9609 off_t pos = rte_pci_find_capability(sc->pci_dev, 9610 bnx2x_pci_capabilities[i]); 9611 9612 if (pos <= 0) 9613 continue; 9614 9615 cap->id = bnx2x_pci_capabilities[i]; 9616 cap->type = BNX2X_PCI_CAP; 9617 cap->addr = pos; 9618 cap->next = rte_zmalloc("pci_cap", 9619 sizeof(struct bnx2x_pci_cap), 9620 RTE_CACHE_LINE_SIZE); 9621 if (!cap->next) { 9622 PMD_DRV_LOG(NOTICE, sc, "Failed to allocate memory"); 9623 return -ENOMEM; 9624 } 9625 cap = cap->next; 9626 } 9627 9628 return 0; 9629 } 9630 9631 static void bnx2x_init_rte(struct bnx2x_softc *sc) 9632 { 9633 if (IS_VF(sc)) { 9634 sc->max_tx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF, 9635 sc->igu_sb_cnt); 9636 sc->max_rx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF, 9637 sc->igu_sb_cnt); 9638 } else { 9639 sc->max_rx_queues = BNX2X_MAX_RSS_COUNT(sc); 9640 sc->max_tx_queues = sc->max_rx_queues; 9641 } 9642 } 9643 9644 #define FW_HEADER_LEN 104 9645 #define FW_NAME_57711 "/lib/firmware/bnx2x/bnx2x-e1h-7.13.11.0.fw" 9646 #define FW_NAME_57810 "/lib/firmware/bnx2x/bnx2x-e2-7.13.11.0.fw" 9647 9648 void bnx2x_load_firmware(struct bnx2x_softc *sc) 9649 { 9650 const char *fwname; 9651 void *buf; 9652 size_t bufsz; 9653 9654 fwname = sc->devinfo.device_id == CHIP_NUM_57711 9655 ? 
FW_NAME_57711 : FW_NAME_57810; 9656 if (rte_firmware_read(fwname, &buf, &bufsz) != 0) { 9657 PMD_DRV_LOG(NOTICE, sc, "Can't open firmware file"); 9658 return; 9659 } 9660 9661 sc->firmware = rte_zmalloc("bnx2x_fw", bufsz, RTE_CACHE_LINE_SIZE); 9662 if (!sc->firmware) { 9663 PMD_DRV_LOG(NOTICE, sc, "Can't allocate memory for firmware"); 9664 goto out; 9665 } 9666 9667 sc->fw_len = bufsz; 9668 if (sc->fw_len < FW_HEADER_LEN) { 9669 PMD_DRV_LOG(NOTICE, sc, 9670 "Invalid fw size: %" PRIu64, sc->fw_len); 9671 goto out; 9672 } 9673 9674 memcpy(sc->firmware, buf, sc->fw_len); 9675 PMD_DRV_LOG(DEBUG, sc, "fw_len = %" PRIu64, sc->fw_len); 9676 out: 9677 free(buf); 9678 } 9679 9680 static void 9681 bnx2x_data_to_init_ops(uint8_t * data, struct raw_op *dst, uint32_t len) 9682 { 9683 uint32_t *src = (uint32_t *) data; 9684 uint32_t i, j, tmp; 9685 9686 for (i = 0, j = 0; i < len / 8; ++i, j += 2) { 9687 tmp = rte_be_to_cpu_32(src[j]); 9688 dst[i].op = (tmp >> 24) & 0xFF; 9689 dst[i].offset = tmp & 0xFFFFFF; 9690 dst[i].raw_data = rte_be_to_cpu_32(src[j + 1]); 9691 } 9692 } 9693 9694 static void 9695 bnx2x_data_to_init_offsets(uint8_t * data, uint16_t * dst, uint32_t len) 9696 { 9697 uint16_t *src = (uint16_t *) data; 9698 uint32_t i; 9699 9700 for (i = 0; i < len / 2; ++i) 9701 dst[i] = rte_be_to_cpu_16(src[i]); 9702 } 9703 9704 static void bnx2x_data_to_init_data(uint8_t * data, uint32_t * dst, uint32_t len) 9705 { 9706 uint32_t *src = (uint32_t *) data; 9707 uint32_t i; 9708 9709 for (i = 0; i < len / 4; ++i) 9710 dst[i] = rte_be_to_cpu_32(src[i]); 9711 } 9712 9713 static void bnx2x_data_to_iro_array(uint8_t * data, struct iro *dst, uint32_t len) 9714 { 9715 uint32_t *src = (uint32_t *) data; 9716 uint32_t i, j, tmp; 9717 9718 for (i = 0, j = 0; i < len / sizeof(struct iro); ++i, ++j) { 9719 dst[i].base = rte_be_to_cpu_32(src[j++]); 9720 tmp = rte_be_to_cpu_32(src[j]); 9721 dst[i].m1 = (tmp >> 16) & 0xFFFF; 9722 dst[i].m2 = tmp & 0xFFFF; 9723 ++j; 9724 tmp = rte_be_to_cpu_32(src[j]); 9725 dst[i].m3 = (tmp >> 16) & 0xFFFF; 9726 dst[i].size = tmp & 0xFFFF; 9727 } 9728 } 9729 9730 /* 9731 * Device attach function. 9732 * 9733 * Allocates device resources, performs secondary chip identification, and 9734 * initializes driver instance variables. This function is called from driver 9735 * load after a successful probe. 9736 * 9737 * Returns: 9738 * 0 = Success, >0 = Failure 9739 */ 9740 int bnx2x_attach(struct bnx2x_softc *sc) 9741 { 9742 int rc; 9743 9744 PMD_DRV_LOG(DEBUG, sc, "Starting attach..."); 9745 9746 rc = bnx2x_pci_get_caps(sc); 9747 if (rc) { 9748 PMD_DRV_LOG(NOTICE, sc, "PCIe caps reading was failed"); 9749 return rc; 9750 } 9751 9752 sc->state = BNX2X_STATE_CLOSED; 9753 9754 pci_write_long(sc, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); 9755 9756 sc->igu_base_addr = IS_VF(sc) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; 9757 9758 /* get PCI capabilities */ 9759 bnx2x_probe_pci_caps(sc); 9760 9761 if (sc->devinfo.pcie_msix_cap_reg != 0) { 9762 uint32_t val; 9763 pci_read(sc, 9764 (sc->devinfo.pcie_msix_cap_reg + RTE_PCI_MSIX_FLAGS), &val, 9765 2); 9766 sc->igu_sb_cnt = (val & RTE_PCI_MSIX_FLAGS_QSIZE) + 1; 9767 } else { 9768 sc->igu_sb_cnt = 1; 9769 } 9770 9771 /* Init RTE stuff */ 9772 bnx2x_init_rte(sc); 9773 9774 if (IS_PF(sc)) { 9775 /* Enable internal target-read (in case we are probed after PF 9776 * FLR). Must be done prior to any BAR read access. 
Only for 9777 * 57712 and up 9778 */ 9779 if (!CHIP_IS_E1x(sc)) { 9780 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 9781 1); 9782 DELAY(200000); 9783 } 9784 9785 /* get device info and set params */ 9786 if (bnx2x_get_device_info(sc) != 0) { 9787 PMD_DRV_LOG(NOTICE, sc, "getting device info"); 9788 return -ENXIO; 9789 } 9790 9791 /* get phy settings from shmem and 'and' against admin settings */ 9792 bnx2x_get_phy_info(sc); 9793 } else { 9794 /* Left mac of VF unfilled, PF should set it for VF */ 9795 memset(sc->link_params.mac_addr, 0, RTE_ETHER_ADDR_LEN); 9796 } 9797 9798 sc->wol = 0; 9799 9800 /* set the default MTU (changed via ifconfig) */ 9801 sc->mtu = RTE_ETHER_MTU; 9802 9803 bnx2x_set_modes_bitmap(sc); 9804 9805 /* need to reset chip if UNDI was active */ 9806 if (IS_PF(sc) && !BNX2X_NOMCP(sc)) { 9807 /* init fw_seq */ 9808 sc->fw_seq = 9809 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 9810 DRV_MSG_SEQ_NUMBER_MASK); 9811 PMD_DRV_LOG(DEBUG, sc, "prev unload fw_seq 0x%04x", 9812 sc->fw_seq); 9813 bnx2x_prev_unload(sc); 9814 } 9815 9816 bnx2x_dcbx_set_state(sc, FALSE, BNX2X_DCBX_ENABLED_OFF); 9817 9818 /* calculate qm_cid_count */ 9819 sc->qm_cid_count = bnx2x_set_qm_cid_count(sc); 9820 9821 sc->max_cos = 1; 9822 bnx2x_init_multi_cos(sc); 9823 9824 return 0; 9825 } 9826 9827 static void 9828 bnx2x_igu_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t segment, 9829 uint16_t index, uint8_t op, uint8_t update) 9830 { 9831 uint32_t igu_addr = sc->igu_base_addr; 9832 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8; 9833 bnx2x_igu_ack_sb_gen(sc, segment, index, op, update, igu_addr); 9834 } 9835 9836 static void 9837 bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t storm, 9838 uint16_t index, uint8_t op, uint8_t update) 9839 { 9840 if (unlikely(sc->devinfo.int_block == INT_BLOCK_HC)) 9841 bnx2x_hc_ack_sb(sc, igu_sb_id, storm, index, op, update); 9842 else { 9843 uint8_t segment; 9844 if (CHIP_INT_MODE_IS_BC(sc)) { 9845 segment = storm; 9846 } else if (igu_sb_id != sc->igu_dsb_id) { 9847 segment = IGU_SEG_ACCESS_DEF; 9848 } else if (storm == ATTENTION_ID) { 9849 segment = IGU_SEG_ACCESS_ATTN; 9850 } else { 9851 segment = IGU_SEG_ACCESS_DEF; 9852 } 9853 bnx2x_igu_ack_sb(sc, igu_sb_id, segment, index, op, update); 9854 } 9855 } 9856 9857 static void 9858 bnx2x_igu_clear_sb_gen(struct bnx2x_softc *sc, uint8_t func, uint8_t idu_sb_id, 9859 uint8_t is_pf) 9860 { 9861 uint32_t data, ctl, cnt = 100; 9862 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 9863 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 9864 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + 9865 (idu_sb_id / 32) * 4; 9866 uint32_t sb_bit = 1 << (idu_sb_id % 32); 9867 uint32_t func_encode = func | 9868 (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 9869 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 9870 9871 /* Not supported in BC mode */ 9872 if (CHIP_INT_MODE_IS_BC(sc)) { 9873 return; 9874 } 9875 9876 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << 9877 IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 9878 IGU_REGULAR_CLEANUP_SET | IGU_REGULAR_BCLEANUP); 9879 9880 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | 9881 (func_encode << IGU_CTRL_REG_FID_SHIFT) | 9882 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); 9883 9884 REG_WR(sc, igu_addr_data, data); 9885 9886 mb(); 9887 9888 PMD_DRV_LOG(DEBUG, sc, "write 0x%08x to IGU(via GRC) addr 0x%x", 9889 ctl, igu_addr_ctl); 9890 REG_WR(sc, igu_addr_ctl, ctl); 9891 9892 mb(); 9893 9894 /* wait for clean up to finish */ 9895 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { 9896 DELAY(20000); 9897 } 9898 9899 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { 9900 PMD_DRV_LOG(DEBUG, sc, 9901 "Unable to finish IGU cleanup: " 9902 "idu_sb_id %d offset %d bit %d (cnt %d)", 9903 idu_sb_id, idu_sb_id / 32, idu_sb_id % 32, cnt); 9904 } 9905 } 9906 9907 static void bnx2x_igu_clear_sb(struct bnx2x_softc *sc, uint8_t idu_sb_id) 9908 { 9909 bnx2x_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); 9910 } 9911 9912 /*******************/ 9913 /* ECORE CALLBACKS */ 9914 /*******************/ 9915 9916 static void bnx2x_reset_common(struct bnx2x_softc *sc) 9917 { 9918 uint32_t val = 0x1400; 9919 9920 PMD_INIT_FUNC_TRACE(sc); 9921 9922 /* reset_common */ 9923 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 9924 0xd3ffff7f); 9925 9926 if (CHIP_IS_E3(sc)) { 9927 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 9928 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 9929 } 9930 9931 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); 9932 } 9933 9934 static void bnx2x_common_init_phy(struct bnx2x_softc *sc) 9935 { 9936 uint32_t shmem_base[2]; 9937 uint32_t shmem2_base[2]; 9938 9939 /* Avoid common init in case MFW supports LFA */ 9940 if (SHMEM2_RD(sc, size) > 9941 (uint32_t) offsetof(struct shmem2_region, 9942 lfa_host_addr[SC_PORT(sc)])) { 9943 return; 9944 } 9945 9946 shmem_base[0] = sc->devinfo.shmem_base; 9947 shmem2_base[0] = sc->devinfo.shmem2_base; 9948 9949 if (!CHIP_IS_E1x(sc)) { 9950 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); 9951 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); 9952 } 9953 9954 bnx2x_acquire_phy_lock(sc); 9955 elink_common_init_phy(sc, shmem_base, shmem2_base, 9956 sc->devinfo.chip_id, 0); 9957 bnx2x_release_phy_lock(sc); 9958 } 9959 9960 static void bnx2x_pf_disable(struct bnx2x_softc *sc) 9961 { 9962 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 9963 9964 val &= ~IGU_PF_CONF_FUNC_EN; 9965 9966 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 9967 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 9968 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); 9969 } 9970 9971 static void bnx2x_init_pxp(struct bnx2x_softc *sc) 9972 { 9973 uint16_t devctl; 9974 int r_order, w_order; 9975 9976 devctl = bnx2x_pcie_capability_read(sc, RTE_PCI_EXP_DEVCTL); 9977 9978 w_order = ((devctl & RTE_PCI_EXP_DEVCTL_PAYLOAD) >> 5); 9979 r_order = ((devctl & RTE_PCI_EXP_DEVCTL_READRQ) >> 12); 9980 9981 ecore_init_pxp_arb(sc, r_order, w_order); 9982 } 9983 9984 static uint32_t bnx2x_get_pretend_reg(struct bnx2x_softc *sc) 9985 { 9986 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; 9987 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); 9988 return base + (SC_ABS_FUNC(sc)) * stride; 9989 } 9990 9991 /* 9992 * Called 
only on E1H or E2. 9993 * When pretending to be PF, the pretend value is the function number 0..7. 9994 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID 9995 * combination. 9996 */ 9997 static int bnx2x_pretend_func(struct bnx2x_softc *sc, uint16_t pretend_func_val) 9998 { 9999 uint32_t pretend_reg; 10000 10001 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) 10002 return -1; 10003 10004 /* get my own pretend register */ 10005 pretend_reg = bnx2x_get_pretend_reg(sc); 10006 REG_WR(sc, pretend_reg, pretend_func_val); 10007 REG_RD(sc, pretend_reg); 10008 return 0; 10009 } 10010 10011 static void bnx2x_setup_fan_failure_detection(struct bnx2x_softc *sc) 10012 { 10013 int is_required; 10014 uint32_t val; 10015 int port; 10016 10017 is_required = 0; 10018 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & 10019 SHARED_HW_CFG_FAN_FAILURE_MASK); 10020 10021 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { 10022 is_required = 1; 10023 } 10024 /* 10025 * The fan failure mechanism is usually related to the PHY type since 10026 * the power consumption of the board is affected by the PHY. Currently, 10027 * fan is required for most designs with SFX7101, BNX2X8727 and BNX2X8481. 10028 */ 10029 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { 10030 for (port = PORT_0; port < PORT_MAX; port++) { 10031 is_required |= elink_fan_failure_det_req(sc, 10032 sc-> 10033 devinfo.shmem_base, 10034 sc-> 10035 devinfo.shmem2_base, 10036 port); 10037 } 10038 } 10039 10040 if (is_required == 0) { 10041 return; 10042 } 10043 10044 /* Fan failure is indicated by SPIO 5 */ 10045 bnx2x_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 10046 10047 /* set to active low mode */ 10048 val = REG_RD(sc, MISC_REG_SPIO_INT); 10049 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 10050 REG_WR(sc, MISC_REG_SPIO_INT, val); 10051 10052 /* enable interrupt to signal the IGU */ 10053 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 10054 val |= MISC_SPIO_SPIO5; 10055 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); 10056 } 10057 10058 static void bnx2x_enable_blocks_attention(struct bnx2x_softc *sc) 10059 { 10060 uint32_t val; 10061 10062 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 10063 if (!CHIP_IS_E1x(sc)) { 10064 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); 10065 } else { 10066 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); 10067 } 10068 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 10069 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 10070 /* 10071 * mask read length error interrupts in brb for parser 10072 * (parsing unit and 'checksum and crc' unit) 10073 * these errors are legal (PU reads fixed length and CAC can cause 10074 * read length error on truncated packets) 10075 */ 10076 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); 10077 REG_WR(sc, QM_REG_QM_INT_MASK, 0); 10078 REG_WR(sc, TM_REG_TM_INT_MASK, 0); 10079 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); 10080 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); 10081 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); 10082 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ 10083 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ 10084 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); 10085 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); 10086 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); 10087 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ 10088 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ 10089 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 10090 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); 10091 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); 10092 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); 10093 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ 
10094 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ 10095 10096 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 10097 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 10098 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); 10099 if (!CHIP_IS_E1x(sc)) { 10100 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 10101 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); 10102 } 10103 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); 10104 10105 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); 10106 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); 10107 REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); 10108 /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ 10109 10110 if (!CHIP_IS_E1x(sc)) { 10111 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 10112 REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 10113 } 10114 10115 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); 10116 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); 10117 /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ 10118 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 10119 } 10120 10121 /** 10122 * bnx2x_init_hw_common - initialize the HW at the COMMON phase. 10123 * 10124 * @sc: driver handle 10125 */ 10126 static int bnx2x_init_hw_common(struct bnx2x_softc *sc) 10127 { 10128 uint8_t abs_func_id; 10129 uint32_t val; 10130 10131 PMD_DRV_LOG(DEBUG, sc, 10132 "starting common init for func %d", SC_ABS_FUNC(sc)); 10133 10134 /* 10135 * take the RESET lock to protect undi_unload flow from accessing 10136 * registers while we are resetting the chip 10137 */ 10138 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 10139 10140 bnx2x_reset_common(sc); 10141 10142 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); 10143 10144 val = 0xfffc; 10145 if (CHIP_IS_E3(sc)) { 10146 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 10147 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 10148 } 10149 10150 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); 10151 10152 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 10153 10154 ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); 10155 10156 if (!CHIP_IS_E1x(sc)) { 10157 /* 10158 * 4-port mode or 2-port mode we need to turn off master-enable for 10159 * everyone. After that we turn it back on for self. 
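* (Master-enable here is the per-function PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER bit; the other functions on the path are reached through the pretend mechanism used in the loop below.)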
So, we disregard 10160 * multi-function, and always disable all functions on the given path, 10161 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 10162 */ 10163 for (abs_func_id = SC_PATH(sc); 10164 abs_func_id < (E2_FUNC_MAX * 2); abs_func_id += 2) { 10165 if (abs_func_id == SC_ABS_FUNC(sc)) { 10166 REG_WR(sc, 10167 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 10168 1); 10169 continue; 10170 } 10171 10172 bnx2x_pretend_func(sc, abs_func_id); 10173 10174 /* clear pf enable */ 10175 bnx2x_pf_disable(sc); 10176 10177 bnx2x_pretend_func(sc, SC_ABS_FUNC(sc)); 10178 } 10179 } 10180 10181 ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); 10182 10183 ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); 10184 bnx2x_init_pxp(sc); 10185 10186 #ifdef __BIG_ENDIAN 10187 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); 10188 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); 10189 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); 10190 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); 10191 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); 10192 /* make sure this value is 0 */ 10193 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); 10194 10195 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); 10196 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); 10197 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); 10198 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); 10199 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 10200 #endif 10201 10202 ecore_ilt_init_page_size(sc, INITOP_SET); 10203 10204 if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { 10205 REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 10206 } 10207 10208 /* let the HW do it's magic... */ 10209 DELAY(100000); 10210 10211 /* finish PXP init */ 10212 10213 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); 10214 if (val != 1) { 10215 PMD_DRV_LOG(NOTICE, sc, "PXP2 CFG failed"); 10216 return -1; 10217 } 10218 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); 10219 if (val != 1) { 10220 PMD_DRV_LOG(NOTICE, sc, "PXP2 RD_INIT failed"); 10221 return -1; 10222 } 10223 10224 /* 10225 * Timer bug workaround for E2 only. We need to set the entire ILT to have 10226 * entries with value "0" and valid bit on. This needs to be done by the 10227 * first PF that is loaded in a path (i.e. common phase) 10228 */ 10229 if (!CHIP_IS_E1x(sc)) { 10230 /* 10231 * In E2 there is a bug in the timers block that can cause function 6 / 7 10232 * (i.e. vnic3) to start even if it is marked as "scan-off". 10233 * This occurs when a different function (func2,3) is being marked 10234 * as "scan-off". Real-life scenario for example: if a driver is being 10235 * load-unloaded while func6,7 are down. This will cause the timer to access 10236 * the ilt, translate to a logical address and send a request to read/write. 10237 * Since the ilt for the function that is down is not valid, this will cause 10238 * a translation error which is unrecoverable. 10239 * The Workaround is intended to make sure that when this happens nothing 10240 * fatal will occur. The workaround: 10241 * 1. First PF driver which loads on a path will: 10242 * a. After taking the chip out of reset, by using pretend, 10243 * it will write "0" to the following registers of 10244 * the other vnics. 10245 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 10246 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); 10247 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); 10248 * And for itself it will write '1' to 10249 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable 10250 * dmae-operations (writing to pram for example.) 10251 * note: can be done for only function 6,7 but cleaner this 10252 * way. 10253 * b. Write zero+valid to the entire ILT. 
10254 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of 10255 * VNIC3 (of that port). The range allocated will be the 10256 * entire ILT. This is needed to prevent ILT range error. 10257 * 2. Any PF driver load flow: 10258 * a. ILT update with the physical addresses of the allocated 10259 * logical pages. 10260 * b. Wait 20msec. - note that this timeout is needed to make 10261 * sure there are no requests in one of the PXP internal 10262 * queues with "old" ILT addresses. 10263 * c. PF enable in the PGLC. 10264 * d. Clear the was_error of the PF in the PGLC. (could have 10265 * occurred while driver was down) 10266 * e. PF enable in the CFC (WEAK + STRONG) 10267 * f. Timers scan enable 10268 * 3. PF driver unload flow: 10269 * a. Clear the Timers scan_en. 10270 * b. Polling for scan_on=0 for that PF. 10271 * c. Clear the PF enable bit in the PXP. 10272 * d. Clear the PF enable in the CFC (WEAK + STRONG) 10273 * e. Write zero+valid to all ILT entries (The valid bit must 10274 * stay set) 10275 * f. If this is VNIC 3 of a port then also init 10276 * first_timers_ilt_entry to zero and last_timers_ilt_entry 10277 * to the last entry in the ILT. 10278 * 10279 * Notes: 10280 * Currently the PF error in the PGLC is non-recoverable. 10281 * In the future there will be a recovery routine for this error. 10282 * Currently attention is masked. 10283 * Having an MCP lock on the load/unload process does not guarantee that 10284 * there is no Timer disable during Func6/7 enable. This is because the 10285 * Timers scan is currently being cleared by the MCP on FLR. 10286 * Step 2.d can be done only for PF6/7 and the driver can also check if 10287 * there is an error before clearing it. But the flow above is simpler and 10288 * more general. 10289 * All ILT entries are written with zero+valid and not just PF6/7 10290 * ILT entries since in the future the ILT entries allocation for 10291 * PF-s might be dynamic. 10292 */ 10293 struct ilt_client_info ilt_cli; 10294 struct ecore_ilt ilt; 10295 10296 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 10297 memset(&ilt, 0, sizeof(struct ecore_ilt)); 10298 10299 /* initialize dummy TM client */ 10300 ilt_cli.start = 0; 10301 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 10302 ilt_cli.client_num = ILT_CLIENT_TM; 10303 10304 /* 10305 * Step 1: set zeroes to all ilt page entries with valid bit on 10306 * Step 2: set the timers first/last ilt entry to point 10307 * to the entire range to prevent ILT range error for 3rd/4th 10308 * vnic (this code assumes existence of the vnic) 10309 * 10310 * both steps performed by call to ecore_ilt_client_init_op() 10311 * with dummy TM client 10312 * 10313 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT 10314 * and its counterpart are split registers 10315 */ 10316 10317 bnx2x_pretend_func(sc, (SC_PATH(sc) + 6)); 10318 ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR); 10319 bnx2x_pretend_func(sc, SC_ABS_FUNC(sc)); 10320 10321 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN); 10322 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN); 10323 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); 10324 } 10325 10326 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0); 10327 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); 10328 10329 if (!CHIP_IS_E1x(sc)) { 10330 int factor = CHIP_REV_IS_EMUL(sc) ? 1000 : 10331 (CHIP_REV_IS_FPGA(sc) ? 400 : 0); 10332 10333 ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); 10334 ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); 10335 10336 /* let the HW do its magic...
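* (the loop below polls ATC_REG_ATC_INIT_DONE in 200 ms steps until it reads 1, with the retry budget scaled up for emulation/FPGA by the 'factor' computed above)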
*/ 10337 do { 10338 DELAY(200000); 10339 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); 10340 } while (factor-- && (val != 1)); 10341 10342 if (val != 1) { 10343 PMD_DRV_LOG(NOTICE, sc, "ATC_INIT failed"); 10344 return -1; 10345 } 10346 } 10347 10348 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); 10349 10350 /* clean the DMAE memory */ 10351 sc->dmae_ready = 1; 10352 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); 10353 10354 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); 10355 10356 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); 10357 10358 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); 10359 10360 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); 10361 10362 bnx2x_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); 10363 bnx2x_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); 10364 bnx2x_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); 10365 bnx2x_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); 10366 10367 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); 10368 10369 /* QM queues pointers table */ 10370 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); 10371 10372 /* soft reset pulse */ 10373 REG_WR(sc, QM_REG_SOFT_RESET, 1); 10374 REG_WR(sc, QM_REG_SOFT_RESET, 0); 10375 10376 if (CNIC_SUPPORT(sc)) 10377 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); 10378 10379 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); 10380 10381 if (!CHIP_REV_IS_SLOW(sc)) { 10382 /* enable hw interrupt from doorbell Q */ 10383 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 10384 } 10385 10386 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 10387 10388 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 10389 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); 10390 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); 10391 10392 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { 10393 if (IS_MF_AFEX(sc)) { 10394 /* 10395 * configure that AFEX and VLAN headers must be 10396 * received in AFEX mode 10397 */ 10398 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); 10399 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); 10400 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 10401 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 10402 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); 10403 } else { 10404 /* 10405 * Bit-map indicating which L2 hdrs may appear 10406 * after the basic Ethernet header 10407 */ 10408 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 10409 sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); 10410 } 10411 } 10412 10413 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); 10414 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); 10415 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); 10416 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); 10417 10418 if (!CHIP_IS_E1x(sc)) { 10419 /* reset VFC memories */ 10420 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 10421 VFC_MEMORIES_RST_REG_CAM_RST | 10422 VFC_MEMORIES_RST_REG_RAM_RST); 10423 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 10424 VFC_MEMORIES_RST_REG_CAM_RST | 10425 VFC_MEMORIES_RST_REG_RAM_RST); 10426 10427 DELAY(20000); 10428 } 10429 10430 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); 10431 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); 10432 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); 10433 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); 10434 10435 /* sync semi rtc */ 10436 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x80000000); 10437 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x80000000); 10438 10439 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); 10440 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); 10441 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); 10442 10443 if (!CHIP_IS_E1x(sc)) { 10444 if (IS_MF_AFEX(sc)) { 10445 /* 10446 * configure that AFEX and VLAN headers must be 10447 * sent in AFEX mode 10448 */ 10449 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); 10450 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); 10451 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 10452 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 10453 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); 10454 } else { 10455 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 10456 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); 10457 } 10458 } 10459 10460 REG_WR(sc, SRC_REG_SOFT_RST, 1); 10461 10462 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); 10463 10464 if (CNIC_SUPPORT(sc)) { 10465 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); 10466 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 10467 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); 10468 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); 10469 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); 10470 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 10471 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); 10472 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 10473 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); 10474 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); 10475 } 10476 REG_WR(sc, SRC_REG_SOFT_RST, 0); 10477 10478 if (sizeof(union cdu_context) != 1024) { 10479 /* we currently assume that a context is 1024 bytes */ 10480 PMD_DRV_LOG(NOTICE, sc, 10481 "please adjust the size of cdu_context(%ld)", 10482 (long)sizeof(union cdu_context)); 10483 } 10484 10485 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); 10486 val = (4 << 24) + (0 << 12) + 1024; 10487 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); 10488 10489 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); 10490 10491 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); 10492 /* enable context validation interrupt from CFC */ 10493 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 10494 10495 /* set the thresholds to prevent CFC/CDU race */ 10496 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); 10497 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); 10498 10499 if (!CHIP_IS_E1x(sc) && BNX2X_NOMCP(sc)) { 10500 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); 10501 } 10502 10503 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); 10504 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); 10505 10506 /* Reset PCIE errors for debug */ 10507 REG_WR(sc, 0x2814, 0xffffffff); 10508 REG_WR(sc, 0x3820, 0xffffffff); 10509 10510 if 
(!CHIP_IS_E1x(sc)) { 10511 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 10512 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 10513 PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); 10514 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, 10515 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | 10516 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | 10517 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); 10518 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, 10519 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | 10520 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | 10521 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); 10522 } 10523 10524 ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON); 10525 10526 /* in E3 this done in per-port section */ 10527 if (!CHIP_IS_E3(sc)) 10528 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc)); 10529 10530 if (CHIP_IS_E1H(sc)) { 10531 /* not applicable for E2 (and above ...) */ 10532 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc)); 10533 } 10534 10535 if (CHIP_REV_IS_SLOW(sc)) { 10536 DELAY(200000); 10537 } 10538 10539 /* finish CFC init */ 10540 val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); 10541 if (val != 1) { 10542 PMD_DRV_LOG(NOTICE, sc, "CFC LL_INIT failed"); 10543 return -1; 10544 } 10545 val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); 10546 if (val != 1) { 10547 PMD_DRV_LOG(NOTICE, sc, "CFC AC_INIT failed"); 10548 return -1; 10549 } 10550 val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10); 10551 if (val != 1) { 10552 PMD_DRV_LOG(NOTICE, sc, "CFC CAM_INIT failed"); 10553 return -1; 10554 } 10555 REG_WR(sc, CFC_REG_DEBUG0, 0); 10556 10557 bnx2x_setup_fan_failure_detection(sc); 10558 10559 /* clear PXP2 attentions */ 10560 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 10561 10562 bnx2x_enable_blocks_attention(sc); 10563 10564 if (!CHIP_REV_IS_SLOW(sc)) { 10565 ecore_enable_blocks_parity(sc); 10566 } 10567 10568 if (!BNX2X_NOMCP(sc)) { 10569 if (CHIP_IS_E1x(sc)) { 10570 bnx2x_common_init_phy(sc); 10571 } 10572 } 10573 10574 return 0; 10575 } 10576 10577 /** 10578 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. 10579 * 10580 * @sc: driver handle 10581 */ 10582 static int bnx2x_init_hw_common_chip(struct bnx2x_softc *sc) 10583 { 10584 int rc = bnx2x_init_hw_common(sc); 10585 10586 if (rc) { 10587 return rc; 10588 } 10589 10590 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 10591 if (!BNX2X_NOMCP(sc)) { 10592 bnx2x_common_init_phy(sc); 10593 } 10594 10595 return 0; 10596 } 10597 10598 static int bnx2x_init_hw_port(struct bnx2x_softc *sc) 10599 { 10600 int port = SC_PORT(sc); 10601 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 10602 uint32_t low, high; 10603 uint32_t val; 10604 10605 PMD_DRV_LOG(DEBUG, sc, "starting port init for port %d", port); 10606 10607 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0); 10608 10609 ecore_init_block(sc, BLOCK_MISC, init_phase); 10610 ecore_init_block(sc, BLOCK_PXP, init_phase); 10611 ecore_init_block(sc, BLOCK_PXP2, init_phase); 10612 10613 /* 10614 * Timers bug workaround: disables the pf_master bit in pglue at 10615 * common phase, we need to enable it here before any dmae access are 10616 * attempted. 
Therefore we manually added the enable-master to the 10617 * port phase (it also happens in the function phase) 10618 */ 10619 if (!CHIP_IS_E1x(sc)) { 10620 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 10621 } 10622 10623 ecore_init_block(sc, BLOCK_ATC, init_phase); 10624 ecore_init_block(sc, BLOCK_DMAE, init_phase); 10625 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 10626 ecore_init_block(sc, BLOCK_QM, init_phase); 10627 10628 ecore_init_block(sc, BLOCK_TCM, init_phase); 10629 ecore_init_block(sc, BLOCK_UCM, init_phase); 10630 ecore_init_block(sc, BLOCK_CCM, init_phase); 10631 ecore_init_block(sc, BLOCK_XCM, init_phase); 10632 10633 /* QM cid (connection) count */ 10634 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); 10635 10636 if (CNIC_SUPPORT(sc)) { 10637 ecore_init_block(sc, BLOCK_TM, init_phase); 10638 REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port * 4, 20); 10639 REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port * 4, 31); 10640 } 10641 10642 ecore_init_block(sc, BLOCK_DORQ, init_phase); 10643 10644 ecore_init_block(sc, BLOCK_BRB1, init_phase); 10645 10646 if (CHIP_IS_E1H(sc)) { 10647 if (IS_MF(sc)) { 10648 low = (BNX2X_ONE_PORT(sc) ? 160 : 246); 10649 } else if (sc->mtu > 4096) { 10650 if (BNX2X_ONE_PORT(sc)) { 10651 low = 160; 10652 } else { 10653 val = sc->mtu; 10654 /* (24*1024 + val*4)/256 */ 10655 low = (96 + (val / 64) + ((val % 64) ? 1 : 0)); 10656 } 10657 } else { 10658 low = (BNX2X_ONE_PORT(sc) ? 80 : 160); 10659 } 10660 high = (low + 56); /* 14*1024/256 */ 10661 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port * 4, low); 10662 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port * 4, high); 10663 } 10664 10665 if (CHIP_IS_MODE_4_PORT(sc)) { 10666 REG_WR(sc, SC_PORT(sc) ? 10667 BRB1_REG_MAC_GUARANTIED_1 : 10668 BRB1_REG_MAC_GUARANTIED_0, 40); 10669 } 10670 10671 ecore_init_block(sc, BLOCK_PRS, init_phase); 10672 if (CHIP_IS_E3B0(sc)) { 10673 if (IS_MF_AFEX(sc)) { 10674 /* configure headers for AFEX mode */ 10675 if (SC_PORT(sc)) { 10676 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC_PORT_1, 10677 0xE); 10678 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0_PORT_1, 10679 0x6); 10680 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS_PORT_1, 0xA); 10681 } else { 10682 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC_PORT_0, 10683 0xE); 10684 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 10685 0x6); 10686 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 10687 } 10688 } else { 10689 /* Ovlan exists only if we are in multi-function + 10690 * switch-dependent mode, in switch-independent there 10691 * is no ovlan headers 10692 */ 10693 REG_WR(sc, SC_PORT(sc) ? 10694 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 10695 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 10696 (sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6)); 10697 } 10698 } 10699 10700 ecore_init_block(sc, BLOCK_TSDM, init_phase); 10701 ecore_init_block(sc, BLOCK_CSDM, init_phase); 10702 ecore_init_block(sc, BLOCK_USDM, init_phase); 10703 ecore_init_block(sc, BLOCK_XSDM, init_phase); 10704 10705 ecore_init_block(sc, BLOCK_TSEM, init_phase); 10706 ecore_init_block(sc, BLOCK_USEM, init_phase); 10707 ecore_init_block(sc, BLOCK_CSEM, init_phase); 10708 ecore_init_block(sc, BLOCK_XSEM, init_phase); 10709 10710 ecore_init_block(sc, BLOCK_UPB, init_phase); 10711 ecore_init_block(sc, BLOCK_XPB, init_phase); 10712 10713 ecore_init_block(sc, BLOCK_PBF, init_phase); 10714 10715 if (CHIP_IS_E1x(sc)) { 10716 /* configure PBF to work without PAUSE mtu 9000 */ 10717 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 0); 10718 10719 /* update threshold */ 10720 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, (9040 / 16)); 10721 /* update init credit */ 10722 REG_WR(sc, PBF_REG_P0_INIT_CRD + port * 4, 10723 (9040 / 16) + 553 - 22); 10724 10725 /* probe changes */ 10726 REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 1); 10727 DELAY(50); 10728 REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 0); 10729 } 10730 10731 if (CNIC_SUPPORT(sc)) { 10732 ecore_init_block(sc, BLOCK_SRC, init_phase); 10733 } 10734 10735 ecore_init_block(sc, BLOCK_CDU, init_phase); 10736 ecore_init_block(sc, BLOCK_CFC, init_phase); 10737 ecore_init_block(sc, BLOCK_HC, init_phase); 10738 ecore_init_block(sc, BLOCK_IGU, init_phase); 10739 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 10740 /* init aeu_mask_attn_func_0/1: 10741 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 10742 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 10743 * bits 4-7 are used for "per vn group attention" */ 10744 val = IS_MF(sc) ? 0xF7 : 0x7; 10745 val |= 0x10; 10746 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4, val); 10747 10748 ecore_init_block(sc, BLOCK_NIG, init_phase); 10749 10750 if (!CHIP_IS_E1x(sc)) { 10751 /* Bit-map indicating which L2 hdrs may appear after the 10752 * basic Ethernet header 10753 */ 10754 if (IS_MF_AFEX(sc)) { 10755 REG_WR(sc, SC_PORT(sc) ? 10756 NIG_REG_P1_HDRS_AFTER_BASIC : 10757 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 10758 } else { 10759 REG_WR(sc, SC_PORT(sc) ? 10760 NIG_REG_P1_HDRS_AFTER_BASIC : 10761 NIG_REG_P0_HDRS_AFTER_BASIC, 10762 IS_MF_SD(sc) ? 7 : 6); 10763 } 10764 10765 if (CHIP_IS_E3(sc)) { 10766 REG_WR(sc, SC_PORT(sc) ? 10767 NIG_REG_LLH1_MF_MODE : 10768 NIG_REG_LLH_MF_MODE, IS_MF(sc)); 10769 } 10770 } 10771 if (!CHIP_IS_E3(sc)) { 10772 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1); 10773 } 10774 10775 /* 0x2 disable mf_ov, 0x1 enable */ 10776 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port * 4, 10777 (IS_MF_SD(sc) ? 0x1 : 0x2)); 10778 10779 if (!CHIP_IS_E1x(sc)) { 10780 val = 0; 10781 switch (sc->devinfo.mf_info.mf_mode) { 10782 case MULTI_FUNCTION_SD: 10783 val = 1; 10784 break; 10785 case MULTI_FUNCTION_SI: 10786 case MULTI_FUNCTION_AFEX: 10787 val = 2; 10788 break; 10789 } 10790 10791 REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE : 10792 NIG_REG_LLH0_CLS_TYPE), val); 10793 } 10794 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port * 4, 0); 10795 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port * 4, 0); 10796 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port * 4, 1); 10797 10798 /* If SPIO5 is set to generate interrupts, enable it for this port */ 10799 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 10800 if (val & MISC_SPIO_SPIO5) { 10801 uint32_t reg_addr = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 10802 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 10803 val = REG_RD(sc, reg_addr); 10804 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 10805 REG_WR(sc, reg_addr, val); 10806 } 10807 10808 return 0; 10809 } 10810 10811 static uint32_t 10812 bnx2x_flr_clnup_reg_poll(struct bnx2x_softc *sc, uint32_t reg, 10813 uint32_t expected, uint32_t poll_count) 10814 { 10815 uint32_t cur_cnt = poll_count; 10816 uint32_t val; 10817 10818 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { 10819 DELAY(FLR_WAIT_INTERVAL); 10820 } 10821 10822 return val; 10823 } 10824 10825 static int 10826 bnx2x_flr_clnup_poll_hw_counter(struct bnx2x_softc *sc, uint32_t reg, 10827 __rte_unused const char *msg, uint32_t poll_cnt) 10828 { 10829 uint32_t val = bnx2x_flr_clnup_reg_poll(sc, reg, 0, poll_cnt); 10830 10831 if (val != 0) { 10832 PMD_DRV_LOG(NOTICE, sc, "%s usage count=%d", msg, val); 10833 return -1; 10834 } 10835 10836 return 0; 10837 } 10838 10839 /* Common routines with VF FLR cleanup */ 10840 static uint32_t bnx2x_flr_clnup_poll_count(struct bnx2x_softc *sc) 10841 { 10842 /* adjust polling timeout */ 10843 if (CHIP_REV_IS_EMUL(sc)) { 10844 return FLR_POLL_CNT * 2000; 10845 } 10846 10847 if (CHIP_REV_IS_FPGA(sc)) { 10848 return FLR_POLL_CNT * 120; 10849 } 10850 10851 return FLR_POLL_CNT; 10852 } 10853 10854 static int bnx2x_poll_hw_usage_counters(struct bnx2x_softc *sc, uint32_t poll_cnt) 10855 { 10856 /* wait for CFC PF usage-counter to zero (includes all the VFs) */ 10857 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10858 CFC_REG_NUM_LCIDS_INSIDE_PF, 10859 "CFC PF usage counter timed out", 10860 poll_cnt)) { 10861 return -1; 10862 } 10863 10864 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ 10865 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10866 DORQ_REG_PF_USAGE_CNT, 10867 "DQ PF usage counter timed out", 10868 poll_cnt)) { 10869 return -1; 10870 } 10871 10872 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ 10873 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10874 QM_REG_PF_USG_CNT_0 + 4 * SC_FUNC(sc), 10875 "QM PF usage counter timed out", 10876 poll_cnt)) { 10877 return -1; 10878 } 10879 10880 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ 10881 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10882 TM_REG_LIN0_VNIC_UC + 4 * SC_PORT(sc), 10883 "Timers VNIC usage counter timed out", 10884 poll_cnt)) { 10885 return -1; 10886 } 10887 10888 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10889 TM_REG_LIN0_NUM_SCANS + 10890 4 * SC_PORT(sc), 10891 "Timers NUM_SCANS usage counter timed out", 10892 poll_cnt)) { 10893 return -1; 10894 } 10895 10896 /* Wait for the DMAE PF usage counter to zero */ 10897 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10898 dmae_reg_go_c[INIT_DMAE_C(sc)], 10899 "DMAE command register timed out", 10900 poll_cnt)) { 10901 return -1; 10902 } 10903 10904 return 0; 10905 } 10906 10907 #define OP_GEN_PARAM(param) \ 10908 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) 10909 #define OP_GEN_TYPE(type) \ 10910 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) 10911 #define OP_GEN_AGG_VECT(index) \ 10912 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) 10913 10914 static int 10915 bnx2x_send_final_clnup(struct bnx2x_softc *sc, uint8_t clnup_func, 10916 uint32_t poll_cnt) 10917 { 10918 uint32_t op_gen_command = 0; 10919 uint32_t comp_addr = (BAR_CSTRORM_INTMEM + 10920 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); 10921 int ret = 0; 10922 10923 if (REG_RD(sc, comp_addr)) { 10924 PMD_DRV_LOG(NOTICE, sc, 10925
"Cleanup complete was not 0 before sending"); 10926 return -1; 10927 } 10928 10929 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); 10930 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); 10931 op_gen_command |= OP_GEN_AGG_VECT(clnup_func); 10932 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; 10933 10934 REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); 10935 10936 if (bnx2x_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { 10937 PMD_DRV_LOG(NOTICE, sc, "FW final cleanup did not succeed"); 10938 PMD_DRV_LOG(DEBUG, sc, "At timeout completion address contained %x", 10939 (REG_RD(sc, comp_addr))); 10940 rte_panic("FLR cleanup failed"); 10941 return -1; 10942 } 10943 10944 /* Zero completion for nxt FLR */ 10945 REG_WR(sc, comp_addr, 0); 10946 10947 return ret; 10948 } 10949 10950 static void 10951 bnx2x_pbf_pN_buf_flushed(struct bnx2x_softc *sc, struct pbf_pN_buf_regs *regs, 10952 uint32_t poll_count) 10953 { 10954 uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; 10955 uint32_t cur_cnt = poll_count; 10956 10957 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); 10958 crd = crd_start = REG_RD(sc, regs->crd); 10959 init_crd = REG_RD(sc, regs->init_crd); 10960 10961 while ((crd != init_crd) && 10962 ((uint32_t) ((int32_t) crd_freed - (int32_t) crd_freed_start) < 10963 (init_crd - crd_start))) { 10964 if (cur_cnt--) { 10965 DELAY(FLR_WAIT_INTERVAL); 10966 crd = REG_RD(sc, regs->crd); 10967 crd_freed = REG_RD(sc, regs->crd_freed); 10968 } else { 10969 break; 10970 } 10971 } 10972 } 10973 10974 static void 10975 bnx2x_pbf_pN_cmd_flushed(struct bnx2x_softc *sc, struct pbf_pN_cmd_regs *regs, 10976 uint32_t poll_count) 10977 { 10978 uint32_t occup, to_free, freed, freed_start; 10979 uint32_t cur_cnt = poll_count; 10980 10981 occup = to_free = REG_RD(sc, regs->lines_occup); 10982 freed = freed_start = REG_RD(sc, regs->lines_freed); 10983 10984 while (occup && 10985 ((uint32_t) ((int32_t) freed - (int32_t) freed_start) < 10986 to_free)) { 10987 if (cur_cnt--) { 10988 DELAY(FLR_WAIT_INTERVAL); 10989 occup = REG_RD(sc, regs->lines_occup); 10990 freed = REG_RD(sc, regs->lines_freed); 10991 } else { 10992 break; 10993 } 10994 } 10995 } 10996 10997 static void bnx2x_tx_hw_flushed(struct bnx2x_softc *sc, uint32_t poll_count) 10998 { 10999 struct pbf_pN_cmd_regs cmd_regs[] = { 11000 {0, (CHIP_IS_E3B0(sc)) ? 11001 PBF_REG_TQ_OCCUPANCY_Q0 : PBF_REG_P0_TQ_OCCUPANCY, 11002 (CHIP_IS_E3B0(sc)) ? 11003 PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT}, 11004 {1, (CHIP_IS_E3B0(sc)) ? 11005 PBF_REG_TQ_OCCUPANCY_Q1 : PBF_REG_P1_TQ_OCCUPANCY, 11006 (CHIP_IS_E3B0(sc)) ? 11007 PBF_REG_TQ_LINES_FREED_CNT_Q1 : PBF_REG_P1_TQ_LINES_FREED_CNT}, 11008 {4, (CHIP_IS_E3B0(sc)) ? 11009 PBF_REG_TQ_OCCUPANCY_LB_Q : PBF_REG_P4_TQ_OCCUPANCY, 11010 (CHIP_IS_E3B0(sc)) ? 11011 PBF_REG_TQ_LINES_FREED_CNT_LB_Q : 11012 PBF_REG_P4_TQ_LINES_FREED_CNT} 11013 }; 11014 11015 struct pbf_pN_buf_regs buf_regs[] = { 11016 {0, (CHIP_IS_E3B0(sc)) ? 11017 PBF_REG_INIT_CRD_Q0 : PBF_REG_P0_INIT_CRD, 11018 (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q0 : PBF_REG_P0_CREDIT, 11019 (CHIP_IS_E3B0(sc)) ? 11020 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : 11021 PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, 11022 {1, (CHIP_IS_E3B0(sc)) ? 11023 PBF_REG_INIT_CRD_Q1 : PBF_REG_P1_INIT_CRD, 11024 (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q1 : PBF_REG_P1_CREDIT, 11025 (CHIP_IS_E3B0(sc)) ? 11026 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : 11027 PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, 11028 {4, (CHIP_IS_E3B0(sc)) ? 
11029 PBF_REG_INIT_CRD_LB_Q : PBF_REG_P4_INIT_CRD, 11030 (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_LB_Q : PBF_REG_P4_CREDIT, 11031 (CHIP_IS_E3B0(sc)) ? 11032 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : 11033 PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, 11034 }; 11035 11036 uint32_t i; 11037 11038 /* Verify the command queues are flushed P0, P1, P4 */ 11039 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) { 11040 bnx2x_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count); 11041 } 11042 11043 /* Verify the transmission buffers are flushed P0, P1, P4 */ 11044 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) { 11045 bnx2x_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count); 11046 } 11047 } 11048 11049 static void bnx2x_hw_enable_status(struct bnx2x_softc *sc) 11050 { 11051 __rte_unused uint32_t val; 11052 11053 val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF); 11054 PMD_DRV_LOG(DEBUG, sc, "CFC_REG_WEAK_ENABLE_PF is 0x%x", val); 11055 11056 val = REG_RD(sc, PBF_REG_DISABLE_PF); 11057 PMD_DRV_LOG(DEBUG, sc, "PBF_REG_DISABLE_PF is 0x%x", val); 11058 11059 val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN); 11060 PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSI_EN is 0x%x", val); 11061 11062 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN); 11063 PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSIX_EN is 0x%x", val); 11064 11065 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK); 11066 PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x", val); 11067 11068 val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); 11069 PMD_DRV_LOG(DEBUG, sc, 11070 "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x", val); 11071 11072 val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); 11073 PMD_DRV_LOG(DEBUG, sc, 11074 "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x", val); 11075 11076 val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 11077 PMD_DRV_LOG(DEBUG, sc, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x", 11078 val); 11079 } 11080 11081 /** 11082 * bnx2x_pf_flr_clnup 11083 * a. re-enable target read on the PF 11084 * b. poll cfc per function usage counter 11085 * c. poll the qm per function usage counter 11086 * d. poll the tm per function usage counter 11087 * e. poll the tm per function scan-done indication 11088 * f. clear the dmae channel associated with the PF 11089 * g. zero the igu 'trailing edge' and 'leading edge' regs (attentions) 11090 * h.
call the common flr cleanup code with -1 (pf indication) 11091 */ 11092 static int bnx2x_pf_flr_clnup(struct bnx2x_softc *sc) 11093 { 11094 uint32_t poll_cnt = bnx2x_flr_clnup_poll_count(sc); 11095 11096 /* Re-enable PF target read access */ 11097 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 11098 11099 /* Poll HW usage counters */ 11100 if (bnx2x_poll_hw_usage_counters(sc, poll_cnt)) { 11101 return -1; 11102 } 11103 11104 /* Zero the igu 'trailing edge' and 'leading edge' */ 11105 11106 /* Send the FW cleanup command */ 11107 if (bnx2x_send_final_clnup(sc, (uint8_t) SC_FUNC(sc), poll_cnt)) { 11108 return -1; 11109 } 11110 11111 /* ATC cleanup */ 11112 11113 /* Verify TX hw is flushed */ 11114 bnx2x_tx_hw_flushed(sc, poll_cnt); 11115 11116 /* Wait 100ms (not adjusted according to platform) */ 11117 DELAY(100000); 11118 11119 /* Verify no pending pci transactions */ 11120 if (bnx2x_is_pcie_pending(sc)) { 11121 PMD_DRV_LOG(NOTICE, sc, "PCIE Transactions still pending"); 11122 } 11123 11124 /* Debug */ 11125 bnx2x_hw_enable_status(sc); 11126 11127 /* 11128 * Master enable - Due to WB DMAE writes performed before this 11129 * register is re-initialized as part of the regular function init 11130 */ 11131 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 11132 11133 return 0; 11134 } 11135 11136 static int bnx2x_init_hw_func(struct bnx2x_softc *sc) 11137 { 11138 int port = SC_PORT(sc); 11139 int func = SC_FUNC(sc); 11140 int init_phase = PHASE_PF0 + func; 11141 struct ecore_ilt *ilt = sc->ilt; 11142 uint16_t cdu_ilt_start; 11143 uint32_t addr, val; 11144 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; 11145 int main_mem_width, rc; 11146 uint32_t i; 11147 11148 PMD_DRV_LOG(DEBUG, sc, "starting func init for func %d", func); 11149 11150 /* FLR cleanup */ 11151 if (!CHIP_IS_E1x(sc)) { 11152 rc = bnx2x_pf_flr_clnup(sc); 11153 if (rc) { 11154 PMD_DRV_LOG(NOTICE, sc, "FLR cleanup failed!"); 11155 return rc; 11156 } 11157 } 11158 11159 /* set MSI reconfigure capability */ 11160 if (sc->devinfo.int_block == INT_BLOCK_HC) { 11161 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 11162 val = REG_RD(sc, addr); 11163 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 11164 REG_WR(sc, addr, val); 11165 } 11166 11167 ecore_init_block(sc, BLOCK_PXP, init_phase); 11168 ecore_init_block(sc, BLOCK_PXP2, init_phase); 11169 11170 ilt = sc->ilt; 11171 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 11172 11173 for (i = 0; i < L2_ILT_LINES(sc); i++) { 11174 ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; 11175 ilt->lines[cdu_ilt_start + i].page_mapping = 11176 (rte_iova_t)sc->context[i].vcxt_dma.paddr; 11177 ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; 11178 } 11179 ecore_ilt_init_op(sc, INITOP_SET); 11180 11181 REG_WR(sc, PRS_REG_NIC_MODE, 1); 11182 11183 if (!CHIP_IS_E1x(sc)) { 11184 uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; 11185 11186 /* Turn on a single ISR mode in IGU if driver is going to use 11187 * INT#x or MSI 11188 */ 11189 if (sc->interrupt_mode == INTR_MODE_INTX || 11190 sc->interrupt_mode == INTR_MODE_MSI) 11191 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 11192 /* 11193 * Timers workaround bug: function init part. 
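* (This is step 2.b of the E2 timers-block workaround described in the common-phase init comment in bnx2x_init_hw_common() above.)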
11194 * Need to wait 20msec after initializing ILT, 11195 * needed to make sure there are no requests in 11196 * one of the PXP internal queues with "old" ILT addresses 11197 */ 11198 DELAY(20000); 11199 11200 /* 11201 * Master enable - Due to WB DMAE writes performed before this 11202 * register is re-initialized as part of the regular function 11203 * init 11204 */ 11205 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 11206 /* Enable the function in IGU */ 11207 REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); 11208 } 11209 11210 sc->dmae_ready = 1; 11211 11212 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 11213 11214 if (!CHIP_IS_E1x(sc)) 11215 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); 11216 11217 ecore_init_block(sc, BLOCK_ATC, init_phase); 11218 ecore_init_block(sc, BLOCK_DMAE, init_phase); 11219 ecore_init_block(sc, BLOCK_NIG, init_phase); 11220 ecore_init_block(sc, BLOCK_SRC, init_phase); 11221 ecore_init_block(sc, BLOCK_MISC, init_phase); 11222 ecore_init_block(sc, BLOCK_TCM, init_phase); 11223 ecore_init_block(sc, BLOCK_UCM, init_phase); 11224 ecore_init_block(sc, BLOCK_CCM, init_phase); 11225 ecore_init_block(sc, BLOCK_XCM, init_phase); 11226 ecore_init_block(sc, BLOCK_TSEM, init_phase); 11227 ecore_init_block(sc, BLOCK_USEM, init_phase); 11228 ecore_init_block(sc, BLOCK_CSEM, init_phase); 11229 ecore_init_block(sc, BLOCK_XSEM, init_phase); 11230 11231 if (!CHIP_IS_E1x(sc)) 11232 REG_WR(sc, QM_REG_PF_EN, 1); 11233 11234 if (!CHIP_IS_E1x(sc)) { 11235 REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 11236 REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 11237 REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 11238 REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 11239 } 11240 ecore_init_block(sc, BLOCK_QM, init_phase); 11241 11242 ecore_init_block(sc, BLOCK_TM, init_phase); 11243 ecore_init_block(sc, BLOCK_DORQ, init_phase); 11244 11245 ecore_init_block(sc, BLOCK_BRB1, init_phase); 11246 ecore_init_block(sc, BLOCK_PRS, init_phase); 11247 ecore_init_block(sc, BLOCK_TSDM, init_phase); 11248 ecore_init_block(sc, BLOCK_CSDM, init_phase); 11249 ecore_init_block(sc, BLOCK_USDM, init_phase); 11250 ecore_init_block(sc, BLOCK_XSDM, init_phase); 11251 ecore_init_block(sc, BLOCK_UPB, init_phase); 11252 ecore_init_block(sc, BLOCK_XPB, init_phase); 11253 ecore_init_block(sc, BLOCK_PBF, init_phase); 11254 if (!CHIP_IS_E1x(sc)) 11255 REG_WR(sc, PBF_REG_DISABLE_PF, 0); 11256 11257 ecore_init_block(sc, BLOCK_CDU, init_phase); 11258 11259 ecore_init_block(sc, BLOCK_CFC, init_phase); 11260 11261 if (!CHIP_IS_E1x(sc)) 11262 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); 11263 11264 if (IS_MF(sc)) { 11265 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1); 11266 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, OVLAN(sc)); 11267 } 11268 11269 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 11270 11271 /* HC init per function */ 11272 if (sc->devinfo.int_block == INT_BLOCK_HC) { 11273 if (CHIP_IS_E1H(sc)) { 11274 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0); 11275 11276 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0); 11277 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0); 11278 } 11279 ecore_init_block(sc, BLOCK_HC, init_phase); 11280 11281 } else { 11282 uint32_t num_segs, sb_idx, prod_offset; 11283 11284 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0); 11285 11286 if (!CHIP_IS_E1x(sc)) { 11287 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 11288 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 11289 } 11290 11291 
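/*
 * IGU clean-up sketch (the code below is authoritative): after the
 * block init, on non-E1x chips every producer/consumer memory entry
 * owned by this function is zeroed, e.g.
 *   REG_WR(sc, IGU_REG_PROD_CONS_MEMORY + (prod_offset + i) * 4, 0);
 * and a consumer update of 0 (IGU_INT_NOP) plus an SB cleanup command
 * is sent for each status block, so no stale indices survive from a
 * previous driver instance.
 */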
ecore_init_block(sc, BLOCK_IGU, init_phase); 11292 11293 if (!CHIP_IS_E1x(sc)) { 11294 int dsb_idx = 0; 11295 /** 11296 * Producer memory: 11297 * E2 mode: addresses 0-135 correspond to the mapping memory; 11298 * 136 - PF0 default prod; 137 - PF1 default prod; 11299 * 138 - PF2 default prod; 139 - PF3 default prod; 11300 * 140 - PF0 attn prod; 141 - PF1 attn prod; 11301 * 142 - PF2 attn prod; 143 - PF3 attn prod; 11302 * 144-147 reserved. 11303 * 11304 * E1.5 mode - In backward compatible mode; 11305 * for non default SB; each even line in the memory 11306 * holds the U producer and each odd line holds 11307 * the C producer. The first 128 producers are for 11308 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 11309 * producers are for the DSB for each PF. 11310 * Each PF has five segments: (the order inside each 11311 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 11312 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 11313 * 144-147 attn prods; 11314 */ 11315 /* non-default-status-blocks */ 11316 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 11317 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 11318 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { 11319 prod_offset = (sc->igu_base_sb + sb_idx) * 11320 num_segs; 11321 11322 for (i = 0; i < num_segs; i++) { 11323 addr = IGU_REG_PROD_CONS_MEMORY + 11324 (prod_offset + i) * 4; 11325 REG_WR(sc, addr, 0); 11326 } 11327 /* send consumer update with value 0 */ 11328 bnx2x_ack_sb(sc, sc->igu_base_sb + sb_idx, 11329 USTORM_ID, 0, IGU_INT_NOP, 1); 11330 bnx2x_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); 11331 } 11332 11333 /* default-status-blocks */ 11334 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 11335 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; 11336 11337 if (CHIP_IS_MODE_4_PORT(sc)) 11338 dsb_idx = SC_FUNC(sc); 11339 else 11340 dsb_idx = SC_VN(sc); 11341 11342 prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? 11343 IGU_BC_BASE_DSB_PROD + dsb_idx : 11344 IGU_NORM_BASE_DSB_PROD + dsb_idx); 11345 11346 /* 11347 * igu prods come in chunks of E1HVN_MAX (4) - 11348 * it does not matter what the current chip mode is 11349 */ 11350 for (i = 0; i < (num_segs * E1HVN_MAX); i += E1HVN_MAX) { 11351 addr = IGU_REG_PROD_CONS_MEMORY + 11352 (prod_offset + i) * 4; 11353 REG_WR(sc, addr, 0); 11354 } 11355 /* send consumer update with 0 */ 11356 if (CHIP_INT_MODE_IS_BC(sc)) { 11357 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11358 USTORM_ID, 0, IGU_INT_NOP, 1); 11359 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11360 CSTORM_ID, 0, IGU_INT_NOP, 1); 11361 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11362 XSTORM_ID, 0, IGU_INT_NOP, 1); 11363 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11364 TSTORM_ID, 0, IGU_INT_NOP, 1); 11365 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11366 ATTENTION_ID, 0, IGU_INT_NOP, 1); 11367 } else { 11368 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11369 USTORM_ID, 0, IGU_INT_NOP, 1); 11370 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11371 ATTENTION_ID, 0, IGU_INT_NOP, 1); 11372 } 11373 bnx2x_igu_clear_sb(sc, sc->igu_dsb_id); 11374 11375 /* !!!
these should become driver const once 11376 rf-tool supports split-68 const */ 11377 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 11378 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 11379 REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); 11380 REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); 11381 REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); 11382 REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); 11383 } 11384 } 11385 11386 /* Reset PCIE errors for debug */ 11387 REG_WR(sc, 0x2114, 0xffffffff); 11388 REG_WR(sc, 0x2120, 0xffffffff); 11389 11390 if (CHIP_IS_E1x(sc)) { 11391 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords */ 11392 main_mem_base = HC_REG_MAIN_MEMORY + 11393 SC_PORT(sc) * (main_mem_size * 4); 11394 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; 11395 main_mem_width = 8; 11396 11397 val = REG_RD(sc, main_mem_prty_clr); 11398 if (val) { 11399 PMD_DRV_LOG(DEBUG, sc, 11400 "Parity errors in HC block during function init (0x%x)!", 11401 val); 11402 } 11403 11404 /* Clear "false" parity errors in MSI-X table */ 11405 for (i = main_mem_base; 11406 i < main_mem_base + main_mem_size * 4; 11407 i += main_mem_width) { 11408 bnx2x_read_dmae(sc, i, main_mem_width / 4); 11409 bnx2x_write_dmae(sc, BNX2X_SP_MAPPING(sc, wb_data), 11410 i, main_mem_width / 4); 11411 } 11412 /* Clear HC parity attention */ 11413 REG_RD(sc, main_mem_prty_clr); 11414 } 11415 11416 /* Enable STORMs SP logging */ 11417 REG_WR8(sc, BAR_USTRORM_INTMEM + 11418 USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 11419 REG_WR8(sc, BAR_TSTRORM_INTMEM + 11420 TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 11421 REG_WR8(sc, BAR_CSTRORM_INTMEM + 11422 CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 11423 REG_WR8(sc, BAR_XSTRORM_INTMEM + 11424 XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 11425 11426 elink_phy_probe(&sc->link_params); 11427 11428 return 0; 11429 } 11430 11431 static void bnx2x_link_reset(struct bnx2x_softc *sc) 11432 { 11433 if (!BNX2X_NOMCP(sc)) { 11434 bnx2x_acquire_phy_lock(sc); 11435 elink_lfa_reset(&sc->link_params, &sc->link_vars); 11436 bnx2x_release_phy_lock(sc); 11437 } else { 11438 if (!CHIP_REV_IS_SLOW(sc)) { 11439 PMD_DRV_LOG(WARNING, sc, 11440 "Bootcode is missing - cannot reset link"); 11441 } 11442 } 11443 } 11444 11445 static void bnx2x_reset_port(struct bnx2x_softc *sc) 11446 { 11447 int port = SC_PORT(sc); 11448 uint32_t val; 11449 11450 /* reset physical Link */ 11451 bnx2x_link_reset(sc); 11452 11453 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0); 11454 11455 /* Do not rcv packets to BRB */ 11456 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port * 4, 0x0); 11457 /* Do not direct rcv packets that are not for MCP to the BRB */ 11458 REG_WR(sc, (port ? 
NIG_REG_LLH1_BRB1_NOT_MCP : 11459 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 11460 11461 /* Configure AEU */ 11462 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4, 0); 11463 11464 DELAY(100000); 11465 11466 /* Check for BRB port occupancy */ 11467 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port * 4); 11468 if (val) { 11469 PMD_DRV_LOG(DEBUG, sc, 11470 "BRB1 is not empty, %d blocks are occupied", val); 11471 } 11472 } 11473 11474 static void bnx2x_ilt_wr(struct bnx2x_softc *sc, uint32_t index, rte_iova_t addr) 11475 { 11476 int reg; 11477 uint32_t wb_write[2]; 11478 11479 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index * 8; 11480 11481 wb_write[0] = ONCHIP_ADDR1(addr); 11482 wb_write[1] = ONCHIP_ADDR2(addr); 11483 REG_WR_DMAE(sc, reg, wb_write, 2); 11484 } 11485 11486 static void bnx2x_clear_func_ilt(struct bnx2x_softc *sc, uint32_t func) 11487 { 11488 uint32_t i, base = FUNC_ILT_BASE(func); 11489 for (i = base; i < base + ILT_PER_FUNC; i++) { 11490 bnx2x_ilt_wr(sc, i, 0); 11491 } 11492 } 11493 11494 static void bnx2x_reset_func(struct bnx2x_softc *sc) 11495 { 11496 struct bnx2x_fastpath *fp; 11497 int port = SC_PORT(sc); 11498 int func = SC_FUNC(sc); 11499 int i; 11500 11501 /* Disable the function in the FW */ 11502 REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 11503 REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 11504 REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 11505 REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 11506 11507 /* FP SBs */ 11508 FOR_EACH_ETH_QUEUE(sc, i) { 11509 fp = &sc->fp[i]; 11510 REG_WR8(sc, BAR_CSTRORM_INTMEM + 11511 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 11512 SB_DISABLED); 11513 } 11514 11515 /* SP SB */ 11516 REG_WR8(sc, BAR_CSTRORM_INTMEM + 11517 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), SB_DISABLED); 11518 11519 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { 11520 REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 11521 0); 11522 } 11523 11524 /* Configure IGU */ 11525 if (sc->devinfo.int_block == INT_BLOCK_HC) { 11526 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0); 11527 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0); 11528 } else { 11529 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 11530 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 11531 } 11532 11533 if (CNIC_LOADED(sc)) { 11534 /* Disable Timer scan */ 11535 REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port * 4, 0); 11536 /* 11537 * Wait for at least 10ms and up to 2 second for the timers 11538 * scan to complete 11539 */ 11540 for (i = 0; i < 200; i++) { 11541 DELAY(10000); 11542 if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port * 4)) 11543 break; 11544 } 11545 } 11546 11547 /* Clear ILT */ 11548 bnx2x_clear_func_ilt(sc, func); 11549 11550 /* 11551 * Timers workaround bug for E2: if this is vnic-3, 11552 * we need to set the entire ilt range for this timers. 
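* (A dummy TM client spanning entries 0 .. ILT_NUM_PAGE_ENTRIES - 1 is cleared below for this purpose, mirroring the common-phase workaround.)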

static void bnx2x_release_firmware(struct bnx2x_softc *sc)
{
	rte_free(sc->init_ops);
	rte_free(sc->init_ops_offsets);
	rte_free(sc->init_data);
	rte_free(sc->iro_array);
}

static int bnx2x_init_firmware(struct bnx2x_softc *sc)
{
	uint32_t len, i;
	uint8_t *p = sc->firmware;
	uint32_t off[24];

	for (i = 0; i < 24; ++i)
		off[i] = rte_be_to_cpu_32(*((uint32_t *)sc->firmware + i));

	len = off[0];
	sc->init_ops = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
	if (!sc->init_ops)
		goto alloc_failed;
	bnx2x_data_to_init_ops(p + off[1], sc->init_ops, len);

	len = off[2];
	sc->init_ops_offsets = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
	if (!sc->init_ops_offsets)
		goto alloc_failed;
	bnx2x_data_to_init_offsets(p + off[3], sc->init_ops_offsets, len);

	len = off[4];
	sc->init_data = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
	if (!sc->init_data)
		goto alloc_failed;
	bnx2x_data_to_init_data(p + off[5], sc->init_data, len);

	sc->tsem_int_table_data = p + off[7];
	sc->tsem_pram_data = p + off[9];
	sc->usem_int_table_data = p + off[11];
	sc->usem_pram_data = p + off[13];
	sc->csem_int_table_data = p + off[15];
	sc->csem_pram_data = p + off[17];
	sc->xsem_int_table_data = p + off[19];
	sc->xsem_pram_data = p + off[21];

	len = off[22];
	sc->iro_array = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
	if (!sc->iro_array)
		goto alloc_failed;
	bnx2x_data_to_iro_array(p + off[23], sc->iro_array, len);

	return 0;

alloc_failed:
	bnx2x_release_firmware(sc);
	return -1;
}

static int cut_gzip_prefix(const uint8_t *zbuf, int len)
{
#define MIN_PREFIX_SIZE (10)

	int n = MIN_PREFIX_SIZE;
	uint16_t xlen;

	if (!(zbuf[0] == 0x1f && zbuf[1] == 0x8b && zbuf[2] == Z_DEFLATED) ||
	    len <= MIN_PREFIX_SIZE) {
		return -1;
	}

	/* optional extra fields are present */
	if (zbuf[3] & 0x4) {
		xlen = zbuf[13];
		xlen <<= 8;
		xlen += zbuf[12];

		n += xlen;
	}
	/* file name is present */
	if (zbuf[3] & 0x8) {
		while ((zbuf[n++] != 0) && (n < len))
			;
	}

	return n;
}
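
/*
 * cut_gzip_prefix() above only validates the fixed gzip member header
 * (magic bytes 0x1f 0x8b, deflate compression method) and skips the
 * optional FEXTRA and FNAME fields, so that ecore_gunzip() can pass the
 * raw deflate stream to zlib with a negative window-bits argument
 * (inflateInit2(..., -MAX_WBITS)), i.e. with zlib's own header
 * processing disabled.
 */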

static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t *zbuf, int len)
{
	int ret;
	int data_begin = cut_gzip_prefix(zbuf, len);

	PMD_DRV_LOG(DEBUG, sc, "ecore_gunzip %d", len);

	if (data_begin <= 0) {
		PMD_DRV_LOG(NOTICE, sc, "bad gzip prefix");
		return -1;
	}

	memset(&zlib_stream, 0, sizeof(zlib_stream));
	zlib_stream.next_in = zbuf + data_begin;
	zlib_stream.avail_in = len - data_begin;
	zlib_stream.next_out = sc->gz_buf;
	zlib_stream.avail_out = FW_BUF_SIZE;

	ret = inflateInit2(&zlib_stream, -MAX_WBITS);
	if (ret != Z_OK) {
		PMD_DRV_LOG(NOTICE, sc, "zlib inflateInit2 error");
		return ret;
	}

	ret = inflate(&zlib_stream, Z_FINISH);
	if ((ret != Z_STREAM_END) && (ret != Z_OK)) {
		PMD_DRV_LOG(NOTICE, sc, "zlib inflate error: %d %s", ret,
			    zlib_stream.msg);
	}

	sc->gz_outlen = zlib_stream.total_out;
	if (sc->gz_outlen & 0x3) {
		PMD_DRV_LOG(NOTICE, sc, "firmware is not aligned. gz_outlen == %d",
			    sc->gz_outlen);
	}
	sc->gz_outlen >>= 2;

	inflateEnd(&zlib_stream);

	if (ret == Z_STREAM_END)
		return 0;

	return ret;
}

static void
ecore_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr,
			  uint32_t addr, uint32_t len)
{
	bnx2x_write_dmae_phys_len(sc, phys_addr, addr, len);
}

void
ecore_storm_memset_struct(struct bnx2x_softc *sc, uint32_t addr, size_t size,
			  uint32_t *data)
{
	uint8_t i;

	for (i = 0; i < size / 4; i++) {
		REG_WR(sc, addr + (i * 4), data[i]);
	}
}
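
/*
 * ecore_storm_memset_struct() copies a caller-provided structure into a
 * STORM's internal memory one 32-bit word at a time. A minimal usage
 * sketch (the offset macro and structure below are placeholders for
 * illustration only, not symbols defined by this driver):
 *
 *	struct example_cfg cfg = { 0 };
 *	ecore_storm_memset_struct(sc,
 *				  BAR_XSTRORM_INTMEM + EXAMPLE_CFG_OFFSET(func),
 *				  sizeof(cfg), (uint32_t *)&cfg);
 *
 * The size is expected to be a multiple of 4, and size / 4 must fit the
 * uint8_t loop counter used in the implementation.
 */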

static const char *get_ext_phy_type(uint32_t ext_phy_type)
{
	uint32_t phy_type_idx = ext_phy_type >> 8;
	static const char *types[] = { "DIRECT", "BNX2X-8071", "BNX2X-8072",
		"BNX2X-8073", "BNX2X-8705", "BNX2X-8706", "BNX2X-8726",
		"BNX2X-8481", "SFX-7101", "BNX2X-8727", "BNX2X-8727-NOC",
		"BNX2X-84823", "NOT_CONN", "FAILURE"
	};

	if (phy_type_idx < 12)
		return types[phy_type_idx];
	else if (PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN == ext_phy_type)
		return types[12];
	else
		return types[13];
}

static const char *get_state(uint32_t state)
{
	uint32_t state_idx = state >> 12;
	static const char *states[] = { "CLOSED", "OPENING_WAIT4_LOAD",
		"OPENING_WAIT4_PORT", "OPEN", "CLOSING_WAIT4_HALT",
		"CLOSING_WAIT4_DELETE", "CLOSING_WAIT4_UNLOAD",
		"UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
		"UNKNOWN", "DISABLED", "DIAG", "ERROR", "UNDEFINED"
	};

	if (state_idx <= 0xF)
		return states[state_idx];
	else
		return states[0x10];
}

static const char *get_recovery_state(uint32_t state)
{
	static const char *states[] = { "NONE", "DONE", "INIT",
		"WAIT", "FAILED", "NIC_LOADING"
	};

	return states[state];
}

static const char *get_rx_mode(uint32_t mode)
{
	static const char *modes[] = { "NONE", "NORMAL", "ALLMULTI",
		"PROMISC", "MAX_MULTICAST", "ERROR"
	};

	if (mode < 0x4)
		return modes[mode];
	else if (BNX2X_MAX_MULTICAST == mode)
		return modes[4];
	else
		return modes[5];
}

#define BNX2X_INFO_STR_MAX 256
static const char *get_bnx2x_flags(uint32_t flags)
{
	int i;
	static const char *flag[] = { "ONE_PORT ", "NO_ISCSI ",
		"NO_FCOE ", "NO_WOL ", "USING_DAC ", "USING_MSIX ",
		"USING_MSI ", "DISABLE_MSI ", "UNKNOWN ", "NO_MCP ",
		"SAFC_TX_FLAG ", "MF_FUNC_DIS ", "TX_SWITCHING "
	};
	static char flag_str[BNX2X_INFO_STR_MAX];

	memset(flag_str, 0, BNX2X_INFO_STR_MAX);

	for (i = 0; i < 5; i++) {
		if (flags & (1 << i)) {
			strlcat(flag_str, flag[i], sizeof(flag_str));
			flags ^= (1 << i);
		}
	}

	if (flags) {
		static char unknown[BNX2X_INFO_STR_MAX];

		snprintf(unknown, 32, "Unknown flag mask %x", flags);
		strlcat(flag_str, unknown, sizeof(flag_str));
	}

	return flag_str;
}

/* Prints useful adapter info. */
void bnx2x_print_adapter_info(struct bnx2x_softc *sc)
{
	int i = 0;

	PMD_DRV_LOG(INFO, sc, "========================================");
	/* DPDK and Driver versions */
	PMD_DRV_LOG(INFO, sc, "%12s : %s", "DPDK",
		    rte_version());
	PMD_DRV_LOG(INFO, sc, "%12s : %s", "Driver",
		    bnx2x_pmd_version());
	/* Firmware versions. */
	PMD_DRV_LOG(INFO, sc, "%12s : %d.%d.%d",
		    "Firmware",
		    BNX2X_5710_FW_MAJOR_VERSION,
		    BNX2X_5710_FW_MINOR_VERSION,
		    BNX2X_5710_FW_REVISION_VERSION);
	PMD_DRV_LOG(INFO, sc, "%12s : %s",
		    "Bootcode", sc->devinfo.bc_ver_str);
	/* Hardware chip info. */
	PMD_DRV_LOG(INFO, sc, "%12s : %#08x", "ASIC", sc->devinfo.chip_id);
	PMD_DRV_LOG(INFO, sc, "%12s : %c%d", "Rev", (CHIP_REV(sc) >> 12) + 'A',
		    (CHIP_METAL(sc) >> 4));
	/* Bus PCIe info. */
	PMD_DRV_LOG(INFO, sc, "%12s : 0x%x", "Vendor Id",
		    sc->devinfo.vendor_id);
	PMD_DRV_LOG(INFO, sc, "%12s : 0x%x", "Device Id",
		    sc->devinfo.device_id);
	PMD_DRV_LOG(INFO, sc, "%12s : width x%d, ", "Bus PCIe",
		    sc->devinfo.pcie_link_width);
	switch (sc->devinfo.pcie_link_speed) {
	case 1:
		PMD_DRV_LOG(INFO, sc, "%23s", "2.5 Gbps");
		break;
	case 2:
		PMD_DRV_LOG(INFO, sc, "%21s", "5 Gbps");
		break;
	case 4:
		PMD_DRV_LOG(INFO, sc, "%21s", "8 Gbps");
		break;
	default:
		PMD_DRV_LOG(INFO, sc, "%33s", "Unknown link speed");
	}
	/* Device features. */
	PMD_DRV_LOG(INFO, sc, "%12s : ", "Flags");
	/* Miscellaneous flags. */
	if (sc->devinfo.pcie_cap_flags & BNX2X_MSI_CAPABLE_FLAG) {
		PMD_DRV_LOG(INFO, sc, "%18s", "MSI");
		i++;
	}
	if (sc->devinfo.pcie_cap_flags & BNX2X_MSIX_CAPABLE_FLAG) {
		if (i > 0)
			PMD_DRV_LOG(INFO, sc, "|");
		PMD_DRV_LOG(INFO, sc, "%20s", "MSI-X");
		i++;
	}
	PMD_DRV_LOG(INFO, sc, "%12s : %s", "OVLAN", (OVLAN(sc) ? "YES" : "NO"));
	PMD_DRV_LOG(INFO, sc, "%12s : %s", "MF", (IS_MF(sc) ? "YES" : "NO"));
	PMD_DRV_LOG(INFO, sc, "========================================");
}

/* Prints useful device info. */
void bnx2x_print_device_info(struct bnx2x_softc *sc)
{
	__rte_unused uint32_t ext_phy_type;
	uint32_t offset, reg_val;

	PMD_INIT_FUNC_TRACE(sc);
	offset = offsetof(struct shmem_region,
			  dev_info.port_hw_config[0].external_phy_config);
	reg_val = REG_RD(sc, sc->devinfo.shmem_base + offset);
	if (sc->link_vars.phy_flags & PHY_XGXS_FLAG)
		ext_phy_type = ELINK_XGXS_EXT_PHY_TYPE(reg_val);
	else
		ext_phy_type = ELINK_SERDES_EXT_PHY_TYPE(reg_val);

	/* Device features. */
	PMD_DRV_LOG(INFO, sc, "%12s : %u", "Bnx2x Func", sc->pcie_func);
	PMD_DRV_LOG(INFO, sc,
		    "%12s : %s", "Bnx2x Flags", get_bnx2x_flags(sc->flags));
	PMD_DRV_LOG(INFO, sc, "%12s : %s", "DMAE Is",
		    (sc->dmae_ready ? "Ready" : "Not Ready"));
	PMD_DRV_LOG(INFO, sc, "%12s : %u", "MTU", sc->mtu);
	PMD_DRV_LOG(INFO, sc,
		    "%12s : %s", "PHY Type", get_ext_phy_type(ext_phy_type));
	PMD_DRV_LOG(INFO, sc, "%12s : " RTE_ETHER_ADDR_PRT_FMT, "MAC Addr",
		    sc->link_params.mac_addr[0],
		    sc->link_params.mac_addr[1],
		    sc->link_params.mac_addr[2],
		    sc->link_params.mac_addr[3],
		    sc->link_params.mac_addr[4],
		    sc->link_params.mac_addr[5]);
	PMD_DRV_LOG(INFO, sc, "%12s : %s", "RX Mode", get_rx_mode(sc->rx_mode));
	PMD_DRV_LOG(INFO, sc, "%12s : %s", "State", get_state(sc->state));
	if (sc->recovery_state)
		PMD_DRV_LOG(INFO, sc, "%12s : %s", "Recovery",
			    get_recovery_state(sc->recovery_state));
	/* Queue info. */
	if (IS_PF(sc)) {
		switch (sc->sp->rss_rdata.rss_mode) {
		case ETH_RSS_MODE_DISABLED:
			PMD_DRV_LOG(INFO, sc, "%12s : %s", "Queues", "RSS mode - None");
			break;
		case ETH_RSS_MODE_REGULAR:
			PMD_DRV_LOG(INFO, sc, "%12s : %s,", "Queues", "RSS mode - Regular");
			PMD_DRV_LOG(INFO, sc, "%16d", sc->num_queues);
			break;
		default:
			PMD_DRV_LOG(INFO, sc, "%12s : %s", "Queues", "RSS mode - Unknown");
			break;
		}
	}
	PMD_DRV_LOG(INFO, sc, "%12s : CQ = %lx, EQ = %lx", "SPQ Left",
		    sc->cq_spq_left, sc->eq_spq_left);

	PMD_DRV_LOG(INFO, sc,
		    "%12s : %x", "Switch", sc->link_params.switch_cfg);
	PMD_DRV_LOG(INFO, sc, "pcie_bus=%d, pcie_device=%d",
		    sc->pcie_bus, sc->pcie_device);
	PMD_DRV_LOG(INFO, sc, "bar0.addr=%p, bar1.addr=%p",
		    sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr);
	PMD_DRV_LOG(INFO, sc, "port=%d, path=%d, vnic=%d, func=%d",
		    PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc));
}