1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2007-2013 Broadcom Corporation. 3 * 4 * Eric Davis <edavis@broadcom.com> 5 * David Christensen <davidch@broadcom.com> 6 * Gary Zambrano <zambrano@broadcom.com> 7 * 8 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. 9 * Copyright (c) 2015-2018 Cavium Inc. 10 * All rights reserved. 11 * www.cavium.com 12 */ 13 14 #define BNX2X_DRIVER_VERSION "1.78.18" 15 16 #include "bnx2x.h" 17 #include "bnx2x_vfpf.h" 18 #include "ecore_sp.h" 19 #include "ecore_init.h" 20 #include "ecore_init_ops.h" 21 22 #include "rte_version.h" 23 24 #include <sys/types.h> 25 #include <sys/stat.h> 26 #include <fcntl.h> 27 #include <zlib.h> 28 #include <rte_string_fns.h> 29 30 #define BNX2X_PMD_VER_PREFIX "BNX2X PMD" 31 #define BNX2X_PMD_VERSION_MAJOR 1 32 #define BNX2X_PMD_VERSION_MINOR 0 33 #define BNX2X_PMD_VERSION_REVISION 7 34 #define BNX2X_PMD_VERSION_PATCH 1 35 36 static inline const char * 37 bnx2x_pmd_version(void) 38 { 39 static char version[32]; 40 41 snprintf(version, sizeof(version), "%s %s_%d.%d.%d.%d", 42 BNX2X_PMD_VER_PREFIX, 43 BNX2X_DRIVER_VERSION, 44 BNX2X_PMD_VERSION_MAJOR, 45 BNX2X_PMD_VERSION_MINOR, 46 BNX2X_PMD_VERSION_REVISION, 47 BNX2X_PMD_VERSION_PATCH); 48 49 return version; 50 } 51 52 static z_stream zlib_stream; 53 54 #define EVL_VLID_MASK 0x0FFF 55 56 #define BNX2X_DEF_SB_ATT_IDX 0x0001 57 #define BNX2X_DEF_SB_IDX 0x0002 58 59 /* 60 * FLR Support - bnx2x_pf_flr_clnup() is called during nic_load in the per 61 * function HW initialization. 62 */ 63 #define FLR_WAIT_USEC 10000 /* 10 msecs */ 64 #define FLR_WAIT_INTERVAL 50 /* usecs */ 65 #define FLR_POLL_CNT (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */ 66 67 struct pbf_pN_buf_regs { 68 int pN; 69 uint32_t init_crd; 70 uint32_t crd; 71 uint32_t crd_freed; 72 }; 73 74 struct pbf_pN_cmd_regs { 75 int pN; 76 uint32_t lines_occup; 77 uint32_t lines_freed; 78 }; 79 80 /* resources needed for unloading a previously loaded device */ 81 82 #define BNX2X_PREV_WAIT_NEEDED 1 83 rte_spinlock_t bnx2x_prev_mtx; 84 struct bnx2x_prev_list_node { 85 LIST_ENTRY(bnx2x_prev_list_node) node; 86 uint8_t bus; 87 uint8_t slot; 88 uint8_t path; 89 uint8_t aer; 90 uint8_t undi; 91 }; 92 93 static LIST_HEAD(, bnx2x_prev_list_node) bnx2x_prev_list 94 = LIST_HEAD_INITIALIZER(bnx2x_prev_list); 95 96 static int load_count[2][3] = { { 0 } }; 97 /* per-path: 0-common, 1-port0, 2-port1 */ 98 99 static void bnx2x_cmng_fns_init(struct bnx2x_softc *sc, uint8_t read_cfg, 100 uint8_t cmng_type); 101 static int bnx2x_get_cmng_fns_mode(struct bnx2x_softc *sc); 102 static void storm_memset_cmng(struct bnx2x_softc *sc, struct cmng_init *cmng, 103 uint8_t port); 104 static void bnx2x_set_reset_global(struct bnx2x_softc *sc); 105 static void bnx2x_set_reset_in_progress(struct bnx2x_softc *sc); 106 static uint8_t bnx2x_reset_is_done(struct bnx2x_softc *sc, int engine); 107 static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc); 108 static uint8_t bnx2x_chk_parity_attn(struct bnx2x_softc *sc, uint8_t * global, 109 uint8_t print); 110 static void bnx2x_int_disable(struct bnx2x_softc *sc); 111 static int bnx2x_release_leader_lock(struct bnx2x_softc *sc); 112 static void bnx2x_pf_disable(struct bnx2x_softc *sc); 113 static void bnx2x_update_rx_prod(struct bnx2x_softc *sc, 114 struct bnx2x_fastpath *fp, 115 uint16_t rx_bd_prod, uint16_t rx_cq_prod); 116 static void bnx2x_link_report_locked(struct bnx2x_softc *sc); 117 static void bnx2x_link_report(struct bnx2x_softc *sc); 118 void bnx2x_link_status_update(struct 
bnx2x_softc *sc); 119 static int bnx2x_alloc_mem(struct bnx2x_softc *sc); 120 static void bnx2x_free_mem(struct bnx2x_softc *sc); 121 static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc); 122 static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc); 123 static __rte_noinline 124 int bnx2x_nic_load(struct bnx2x_softc *sc); 125 126 static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc); 127 static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp); 128 static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, 129 uint8_t storm, uint16_t index, uint8_t op, 130 uint8_t update); 131 132 int bnx2x_test_bit(int nr, volatile unsigned long *addr) 133 { 134 int res; 135 136 mb(); 137 res = ((*addr) & (1UL << nr)) != 0; 138 mb(); 139 return res; 140 } 141 142 void bnx2x_set_bit(unsigned int nr, volatile unsigned long *addr) 143 { 144 __sync_fetch_and_or(addr, (1UL << nr)); 145 } 146 147 void bnx2x_clear_bit(int nr, volatile unsigned long *addr) 148 { 149 __sync_fetch_and_and(addr, ~(1UL << nr)); 150 } 151 152 int bnx2x_test_and_clear_bit(int nr, volatile unsigned long *addr) 153 { 154 unsigned long mask = (1UL << nr); 155 return __sync_fetch_and_and(addr, ~mask) & mask; 156 } 157 158 int bnx2x_cmpxchg(volatile int *addr, int old, int new) 159 { 160 return __sync_val_compare_and_swap(addr, old, new); 161 } 162 163 int 164 bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma, 165 const char *msg, uint32_t align) 166 { 167 char mz_name[RTE_MEMZONE_NAMESIZE]; 168 const struct rte_memzone *z; 169 170 dma->sc = sc; 171 if (IS_PF(sc)) 172 snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg, 173 rte_get_timer_cycles()); 174 else 175 snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg, 176 rte_get_timer_cycles()); 177 178 /* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */ 179 z = rte_memzone_reserve_aligned(mz_name, (uint64_t)size, 180 SOCKET_ID_ANY, 181 RTE_MEMZONE_IOVA_CONTIG, align); 182 if (z == NULL) { 183 PMD_DRV_LOG(ERR, sc, "DMA alloc failed for %s", msg); 184 return -ENOMEM; 185 } 186 dma->paddr = (uint64_t) z->iova; 187 dma->vaddr = z->addr; 188 dma->mzone = (const void *)z; 189 190 PMD_DRV_LOG(DEBUG, sc, 191 "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr); 192 193 return 0; 194 } 195 196 void bnx2x_dma_free(struct bnx2x_dma *dma) 197 { 198 if (dma->mzone == NULL) 199 return; 200 201 rte_memzone_free((const struct rte_memzone *)dma->mzone); 202 dma->sc = NULL; 203 dma->paddr = 0; 204 dma->vaddr = NULL; 205 dma->nseg = 0; 206 dma->mzone = NULL; 207 } 208 209 static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource) 210 { 211 uint32_t lock_status; 212 uint32_t resource_bit = (1 << resource); 213 int func = SC_FUNC(sc); 214 uint32_t hw_lock_control_reg; 215 int cnt; 216 217 #ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC 218 if (resource) 219 PMD_INIT_FUNC_TRACE(sc); 220 #else 221 PMD_INIT_FUNC_TRACE(sc); 222 #endif 223 224 /* validate the resource is within range */ 225 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 226 PMD_DRV_LOG(NOTICE, sc, 227 "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE", 228 resource); 229 return -1; 230 } 231 232 if (func <= 5) { 233 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); 234 } else { 235 hw_lock_control_reg = 236 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); 237 } 238 239 /* validate the resource is not already taken */ 240 lock_status = REG_RD(sc, hw_lock_control_reg); 241 if (lock_status & resource_bit) { 242 
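		/*
		 * Another function already owns this HW resource lock; fail
		 * right away rather than polling (the retry loop below only
		 * runs when the bit was still clear at this point).
		 */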
PMD_DRV_LOG(NOTICE, sc, 243 "resource in use (status 0x%x bit 0x%x)", 244 lock_status, resource_bit); 245 return -1; 246 } 247 248 /* try every 5ms for 5 seconds */ 249 for (cnt = 0; cnt < 1000; cnt++) { 250 REG_WR(sc, (hw_lock_control_reg + 4), resource_bit); 251 lock_status = REG_RD(sc, hw_lock_control_reg); 252 if (lock_status & resource_bit) { 253 return 0; 254 } 255 DELAY(5000); 256 } 257 258 PMD_DRV_LOG(NOTICE, sc, "Resource 0x%x resource_bit 0x%x lock timeout!", 259 resource, resource_bit); 260 return -1; 261 } 262 263 static int bnx2x_release_hw_lock(struct bnx2x_softc *sc, uint32_t resource) 264 { 265 uint32_t lock_status; 266 uint32_t resource_bit = (1 << resource); 267 int func = SC_FUNC(sc); 268 uint32_t hw_lock_control_reg; 269 270 #ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC 271 if (resource) 272 PMD_INIT_FUNC_TRACE(sc); 273 #else 274 PMD_INIT_FUNC_TRACE(sc); 275 #endif 276 277 /* validate the resource is within range */ 278 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 279 PMD_DRV_LOG(NOTICE, sc, 280 "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)" 281 " resource_bit 0x%x", resource, resource_bit); 282 return -1; 283 } 284 285 if (func <= 5) { 286 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); 287 } else { 288 hw_lock_control_reg = 289 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); 290 } 291 292 /* validate the resource is currently taken */ 293 lock_status = REG_RD(sc, hw_lock_control_reg); 294 if (!(lock_status & resource_bit)) { 295 PMD_DRV_LOG(NOTICE, sc, 296 "resource not in use (status 0x%x bit 0x%x)", 297 lock_status, resource_bit); 298 return -1; 299 } 300 301 REG_WR(sc, hw_lock_control_reg, resource_bit); 302 return 0; 303 } 304 305 static void bnx2x_acquire_phy_lock(struct bnx2x_softc *sc) 306 { 307 BNX2X_PHY_LOCK(sc); 308 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO); 309 } 310 311 static void bnx2x_release_phy_lock(struct bnx2x_softc *sc) 312 { 313 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO); 314 BNX2X_PHY_UNLOCK(sc); 315 } 316 317 /* copy command into DMAE command memory and set DMAE command Go */ 318 void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx) 319 { 320 uint32_t cmd_offset; 321 uint32_t i; 322 323 cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx)); 324 for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) { 325 REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *) dmae) + i)); 326 } 327 328 REG_WR(sc, dmae_reg_go_c[idx], 1); 329 } 330 331 uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type) 332 { 333 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) | 334 DMAE_COMMAND_C_TYPE_ENABLE); 335 } 336 337 uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode) 338 { 339 return opcode & ~DMAE_COMMAND_SRC_RESET; 340 } 341 342 uint32_t 343 bnx2x_dmae_opcode(struct bnx2x_softc * sc, uint8_t src_type, uint8_t dst_type, 344 uint8_t with_comp, uint8_t comp_type) 345 { 346 uint32_t opcode = 0; 347 348 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) | 349 (dst_type << DMAE_COMMAND_DST_SHIFT)); 350 351 opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET); 352 353 opcode |= (SC_PORT(sc) ? 
DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); 354 355 opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) | 356 (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT)); 357 358 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); 359 360 #ifdef __BIG_ENDIAN 361 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; 362 #else 363 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; 364 #endif 365 366 if (with_comp) { 367 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type); 368 } 369 370 return opcode; 371 } 372 373 static void 374 bnx2x_prep_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae, 375 uint8_t src_type, uint8_t dst_type) 376 { 377 memset(dmae, 0, sizeof(struct dmae_command)); 378 379 /* set the opcode */ 380 dmae->opcode = bnx2x_dmae_opcode(sc, src_type, dst_type, 381 TRUE, DMAE_COMP_PCI); 382 383 /* fill in the completion parameters */ 384 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, wb_comp)); 385 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, wb_comp)); 386 dmae->comp_val = DMAE_COMP_VAL; 387 } 388 389 /* issue a DMAE command over the init channel and wait for completion */ 390 static int 391 bnx2x_issue_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae) 392 { 393 uint32_t *wb_comp = BNX2X_SP(sc, wb_comp); 394 int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000; 395 396 /* reset completion */ 397 *wb_comp = 0; 398 399 /* post the command on the channel used for initializations */ 400 bnx2x_post_dmae(sc, dmae, INIT_DMAE_C(sc)); 401 402 /* wait for completion */ 403 DELAY(500); 404 405 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { 406 if (!timeout || 407 (sc->recovery_state != BNX2X_RECOVERY_DONE && 408 sc->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { 409 PMD_DRV_LOG(INFO, sc, "DMAE timeout!"); 410 return DMAE_TIMEOUT; 411 } 412 413 timeout--; 414 DELAY(50); 415 } 416 417 if (*wb_comp & DMAE_PCI_ERR_FLAG) { 418 PMD_DRV_LOG(INFO, sc, "DMAE PCI error!"); 419 return DMAE_PCI_ERROR; 420 } 421 422 return 0; 423 } 424 425 void bnx2x_read_dmae(struct bnx2x_softc *sc, uint32_t src_addr, uint32_t len32) 426 { 427 struct dmae_command dmae; 428 uint32_t *data; 429 uint32_t i; 430 int rc; 431 432 if (!sc->dmae_ready) { 433 data = BNX2X_SP(sc, wb_data[0]); 434 435 for (i = 0; i < len32; i++) { 436 data[i] = REG_RD(sc, (src_addr + (i * 4))); 437 } 438 439 return; 440 } 441 442 /* set opcode and fixed command fields */ 443 bnx2x_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); 444 445 /* fill in addresses and len */ 446 dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */ 447 dmae.src_addr_hi = 0; 448 dmae.dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, wb_data)); 449 dmae.dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, wb_data)); 450 dmae.len = len32; 451 452 /* issue the command and wait for completion */ 453 if ((rc = bnx2x_issue_dmae_with_comp(sc, &dmae)) != 0) { 454 rte_panic("DMAE failed (%d)", rc); 455 }; 456 } 457 458 void 459 bnx2x_write_dmae(struct bnx2x_softc *sc, rte_iova_t dma_addr, uint32_t dst_addr, 460 uint32_t len32) 461 { 462 struct dmae_command dmae; 463 int rc; 464 465 if (!sc->dmae_ready) { 466 ecore_init_str_wr(sc, dst_addr, BNX2X_SP(sc, wb_data[0]), len32); 467 return; 468 } 469 470 /* set opcode and fixed command fields */ 471 bnx2x_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); 472 473 /* fill in addresses and len */ 474 dmae.src_addr_lo = U64_LO(dma_addr); 475 dmae.src_addr_hi = U64_HI(dma_addr); 476 dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */ 477 dmae.dst_addr_hi = 0; 478 dmae.len = len32; 479 480 /* 
issue the command and wait for completion */ 481 if ((rc = bnx2x_issue_dmae_with_comp(sc, &dmae)) != 0) { 482 rte_panic("DMAE failed (%d)", rc); 483 } 484 } 485 486 static void 487 bnx2x_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr, 488 uint32_t addr, uint32_t len) 489 { 490 uint32_t dmae_wr_max = DMAE_LEN32_WR_MAX(sc); 491 uint32_t offset = 0; 492 493 while (len > dmae_wr_max) { 494 bnx2x_write_dmae(sc, (phys_addr + offset), /* src DMA address */ 495 (addr + offset), /* dst GRC address */ 496 dmae_wr_max); 497 offset += (dmae_wr_max * 4); 498 len -= dmae_wr_max; 499 } 500 501 bnx2x_write_dmae(sc, (phys_addr + offset), /* src DMA address */ 502 (addr + offset), /* dst GRC address */ 503 len); 504 } 505 506 void 507 bnx2x_set_ctx_validation(struct bnx2x_softc *sc, struct eth_context *cxt, 508 uint32_t cid) 509 { 510 /* ustorm cxt validation */ 511 cxt->ustorm_ag_context.cdu_usage = 512 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), 513 CDU_REGION_NUMBER_UCM_AG, 514 ETH_CONNECTION_TYPE); 515 /* xcontext validation */ 516 cxt->xstorm_ag_context.cdu_reserved = 517 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), 518 CDU_REGION_NUMBER_XCM_AG, 519 ETH_CONNECTION_TYPE); 520 } 521 522 static void 523 bnx2x_storm_memset_hc_timeout(struct bnx2x_softc *sc, uint8_t fw_sb_id, 524 uint8_t sb_index, uint8_t ticks) 525 { 526 uint32_t addr = 527 (BAR_CSTRORM_INTMEM + 528 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index)); 529 530 REG_WR8(sc, addr, ticks); 531 } 532 533 static void 534 bnx2x_storm_memset_hc_disable(struct bnx2x_softc *sc, uint16_t fw_sb_id, 535 uint8_t sb_index, uint8_t disable) 536 { 537 uint32_t enable_flag = 538 (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); 539 uint32_t addr = 540 (BAR_CSTRORM_INTMEM + 541 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index)); 542 uint8_t flags; 543 544 /* clear and set */ 545 flags = REG_RD8(sc, addr); 546 flags &= ~HC_INDEX_DATA_HC_ENABLED; 547 flags |= enable_flag; 548 REG_WR8(sc, addr, flags); 549 } 550 551 void 552 bnx2x_update_coalesce_sb_index(struct bnx2x_softc *sc, uint8_t fw_sb_id, 553 uint8_t sb_index, uint8_t disable, uint16_t usec) 554 { 555 uint8_t ticks = (usec / 4); 556 557 bnx2x_storm_memset_hc_timeout(sc, fw_sb_id, sb_index, ticks); 558 559 disable = (disable) ? 1 : ((usec) ? 0 : 1); 560 bnx2x_storm_memset_hc_disable(sc, fw_sb_id, sb_index, disable); 561 } 562 563 uint32_t elink_cb_reg_read(struct bnx2x_softc *sc, uint32_t reg_addr) 564 { 565 return REG_RD(sc, reg_addr); 566 } 567 568 void elink_cb_reg_write(struct bnx2x_softc *sc, uint32_t reg_addr, uint32_t val) 569 { 570 REG_WR(sc, reg_addr, val); 571 } 572 573 void 574 elink_cb_event_log(__rte_unused struct bnx2x_softc *sc, 575 __rte_unused const elink_log_id_t elink_log_id, ...) 
576 { 577 PMD_DRV_LOG(DEBUG, sc, "ELINK EVENT LOG (%d)", elink_log_id); 578 } 579 580 static int bnx2x_set_spio(struct bnx2x_softc *sc, int spio, uint32_t mode) 581 { 582 uint32_t spio_reg; 583 584 /* Only 2 SPIOs are configurable */ 585 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { 586 PMD_DRV_LOG(NOTICE, sc, "Invalid SPIO 0x%x", spio); 587 return -1; 588 } 589 590 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 591 592 /* read SPIO and mask except the float bits */ 593 spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); 594 595 switch (mode) { 596 case MISC_SPIO_OUTPUT_LOW: 597 /* clear FLOAT and set CLR */ 598 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 599 spio_reg |= (spio << MISC_SPIO_CLR_POS); 600 break; 601 602 case MISC_SPIO_OUTPUT_HIGH: 603 /* clear FLOAT and set SET */ 604 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 605 spio_reg |= (spio << MISC_SPIO_SET_POS); 606 break; 607 608 case MISC_SPIO_INPUT_HI_Z: 609 /* set FLOAT */ 610 spio_reg |= (spio << MISC_SPIO_FLOAT_POS); 611 break; 612 613 default: 614 break; 615 } 616 617 REG_WR(sc, MISC_REG_SPIO, spio_reg); 618 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 619 620 return 0; 621 } 622 623 static int bnx2x_gpio_read(struct bnx2x_softc *sc, int gpio_num, uint8_t port) 624 { 625 /* The GPIO should be swapped if swap register is set and active */ 626 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 627 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 628 int gpio_shift = gpio_num; 629 if (gpio_port) 630 gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT; 631 632 uint32_t gpio_mask = (1 << gpio_shift); 633 uint32_t gpio_reg; 634 635 if (gpio_num > MISC_REGISTERS_GPIO_3) { 636 PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num); 637 return -1; 638 } 639 640 /* read GPIO value */ 641 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 642 643 /* get the requested pin value */ 644 return ((gpio_reg & gpio_mask) == gpio_mask) ? 
1 : 0; 645 } 646 647 static int 648 bnx2x_gpio_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode, uint8_t port) 649 { 650 /* The GPIO should be swapped if swap register is set and active */ 651 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 652 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 653 int gpio_shift = gpio_num; 654 if (gpio_port) 655 gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT; 656 657 uint32_t gpio_mask = (1 << gpio_shift); 658 uint32_t gpio_reg; 659 660 if (gpio_num > MISC_REGISTERS_GPIO_3) { 661 PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num); 662 return -1; 663 } 664 665 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 666 667 /* read GPIO and mask except the float bits */ 668 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 669 670 switch (mode) { 671 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 672 /* clear FLOAT and set CLR */ 673 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 674 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 675 break; 676 677 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 678 /* clear FLOAT and set SET */ 679 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 680 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 681 break; 682 683 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 684 /* set FLOAT */ 685 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 686 break; 687 688 default: 689 break; 690 } 691 692 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 693 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 694 695 return 0; 696 } 697 698 static int 699 bnx2x_gpio_mult_write(struct bnx2x_softc *sc, uint8_t pins, uint32_t mode) 700 { 701 uint32_t gpio_reg; 702 703 /* any port swapping should be handled by caller */ 704 705 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 706 707 /* read GPIO and mask except the float bits */ 708 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 709 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 710 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 711 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 712 713 switch (mode) { 714 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 715 /* set CLR */ 716 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 717 break; 718 719 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 720 /* set SET */ 721 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 722 break; 723 724 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 725 /* set FLOAT */ 726 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 727 break; 728 729 default: 730 PMD_DRV_LOG(NOTICE, sc, 731 "Invalid GPIO mode assignment %d", mode); 732 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 733 return -1; 734 } 735 736 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 737 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 738 739 return 0; 740 } 741 742 static int 743 bnx2x_gpio_int_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode, 744 uint8_t port) 745 { 746 /* The GPIO should be swapped if swap register is set and active */ 747 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 748 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 749 int gpio_shift = gpio_num; 750 if (gpio_port) 751 gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT; 752 753 uint32_t gpio_mask = (1 << gpio_shift); 754 uint32_t gpio_reg; 755 756 if (gpio_num > MISC_REGISTERS_GPIO_3) { 757 PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num); 758 return -1; 759 } 760 761 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 762 763 /* read GPIO int */ 764 gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); 765 766 switch (mode) { 767 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: 768 /* clear SET 
and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

uint32_t
elink_cb_gpio_read(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t port)
{
	return bnx2x_gpio_read(sc, gpio_num, port);
}

uint8_t elink_cb_gpio_write(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */
			    uint8_t port)
{
	return bnx2x_gpio_write(sc, gpio_num, mode, port);
}

uint8_t
elink_cb_gpio_mult_write(struct bnx2x_softc * sc, uint8_t pins,
			 uint8_t mode /* 0=low 1=high */ )
{
	return bnx2x_gpio_mult_write(sc, pins, mode);
}

uint8_t elink_cb_gpio_int_write(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */
				uint8_t port)
{
	return bnx2x_gpio_int_write(sc, gpio_num, mode, port);
}

void elink_cb_notify_link_changed(struct bnx2x_softc *sc)
{
	REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
		    (SC_FUNC(sc) * sizeof(uint32_t))), 1);
}

/* send the MCP a request, block until there is a reply */
uint32_t
elink_cb_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param)
{
	int mb_idx = SC_FW_MB_IDX(sc);
	uint32_t seq;
	uint32_t rc = 0;
	uint32_t cnt = 1;
	uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;

	seq = ++sc->fw_seq;
	SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));

	PMD_DRV_LOG(DEBUG, sc,
		    "wrote command 0x%08x to FW MB param 0x%08x",
		    (command | seq), param);

	/* Let the FW do its magic. Give it up to 5 seconds... */
	do {
		DELAY(delay * 1000);
		rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;
	} else {
		/* Ruh-roh!
 */
		PMD_DRV_LOG(NOTICE, sc, "FW failed to respond!");
		rc = 0;
	}

	return rc;
}

static uint32_t
bnx2x_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param)
{
	return elink_cb_fw_command(sc, command, param);
}

static void
__storm_memset_dma_mapping(struct bnx2x_softc *sc, uint32_t addr,
			   rte_iova_t mapping)
{
	REG_WR(sc, addr, U64_LO(mapping));
	REG_WR(sc, (addr + 4), U64_HI(mapping));
}

static void
storm_memset_spq_addr(struct bnx2x_softc *sc, rte_iova_t mapping,
		      uint16_t abs_fid)
{
	uint32_t addr = (XSEM_REG_FAST_MEMORY +
			 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
	__storm_memset_dma_mapping(sc, addr, mapping);
}

static void
storm_memset_vf_to_pf(struct bnx2x_softc *sc, uint16_t abs_fid, uint16_t pf_id)
{
	REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)),
		pf_id);
	REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)),
		pf_id);
	REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)),
		pf_id);
	REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)),
		pf_id);
}

static void
storm_memset_func_en(struct bnx2x_softc *sc, uint16_t abs_fid, uint8_t enable)
{
	REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)),
		enable);
	REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)),
		enable);
	REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)),
		enable);
	REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)),
		enable);
}

static void
storm_memset_eq_data(struct bnx2x_softc *sc, struct event_ring_data *eq_data,
		     uint16_t pfid)
{
	uint32_t addr;
	size_t size;

	addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
	size = sizeof(struct event_ring_data);
	ecore_storm_memset_struct(sc, addr, size, (uint32_t *) eq_data);
}

static void
storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid)
{
	uint32_t addr = (BAR_CSTRORM_INTMEM +
			 CSTORM_EVENT_RING_PROD_OFFSET(pfid));
	REG_WR16(sc, addr, eq_prod);
}

/*
 * Post a slowpath command.
 *
 * A slowpath command is used to propagate a configuration change through
 * the controller in a controlled manner, allowing each STORM processor and
 * other H/W blocks to phase in the change. The commands sent on the
 * slowpath are referred to as ramrods. Depending on the ramrod used the
 * completion of the ramrod will occur in different ways. Here's a
 * breakdown of ramrods and how they complete:
 *
 * RAMROD_CMD_ID_ETH_PORT_SETUP
 *   Used to setup the leading connection on a port. Completes on the
 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
 *
 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
 *   Used to setup an additional connection on a port. Completes on the
 *   RCQ of the multi-queue/RSS connection being initialized.
 *
 * RAMROD_CMD_ID_ETH_STAT_QUERY
 *   Used to force the storm processors to update the statistics database
 *   in host memory. This ramrod is sent on the leading connection CID and
 *   completes as an index increment of the CSTORM on the default status
 *   block.
 *
 * RAMROD_CMD_ID_ETH_UPDATE
 *   Used to update the state of the leading connection, usually to update
 *   the RSS indirection table. Completes on the RCQ of the leading
 *   connection. (Not currently used under FreeBSD until OS support
 *   becomes available.)
 *
 * RAMROD_CMD_ID_ETH_HALT
 *   Used when tearing down a connection prior to driver unload. Completes
 *   on the RCQ of the multi-queue/RSS connection being torn down. Don't
 *   use this on the leading connection.
 *
 * RAMROD_CMD_ID_ETH_SET_MAC
 *   Sets the Unicast/Broadcast/Multicast used by the port. Completes on
 *   the RCQ of the leading connection.
 *
 * RAMROD_CMD_ID_ETH_CFC_DEL
 *   Used when tearing down a connection prior to driver unload. Completes
 *   on the RCQ of the leading connection (since the current connection
 *   has been completely removed from controller memory).
 *
 * RAMROD_CMD_ID_ETH_PORT_DEL
 *   Used to tear down the leading connection prior to driver unload,
 *   typically fp[0]. Completes as an index increment of the CSTORM on the
 *   default status block.
 *
 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
 *   Used for connection offload. Completes on the RCQ of the multi-queue
 *   RSS connection that is being offloaded. (Not currently used under
 *   FreeBSD.)
 *
 * There can only be one command pending per function.
 *
 * Returns:
 *   0 = Success, !0 = Failure.
 */

/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x_softc *sc)
{
	struct eth_spe *next_spe = sc->spq_prod_bd;

	if (sc->spq_prod_bd == sc->spq_last_bd) {
		/* wrap back to the first eth_spq */
		sc->spq_prod_bd = sc->spq;
		sc->spq_prod_idx = 0;
	} else {
		sc->spq_prod_bd++;
		sc->spq_prod_idx++;
	}

	return next_spe;
}

/* must be called under the spq lock */
static void bnx2x_sp_prod_update(struct bnx2x_softc *sc)
{
	int func = SC_FUNC(sc);

	/*
	 * Make sure that BD data is updated before writing the producer.
	 * BD data is written to the memory, the producer is read from the
	 * memory, thus we need a full memory barrier to ensure the ordering.
	 */
	mb();

	REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
		 sc->spq_prod_idx);

	mb();
}

/**
 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd:      command to check
 * @cmd_type: command type
 */
static int bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
{
	if ((cmd_type == NONE_CONNECTION_TYPE) ||
	    (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
	    (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
	    (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
		return TRUE;
	} else {
		return FALSE;
	}
}

/**
 * bnx2x_sp_post - place a single command on an SP ring
 *
 * @sc:       driver handle
 * @command:  command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:      SW CID the command is related to
 * @data_hi:  command private data address (high 32 bits)
 * @data_lo:  command private data address (low 32 bits)
 * @cmd_type: command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two uint32 fields.
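 *
 * As a rough usage sketch (not a call copied from this file), a caller
 * tearing down a queue might post a HALT ramrod along these lines:
 *
 *   bnx2x_sp_post(sc, RAMROD_CMD_ID_ETH_HALT, cid,
 *                 U64_HI(data_mapping), U64_LO(data_mapping),
 *                 ETH_CONNECTION_TYPE);
 *
 * where 'cid' and 'data_mapping' (the DMA address of the ramrod data
 * buffer) stand in for values the caller already owns.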
 */
int
bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, uint32_t data_hi,
	      uint32_t data_lo, int cmd_type)
{
	struct eth_spe *spe;
	uint16_t type;
	int common;

	common = bnx2x_is_contextless_ramrod(command, cmd_type);

	if (common) {
		if (!atomic_load_acq_long(&sc->eq_spq_left)) {
			PMD_DRV_LOG(INFO, sc, "EQ ring is full!");
			return -1;
		}
	} else {
		if (!atomic_load_acq_long(&sc->cq_spq_left)) {
			PMD_DRV_LOG(INFO, sc, "SPQ ring is full!");
			return -1;
		}
	}

	spe = bnx2x_sp_get_next(sc);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
	    htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid));

	type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;

	/* TBD: Check if it works for VFs */
	type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	spe->hdr.type = htole16(type);

	spe->data.update_data_addr.hi = htole32(data_hi);
	spe->data.update_data_addr.lo = htole32(data_lo);

	/*
	 * It's ok if the actual decrement is issued towards the memory
	 * somewhere between the lock and unlock. Thus no more explicit
	 * memory barrier is needed.
	 */
	if (common) {
		atomic_subtract_acq_long(&sc->eq_spq_left, 1);
	} else {
		atomic_subtract_acq_long(&sc->cq_spq_left, 1);
	}

	PMD_DRV_LOG(DEBUG, sc,
		    "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x "
		    "data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)",
		    sc->spq_prod_idx,
		    (uint32_t) U64_HI(sc->spq_dma.paddr),
		    (uint32_t) (U64_LO(sc->spq_dma.paddr) +
				(uint8_t *) sc->spq_prod_bd -
				(uint8_t *) sc->spq), command, common,
		    HW_CID(sc, cid), data_hi, data_lo, type,
		    atomic_load_acq_long(&sc->cq_spq_left),
		    atomic_load_acq_long(&sc->eq_spq_left));

	/* RAMROD completion is processed in bnx2x_intr_legacy()
	 * which can run from different contexts.
	 * Ask bnx2x_intr_intr() to process RAMROD
	 * completion whenever it gets scheduled.
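	 * (scan_fp, set just below, acts as a hint to the polling/interrupt
	 * path that a slow-path completion may be pending on the RCQ.)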
	 */
	rte_atomic32_set(&sc->scan_fp, 1);
	bnx2x_sp_prod_update(sc);

	return 0;
}

static void bnx2x_drv_pulse(struct bnx2x_softc *sc)
{
	SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
		 sc->fw_drv_pulse_wr_seq);
}

static int bnx2x_tx_queue_has_work(const struct bnx2x_fastpath *fp)
{
	uint16_t hw_cons;
	struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index];

	if (unlikely(!txq)) {
		PMD_TX_LOG(ERR, "ERROR: TX queue is NULL");
		return 0;
	}

	mb();			/* status block fields can change */
	hw_cons = le16toh(*fp->tx_cons_sb);
	return hw_cons != txq->tx_pkt_head;
}

static uint8_t bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	/* expand this for multi-cos if ever supported */
	return bnx2x_tx_queue_has_work(fp);
}

static int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	uint16_t rx_cq_cons_sb;
	struct bnx2x_rx_queue *rxq;

	rxq = fp->sc->rx_queues[fp->index];
	if (unlikely(!rxq)) {
		PMD_RX_LOG(ERR, "ERROR: RX queue is NULL");
		return 0;
	}

	mb();			/* status block fields can change */
	rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
	if (unlikely((rx_cq_cons_sb & MAX_RCQ_ENTRIES(rxq)) ==
		     MAX_RCQ_ENTRIES(rxq)))
		rx_cq_cons_sb++;
	return rxq->rx_cq_head != rx_cq_cons_sb;
}

static void
bnx2x_sp_event(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
	       union eth_rx_cqe *rr_cqe)
{
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
	struct ecore_queue_sp_obj *q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj;

	PMD_DRV_LOG(DEBUG, sc,
		    "fp=%d cid=%d got ramrod #%d state is %x type is %d",
		    fp->index, cid, command, sc->state,
		    rr_cqe->ramrod_cqe.ramrod_type);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		PMD_DRV_LOG(DEBUG, sc, "got UPDATE ramrod. CID %d", cid);
		drv_cmd = ECORE_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] setup ramrod", cid);
		drv_cmd = ECORE_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		PMD_DRV_LOG(DEBUG, sc,
			    "got MULTI[%d] tx-only setup ramrod", cid);
		drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] halt ramrod", cid);
		drv_cmd = ECORE_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] terminate ramrod", cid);
		drv_cmd = ECORE_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] empty ramrod", cid);
		drv_cmd = ECORE_Q_CMD_EMPTY;
		break;

	default:
		PMD_DRV_LOG(DEBUG, sc,
			    "ERROR: unexpected MC reply (%d) "
			    "on fp[%d]", command, fp->index);
		return;
	}

	if ((drv_cmd != ECORE_Q_CMD_MAX) &&
	    q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
		/*
		 * q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase the sc->spq_left
		 * because apparently we haven't sent this command in the first
		 * place.
1235 */ 1236 // rte_panic("Unexpected SP completion"); 1237 return; 1238 } 1239 1240 atomic_add_acq_long(&sc->cq_spq_left, 1); 1241 1242 PMD_DRV_LOG(DEBUG, sc, "sc->cq_spq_left 0x%lx", 1243 atomic_load_acq_long(&sc->cq_spq_left)); 1244 } 1245 1246 static uint8_t bnx2x_rxeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp) 1247 { 1248 struct bnx2x_rx_queue *rxq; 1249 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; 1250 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; 1251 1252 rxq = sc->rx_queues[fp->index]; 1253 if (!rxq) { 1254 PMD_RX_LOG(ERR, "RX queue %d is NULL", fp->index); 1255 return 0; 1256 } 1257 1258 /* CQ "next element" is of the size of the regular element */ 1259 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); 1260 if (unlikely((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) == 1261 USABLE_RCQ_ENTRIES_PER_PAGE)) { 1262 hw_cq_cons++; 1263 } 1264 1265 bd_cons = rxq->rx_bd_head; 1266 bd_prod = rxq->rx_bd_tail; 1267 bd_prod_fw = bd_prod; 1268 sw_cq_cons = rxq->rx_cq_head; 1269 sw_cq_prod = rxq->rx_cq_tail; 1270 1271 /* 1272 * Memory barrier necessary as speculative reads of the rx 1273 * buffer can be ahead of the index in the status block 1274 */ 1275 rmb(); 1276 1277 while (sw_cq_cons != hw_cq_cons) { 1278 union eth_rx_cqe *cqe; 1279 struct eth_fast_path_rx_cqe *cqe_fp; 1280 uint8_t cqe_fp_flags; 1281 enum eth_rx_cqe_type cqe_fp_type; 1282 1283 comp_ring_cons = RCQ_ENTRY(sw_cq_cons, rxq); 1284 bd_prod = RX_BD(bd_prod, rxq); 1285 bd_cons = RX_BD(bd_cons, rxq); 1286 1287 cqe = &rxq->cq_ring[comp_ring_cons]; 1288 cqe_fp = &cqe->fast_path_cqe; 1289 cqe_fp_flags = cqe_fp->type_error_flags; 1290 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 1291 1292 /* is this a slowpath msg? */ 1293 if (CQE_TYPE_SLOW(cqe_fp_type)) { 1294 bnx2x_sp_event(sc, fp, cqe); 1295 goto next_cqe; 1296 } 1297 1298 /* is this an error packet? 
*/ 1299 if (unlikely(cqe_fp_flags & 1300 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { 1301 PMD_RX_LOG(DEBUG, "flags 0x%x rx packet %u", 1302 cqe_fp_flags, sw_cq_cons); 1303 goto next_rx; 1304 } 1305 1306 PMD_RX_LOG(DEBUG, "Dropping fastpath called from attn poller!"); 1307 1308 next_rx: 1309 bd_cons = NEXT_RX_BD(bd_cons); 1310 bd_prod = NEXT_RX_BD(bd_prod); 1311 bd_prod_fw = NEXT_RX_BD(bd_prod_fw); 1312 1313 next_cqe: 1314 sw_cq_prod = NEXT_RCQ_IDX(sw_cq_prod); 1315 sw_cq_cons = NEXT_RCQ_IDX(sw_cq_cons); 1316 1317 } /* while work to do */ 1318 1319 rxq->rx_bd_head = bd_cons; 1320 rxq->rx_bd_tail = bd_prod_fw; 1321 rxq->rx_cq_head = sw_cq_cons; 1322 rxq->rx_cq_tail = sw_cq_prod; 1323 1324 /* Update producers */ 1325 bnx2x_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod); 1326 1327 return sw_cq_cons != hw_cq_cons; 1328 } 1329 1330 static uint16_t 1331 bnx2x_free_tx_pkt(__rte_unused struct bnx2x_fastpath *fp, struct bnx2x_tx_queue *txq, 1332 uint16_t pkt_idx, uint16_t bd_idx) 1333 { 1334 struct eth_tx_start_bd *tx_start_bd = 1335 &txq->tx_ring[TX_BD(bd_idx, txq)].start_bd; 1336 uint16_t nbd = rte_le_to_cpu_16(tx_start_bd->nbd); 1337 struct rte_mbuf *tx_mbuf = txq->sw_ring[TX_BD(pkt_idx, txq)]; 1338 1339 if (likely(tx_mbuf != NULL)) { 1340 rte_pktmbuf_free_seg(tx_mbuf); 1341 } else { 1342 PMD_RX_LOG(ERR, "fp[%02d] lost mbuf %lu", 1343 fp->index, (unsigned long)TX_BD(pkt_idx, txq)); 1344 } 1345 1346 txq->sw_ring[TX_BD(pkt_idx, txq)] = NULL; 1347 txq->nb_tx_avail += nbd; 1348 1349 while (nbd--) 1350 bd_idx = NEXT_TX_BD(bd_idx); 1351 1352 return bd_idx; 1353 } 1354 1355 /* processes transmit completions */ 1356 uint8_t bnx2x_txeof(__rte_unused struct bnx2x_softc * sc, struct bnx2x_fastpath * fp) 1357 { 1358 uint16_t bd_cons, hw_cons, sw_cons; 1359 __rte_unused uint16_t tx_bd_avail; 1360 1361 struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; 1362 1363 if (unlikely(!txq)) { 1364 PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); 1365 return 0; 1366 } 1367 1368 bd_cons = txq->tx_bd_head; 1369 hw_cons = rte_le_to_cpu_16(*fp->tx_cons_sb); 1370 sw_cons = txq->tx_pkt_head; 1371 1372 while (sw_cons != hw_cons) { 1373 bd_cons = bnx2x_free_tx_pkt(fp, txq, sw_cons, bd_cons); 1374 sw_cons++; 1375 } 1376 1377 txq->tx_pkt_head = sw_cons; 1378 txq->tx_bd_head = bd_cons; 1379 1380 tx_bd_avail = txq->nb_tx_avail; 1381 1382 PMD_TX_LOG(DEBUG, "fp[%02d] avail=%u cons_sb=%u, " 1383 "pkt_head=%u pkt_tail=%u bd_head=%u bd_tail=%u", 1384 fp->index, tx_bd_avail, hw_cons, 1385 txq->tx_pkt_head, txq->tx_pkt_tail, 1386 txq->tx_bd_head, txq->tx_bd_tail); 1387 return TRUE; 1388 } 1389 1390 static void bnx2x_drain_tx_queues(struct bnx2x_softc *sc) 1391 { 1392 struct bnx2x_fastpath *fp; 1393 int i, count; 1394 1395 /* wait until all TX fastpath tasks have completed */ 1396 for (i = 0; i < sc->num_queues; i++) { 1397 fp = &sc->fp[i]; 1398 1399 count = 1000; 1400 1401 while (bnx2x_has_tx_work(fp)) { 1402 bnx2x_txeof(sc, fp); 1403 1404 if (count == 0) { 1405 PMD_TX_LOG(ERR, 1406 "Timeout waiting for fp[%d] " 1407 "transmits to complete!", i); 1408 rte_panic("tx drain failure"); 1409 return; 1410 } 1411 1412 count--; 1413 DELAY(1000); 1414 rmb(); 1415 } 1416 } 1417 1418 return; 1419 } 1420 1421 static int 1422 bnx2x_del_all_macs(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *mac_obj, 1423 int mac_type, uint8_t wait_for_comp) 1424 { 1425 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 1426 int rc; 1427 1428 /* wait for completion of requested */ 1429 if (wait_for_comp) { 1430 bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 
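		/*
		 * With RAMROD_COMP_WAIT set, the ecore layer is expected to
		 * block in delete_all() until the classification ramrod
		 * completes rather than returning with the request still
		 * pending.
		 */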
	}

	/* Set the mac type of addresses we want to clear */
	bnx2x_set_bit(mac_type, &vlan_mac_flags);

	rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
	if (rc < 0)
		PMD_DRV_LOG(ERR, sc, "Failed to delete MACs (%d)", rc);

	return rc;
}

static int
bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode,
			unsigned long *rx_accept_flags,
			unsigned long *tx_accept_flags)
{
	/* Clear the flags first */
	*rx_accept_flags = 0;
	*tx_accept_flags = 0;

	switch (rx_mode) {
	case BNX2X_RX_MODE_NONE:
		/*
		 * 'drop all' supersedes any accept flags that may have been
		 * passed to the function.
		 */
		break;

	case BNX2X_RX_MODE_NORMAL:
		bnx2x_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
		bnx2x_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
		bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);

		/* internal switching mode */
		bnx2x_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
		bnx2x_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
		bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);

		break;

	case BNX2X_RX_MODE_ALLMULTI:
		bnx2x_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
		bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
		bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);

		/* internal switching mode */
		bnx2x_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
		bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
		bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);

		break;

	case BNX2X_RX_MODE_ALLMULTI_PROMISC:
	case BNX2X_RX_MODE_PROMISC:
		/*
		 * According to the definition of SI mode, an iface in promisc
		 * mode should receive matched and unmatched (in resolution of
		 * port) unicast packets.
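		 * (ECORE_ACCEPT_UNMATCHED, set just below, is the flag that
		 * lets those unmatched unicast frames through.)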
1490 */ 1491 bnx2x_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); 1492 bnx2x_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 1493 bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); 1494 bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 1495 1496 /* internal switching mode */ 1497 bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); 1498 bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 1499 1500 if (IS_MF_SI(sc)) { 1501 bnx2x_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); 1502 } else { 1503 bnx2x_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 1504 } 1505 1506 break; 1507 1508 default: 1509 PMD_RX_LOG(ERR, "Unknown rx_mode (%d)", rx_mode); 1510 return -1; 1511 } 1512 1513 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ 1514 if (rx_mode != BNX2X_RX_MODE_NONE) { 1515 bnx2x_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); 1516 bnx2x_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); 1517 } 1518 1519 return 0; 1520 } 1521 1522 static int 1523 bnx2x_set_q_rx_mode(struct bnx2x_softc *sc, uint8_t cl_id, 1524 unsigned long rx_mode_flags, 1525 unsigned long rx_accept_flags, 1526 unsigned long tx_accept_flags, unsigned long ramrod_flags) 1527 { 1528 struct ecore_rx_mode_ramrod_params ramrod_param; 1529 int rc; 1530 1531 memset(&ramrod_param, 0, sizeof(ramrod_param)); 1532 1533 /* Prepare ramrod parameters */ 1534 ramrod_param.cid = 0; 1535 ramrod_param.cl_id = cl_id; 1536 ramrod_param.rx_mode_obj = &sc->rx_mode_obj; 1537 ramrod_param.func_id = SC_FUNC(sc); 1538 1539 ramrod_param.pstate = &sc->sp_state; 1540 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; 1541 1542 ramrod_param.rdata = BNX2X_SP(sc, rx_mode_rdata); 1543 ramrod_param.rdata_mapping = 1544 (rte_iova_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata), 1545 bnx2x_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 1546 1547 ramrod_param.ramrod_flags = ramrod_flags; 1548 ramrod_param.rx_mode_flags = rx_mode_flags; 1549 1550 ramrod_param.rx_accept_flags = rx_accept_flags; 1551 ramrod_param.tx_accept_flags = tx_accept_flags; 1552 1553 rc = ecore_config_rx_mode(sc, &ramrod_param); 1554 if (rc < 0) { 1555 PMD_RX_LOG(ERR, "Set rx_mode %d failed", sc->rx_mode); 1556 return rc; 1557 } 1558 1559 return 0; 1560 } 1561 1562 int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc) 1563 { 1564 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 1565 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 1566 int rc; 1567 1568 rc = bnx2x_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, 1569 &tx_accept_flags); 1570 if (rc) { 1571 return rc; 1572 } 1573 1574 bnx2x_set_bit(RAMROD_RX, &ramrod_flags); 1575 bnx2x_set_bit(RAMROD_TX, &ramrod_flags); 1576 bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 1577 1578 return bnx2x_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, 1579 rx_accept_flags, tx_accept_flags, 1580 ramrod_flags); 1581 } 1582 1583 /* returns the "mcp load_code" according to global load_count array */ 1584 static int bnx2x_nic_load_no_mcp(struct bnx2x_softc *sc) 1585 { 1586 int path = SC_PATH(sc); 1587 int port = SC_PORT(sc); 1588 1589 PMD_DRV_LOG(INFO, sc, "NO MCP - load counts[%d] %d, %d, %d", 1590 path, load_count[path][0], load_count[path][1], 1591 load_count[path][2]); 1592 1593 load_count[path][0]++; 1594 load_count[path][1 + port]++; 1595 PMD_DRV_LOG(INFO, sc, "NO MCP - new load counts[%d] %d, %d, %d", 1596 path, load_count[path][0], load_count[path][1], 1597 load_count[path][2]); 1598 if (load_count[path][0] == 1) 1599 return FW_MSG_CODE_DRV_LOAD_COMMON; 1600 else if (load_count[path][1 + port] == 
1) 1601 return FW_MSG_CODE_DRV_LOAD_PORT; 1602 else 1603 return FW_MSG_CODE_DRV_LOAD_FUNCTION; 1604 } 1605 1606 /* returns the "mcp load_code" according to global load_count array */ 1607 static int bnx2x_nic_unload_no_mcp(struct bnx2x_softc *sc) 1608 { 1609 int port = SC_PORT(sc); 1610 int path = SC_PATH(sc); 1611 1612 PMD_DRV_LOG(INFO, sc, "NO MCP - load counts[%d] %d, %d, %d", 1613 path, load_count[path][0], load_count[path][1], 1614 load_count[path][2]); 1615 load_count[path][0]--; 1616 load_count[path][1 + port]--; 1617 PMD_DRV_LOG(INFO, sc, "NO MCP - new load counts[%d] %d, %d, %d", 1618 path, load_count[path][0], load_count[path][1], 1619 load_count[path][2]); 1620 if (load_count[path][0] == 0) { 1621 return FW_MSG_CODE_DRV_UNLOAD_COMMON; 1622 } else if (load_count[path][1 + port] == 0) { 1623 return FW_MSG_CODE_DRV_UNLOAD_PORT; 1624 } else { 1625 return FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 1626 } 1627 } 1628 1629 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */ 1630 static uint32_t bnx2x_send_unload_req(struct bnx2x_softc *sc, int unload_mode) 1631 { 1632 uint32_t reset_code = 0; 1633 1634 /* Select the UNLOAD request mode */ 1635 if (unload_mode == UNLOAD_NORMAL) { 1636 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 1637 } else { 1638 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 1639 } 1640 1641 /* Send the request to the MCP */ 1642 if (!BNX2X_NOMCP(sc)) { 1643 reset_code = bnx2x_fw_command(sc, reset_code, 0); 1644 } else { 1645 reset_code = bnx2x_nic_unload_no_mcp(sc); 1646 } 1647 1648 return reset_code; 1649 } 1650 1651 /* send UNLOAD_DONE command to the MCP */ 1652 static void bnx2x_send_unload_done(struct bnx2x_softc *sc, uint8_t keep_link) 1653 { 1654 uint32_t reset_param = 1655 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; 1656 1657 /* Report UNLOAD_DONE to MCP */ 1658 if (!BNX2X_NOMCP(sc)) { 1659 bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); 1660 } 1661 } 1662 1663 static int bnx2x_func_wait_started(struct bnx2x_softc *sc) 1664 { 1665 int tout = 50; 1666 1667 if (!sc->port.pmf) { 1668 return 0; 1669 } 1670 1671 /* 1672 * (assumption: No Attention from MCP at this stage) 1673 * PMF probably in the middle of TX disable/enable transaction 1674 * 1. Sync IRS for default SB 1675 * 2. Sync SP queue - this guarantees us that attention handling started 1676 * 3. Wait, that TX disable/enable transaction completes 1677 * 1678 * 1+2 guarantee that if DCBX attention was scheduled it already changed 1679 * pending bit of transaction from STARTED-->TX_STOPPED, if we already 1680 * received completion for the transaction the state is TX_STOPPED. 1681 * State will return to STARTED after completion of TX_STOPPED-->STARTED 1682 * transaction. 1683 */ 1684 1685 while (ecore_func_get_state(sc, &sc->func_obj) != 1686 ECORE_F_STATE_STARTED && tout--) { 1687 DELAY(20000); 1688 } 1689 1690 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { 1691 /* 1692 * Failed to complete the transaction in a "good way" 1693 * Force both transactions with CLR bit. 1694 */ 1695 struct ecore_func_state_params func_params = { NULL }; 1696 1697 PMD_DRV_LOG(NOTICE, sc, "Unexpected function state! 
" 1698 "Forcing STARTED-->TX_STOPPED-->STARTED"); 1699 1700 func_params.f_obj = &sc->func_obj; 1701 bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 1702 1703 /* STARTED-->TX_STOPPED */ 1704 func_params.cmd = ECORE_F_CMD_TX_STOP; 1705 ecore_func_state_change(sc, &func_params); 1706 1707 /* TX_STOPPED-->STARTED */ 1708 func_params.cmd = ECORE_F_CMD_TX_START; 1709 return ecore_func_state_change(sc, &func_params); 1710 } 1711 1712 return 0; 1713 } 1714 1715 static int bnx2x_stop_queue(struct bnx2x_softc *sc, int index) 1716 { 1717 struct bnx2x_fastpath *fp = &sc->fp[index]; 1718 struct ecore_queue_state_params q_params = { NULL }; 1719 int rc; 1720 1721 PMD_DRV_LOG(DEBUG, sc, "stopping queue %d cid %d", index, fp->index); 1722 1723 q_params.q_obj = &sc->sp_objs[fp->index].q_obj; 1724 /* We want to wait for completion in this context */ 1725 bnx2x_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 1726 1727 /* Stop the primary connection: */ 1728 1729 /* ...halt the connection */ 1730 q_params.cmd = ECORE_Q_CMD_HALT; 1731 rc = ecore_queue_state_change(sc, &q_params); 1732 if (rc) { 1733 return rc; 1734 } 1735 1736 /* ...terminate the connection */ 1737 q_params.cmd = ECORE_Q_CMD_TERMINATE; 1738 memset(&q_params.params.terminate, 0, 1739 sizeof(q_params.params.terminate)); 1740 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; 1741 rc = ecore_queue_state_change(sc, &q_params); 1742 if (rc) { 1743 return rc; 1744 } 1745 1746 /* ...delete cfc entry */ 1747 q_params.cmd = ECORE_Q_CMD_CFC_DEL; 1748 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); 1749 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; 1750 return ecore_queue_state_change(sc, &q_params); 1751 } 1752 1753 /* wait for the outstanding SP commands */ 1754 static uint8_t bnx2x_wait_sp_comp(struct bnx2x_softc *sc, unsigned long mask) 1755 { 1756 unsigned long tmp; 1757 int tout = 5000; /* wait for 5 secs tops */ 1758 1759 while (tout--) { 1760 mb(); 1761 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) { 1762 return TRUE; 1763 } 1764 1765 DELAY(1000); 1766 } 1767 1768 mb(); 1769 1770 tmp = atomic_load_acq_long(&sc->sp_state); 1771 if (tmp & mask) { 1772 PMD_DRV_LOG(INFO, sc, "Filtering completion timed out: " 1773 "sp_state 0x%lx, mask 0x%lx", tmp, mask); 1774 return FALSE; 1775 } 1776 1777 return FALSE; 1778 } 1779 1780 static int bnx2x_func_stop(struct bnx2x_softc *sc) 1781 { 1782 struct ecore_func_state_params func_params = { NULL }; 1783 int rc; 1784 1785 /* prepare parameters for function state transitions */ 1786 bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 1787 func_params.f_obj = &sc->func_obj; 1788 func_params.cmd = ECORE_F_CMD_STOP; 1789 1790 /* 1791 * Try to stop the function the 'good way'. If it fails (in case 1792 * of a parity error during bnx2x_chip_cleanup()) and we are 1793 * not in a debug mode, perform a state transaction in order to 1794 * enable further HW_RESET transaction. 1795 */ 1796 rc = ecore_func_state_change(sc, &func_params); 1797 if (rc) { 1798 PMD_DRV_LOG(NOTICE, sc, "FUNC_STOP ramrod failed. 
" 1799 "Running a dry transaction"); 1800 bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 1801 return ecore_func_state_change(sc, &func_params); 1802 } 1803 1804 return 0; 1805 } 1806 1807 static int bnx2x_reset_hw(struct bnx2x_softc *sc, uint32_t load_code) 1808 { 1809 struct ecore_func_state_params func_params = { NULL }; 1810 1811 /* Prepare parameters for function state transitions */ 1812 bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 1813 1814 func_params.f_obj = &sc->func_obj; 1815 func_params.cmd = ECORE_F_CMD_HW_RESET; 1816 1817 func_params.params.hw_init.load_phase = load_code; 1818 1819 return ecore_func_state_change(sc, &func_params); 1820 } 1821 1822 static void bnx2x_int_disable_sync(struct bnx2x_softc *sc, int disable_hw) 1823 { 1824 if (disable_hw) { 1825 /* prevent the HW from sending interrupts */ 1826 bnx2x_int_disable(sc); 1827 } 1828 } 1829 1830 static void 1831 bnx2x_chip_cleanup(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link) 1832 { 1833 int port = SC_PORT(sc); 1834 struct ecore_mcast_ramrod_params rparam = { NULL }; 1835 uint32_t reset_code; 1836 int i, rc = 0; 1837 1838 bnx2x_drain_tx_queues(sc); 1839 1840 /* give HW time to discard old tx messages */ 1841 DELAY(1000); 1842 1843 /* Clean all ETH MACs */ 1844 rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, 1845 FALSE); 1846 if (rc < 0) { 1847 PMD_DRV_LOG(NOTICE, sc, 1848 "Failed to delete all ETH MACs (%d)", rc); 1849 } 1850 1851 /* Clean up UC list */ 1852 rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, 1853 TRUE); 1854 if (rc < 0) { 1855 PMD_DRV_LOG(NOTICE, sc, 1856 "Failed to delete UC MACs list (%d)", rc); 1857 } 1858 1859 /* Disable LLH */ 1860 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0); 1861 1862 /* Set "drop all" to stop Rx */ 1863 1864 /* 1865 * We need to take the if_maddr_lock() here in order to prevent 1866 * a race between the completion code and this code. 1867 */ 1868 1869 if (bnx2x_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 1870 bnx2x_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 1871 } else { 1872 bnx2x_set_storm_rx_mode(sc); 1873 } 1874 1875 /* Clean up multicast configuration */ 1876 rparam.mcast_obj = &sc->mcast_obj; 1877 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 1878 if (rc < 0) { 1879 PMD_DRV_LOG(NOTICE, sc, 1880 "Failed to send DEL MCAST command (%d)", rc); 1881 } 1882 1883 /* 1884 * Send the UNLOAD_REQUEST to the MCP. This will return if 1885 * this function should perform FUNCTION, PORT, or COMMON HW 1886 * reset. 1887 */ 1888 reset_code = bnx2x_send_unload_req(sc, unload_mode); 1889 1890 /* 1891 * (assumption: No Attention from MCP at this stage) 1892 * PMF probably in the middle of TX disable/enable transaction 1893 */ 1894 rc = bnx2x_func_wait_started(sc); 1895 if (rc) { 1896 PMD_DRV_LOG(NOTICE, sc, "bnx2x_func_wait_started failed"); 1897 } 1898 1899 /* 1900 * Close multi and leading connections 1901 * Completions for ramrods are collected in a synchronous way 1902 */ 1903 for (i = 0; i < sc->num_queues; i++) { 1904 if (bnx2x_stop_queue(sc, i)) { 1905 goto unload_error; 1906 } 1907 } 1908 1909 /* 1910 * If SP settings didn't get completed so far - something 1911 * very wrong has happen. 
1912 */ 1913 if (!bnx2x_wait_sp_comp(sc, ~0x0UL)) { 1914 PMD_DRV_LOG(NOTICE, sc, "Common slow path ramrods got stuck!"); 1915 } 1916 1917 unload_error: 1918 1919 rc = bnx2x_func_stop(sc); 1920 if (rc) { 1921 PMD_DRV_LOG(NOTICE, sc, "Function stop failed!"); 1922 } 1923 1924 /* disable HW interrupts */ 1925 bnx2x_int_disable_sync(sc, TRUE); 1926 1927 /* Reset the chip */ 1928 rc = bnx2x_reset_hw(sc, reset_code); 1929 if (rc) { 1930 PMD_DRV_LOG(NOTICE, sc, "Hardware reset failed"); 1931 } 1932 1933 /* Report UNLOAD_DONE to MCP */ 1934 bnx2x_send_unload_done(sc, keep_link); 1935 } 1936 1937 static void bnx2x_disable_close_the_gate(struct bnx2x_softc *sc) 1938 { 1939 uint32_t val; 1940 1941 PMD_DRV_LOG(DEBUG, sc, "Disabling 'close the gates'"); 1942 1943 val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK); 1944 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | 1945 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); 1946 REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val); 1947 } 1948 1949 /* 1950 * Cleans the object that have internal lists without sending 1951 * ramrods. Should be run when interrutps are disabled. 1952 */ 1953 static void bnx2x_squeeze_objects(struct bnx2x_softc *sc) 1954 { 1955 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 1956 struct ecore_mcast_ramrod_params rparam = { NULL }; 1957 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; 1958 int rc; 1959 1960 /* Cleanup MACs' object first... */ 1961 1962 /* Wait for completion of requested */ 1963 bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 1964 /* Perform a dry cleanup */ 1965 bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); 1966 1967 /* Clean ETH primary MAC */ 1968 bnx2x_set_bit(ECORE_ETH_MAC, &vlan_mac_flags); 1969 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags, 1970 &ramrod_flags); 1971 if (rc != 0) { 1972 PMD_DRV_LOG(NOTICE, sc, "Failed to clean ETH MACs (%d)", rc); 1973 } 1974 1975 /* Cleanup UC list */ 1976 vlan_mac_flags = 0; 1977 bnx2x_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags); 1978 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); 1979 if (rc != 0) { 1980 PMD_DRV_LOG(NOTICE, sc, 1981 "Failed to clean UC list MACs (%d)", rc); 1982 } 1983 1984 /* Now clean mcast object... */ 1985 1986 rparam.mcast_obj = &sc->mcast_obj; 1987 bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); 1988 1989 /* Add a DEL command... 
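 * (RAMROD_DRV_CLR_ONLY was set on rparam above, so this DEL is intended
 * as a driver-only cleanup of the multicast object rather than an actual
 * ramrod to the chip.)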
*/ 1990 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 1991 if (rc < 0) { 1992 PMD_DRV_LOG(NOTICE, sc, 1993 "Failed to send DEL MCAST command (%d)", rc); 1994 } 1995 1996 /* now wait until all pending commands are cleared */ 1997 1998 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 1999 while (rc != 0) { 2000 if (rc < 0) { 2001 PMD_DRV_LOG(NOTICE, sc, 2002 "Failed to clean MCAST object (%d)", rc); 2003 return; 2004 } 2005 2006 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 2007 } 2008 } 2009 2010 /* stop the controller */ 2011 __rte_noinline 2012 int 2013 bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link) 2014 { 2015 uint8_t global = FALSE; 2016 uint32_t val; 2017 2018 PMD_INIT_FUNC_TRACE(sc); 2019 2020 PMD_DRV_LOG(DEBUG, sc, "Starting NIC unload..."); 2021 2022 /* mark driver as unloaded in shmem2 */ 2023 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 2024 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 2025 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 2026 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); 2027 } 2028 2029 if (IS_PF(sc) && sc->recovery_state != BNX2X_RECOVERY_DONE && 2030 (sc->state == BNX2X_STATE_CLOSED || sc->state == BNX2X_STATE_ERROR)) { 2031 /* 2032 * We can get here if the driver has been unloaded 2033 * during parity error recovery and is either waiting for a 2034 * leader to complete or for other functions to unload and 2035 * then ifconfig down has been issued. In this case we want to 2036 * unload and let other functions to complete a recovery 2037 * process. 2038 */ 2039 sc->recovery_state = BNX2X_RECOVERY_DONE; 2040 sc->is_leader = 0; 2041 bnx2x_release_leader_lock(sc); 2042 mb(); 2043 2044 PMD_DRV_LOG(NOTICE, sc, "Can't unload in closed or error state"); 2045 return -1; 2046 } 2047 2048 /* 2049 * Nothing to do during unload if previous bnx2x_nic_load() 2050 * did not completed successfully - all resourses are released. 2051 */ 2052 if ((sc->state == BNX2X_STATE_CLOSED) || (sc->state == BNX2X_STATE_ERROR)) { 2053 return 0; 2054 } 2055 2056 sc->state = BNX2X_STATE_CLOSING_WAITING_HALT; 2057 mb(); 2058 2059 sc->rx_mode = BNX2X_RX_MODE_NONE; 2060 bnx2x_set_rx_mode(sc); 2061 mb(); 2062 2063 if (IS_PF(sc)) { 2064 /* set ALWAYS_ALIVE bit in shmem */ 2065 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 2066 2067 bnx2x_drv_pulse(sc); 2068 2069 bnx2x_stats_handle(sc, STATS_EVENT_STOP); 2070 bnx2x_save_statistics(sc); 2071 } 2072 2073 /* wait till consumers catch up with producers in all queues */ 2074 bnx2x_drain_tx_queues(sc); 2075 2076 /* if VF indicate to PF this function is going down (PF will delete sp 2077 * elements and clear initializations 2078 */ 2079 if (IS_VF(sc)) { 2080 bnx2x_vf_unload(sc); 2081 } else if (unload_mode != UNLOAD_RECOVERY) { 2082 /* if this is a normal/close unload need to clean up chip */ 2083 bnx2x_chip_cleanup(sc, unload_mode, keep_link); 2084 } else { 2085 /* Send the UNLOAD_REQUEST to the MCP */ 2086 bnx2x_send_unload_req(sc, unload_mode); 2087 2088 /* 2089 * Prevent transactions to host from the functions on the 2090 * engine that doesn't reset global blocks in case of global 2091 * attention once gloabl blocks are reset and gates are opened 2092 * (the engine which leader will perform the recovery 2093 * last). 
2094 */ 2095 if (!CHIP_IS_E1x(sc)) { 2096 bnx2x_pf_disable(sc); 2097 } 2098 2099 /* disable HW interrupts */ 2100 bnx2x_int_disable_sync(sc, TRUE); 2101 2102 /* Report UNLOAD_DONE to MCP */ 2103 bnx2x_send_unload_done(sc, FALSE); 2104 } 2105 2106 /* 2107 * At this stage no more interrupts will arrive so we may safely clean 2108 * the queue'able objects here in case they failed to get cleaned so far. 2109 */ 2110 if (IS_PF(sc)) { 2111 bnx2x_squeeze_objects(sc); 2112 } 2113 2114 /* There should be no more pending SP commands at this stage */ 2115 sc->sp_state = 0; 2116 2117 sc->port.pmf = 0; 2118 2119 if (IS_PF(sc)) { 2120 bnx2x_free_mem(sc); 2121 } 2122 2123 /* free the host hardware/software hsi structures */ 2124 bnx2x_free_hsi_mem(sc); 2125 2126 bnx2x_free_fw_stats_mem(sc); 2127 2128 sc->state = BNX2X_STATE_CLOSED; 2129 2130 /* 2131 * Check if there are pending parity attentions. If there are - set 2132 * RECOVERY_IN_PROGRESS. 2133 */ 2134 if (IS_PF(sc) && bnx2x_chk_parity_attn(sc, &global, FALSE)) { 2135 bnx2x_set_reset_in_progress(sc); 2136 2137 /* Set RESET_IS_GLOBAL if needed */ 2138 if (global) { 2139 bnx2x_set_reset_global(sc); 2140 } 2141 } 2142 2143 /* 2144 * The last driver must disable a "close the gate" if there is no 2145 * parity attention or "process kill" pending. 2146 */ 2147 if (IS_PF(sc) && !bnx2x_clear_pf_load(sc) && 2148 bnx2x_reset_is_done(sc, SC_PATH(sc))) { 2149 bnx2x_disable_close_the_gate(sc); 2150 } 2151 2152 PMD_DRV_LOG(DEBUG, sc, "Ended NIC unload"); 2153 2154 return 0; 2155 } 2156 2157 /* 2158 * Encapsulte an mbuf cluster into the tx bd chain and makes the memory 2159 * visible to the controller. 2160 * 2161 * If an mbuf is submitted to this routine and cannot be given to the 2162 * controller (e.g. it has too many fragments) then the function may free 2163 * the mbuf and return to the caller. 2164 * 2165 * Returns: 2166 * int: Number of TX BDs used for the mbuf 2167 * 2168 * Note the side effect that an mbuf may be freed if it causes a problem. 
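 *
 * In the single-segment path below each packet consumes two BDs (the
 * start BD plus the parsing/next BD), which is why nbd is set to 2,
 * nb_tx_avail is decremented by 2 and the return value is nbds + 2.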
2169 */ 2170 int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0) 2171 { 2172 struct eth_tx_start_bd *tx_start_bd; 2173 uint16_t bd_prod, pkt_prod; 2174 struct bnx2x_softc *sc; 2175 uint32_t nbds = 0; 2176 2177 sc = txq->sc; 2178 bd_prod = txq->tx_bd_tail; 2179 pkt_prod = txq->tx_pkt_tail; 2180 2181 txq->sw_ring[TX_BD(pkt_prod, txq)] = m0; 2182 2183 tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd; 2184 2185 tx_start_bd->addr = 2186 rte_cpu_to_le_64(rte_mbuf_data_iova(m0)); 2187 tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len); 2188 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 2189 tx_start_bd->general_data = 2190 (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 2191 2192 tx_start_bd->nbd = rte_cpu_to_le_16(2); 2193 2194 if (m0->ol_flags & PKT_TX_VLAN_PKT) { 2195 tx_start_bd->vlan_or_ethertype = 2196 rte_cpu_to_le_16(m0->vlan_tci); 2197 tx_start_bd->bd_flags.as_bitfield |= 2198 (X_ETH_OUTBAND_VLAN << 2199 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 2200 } else { 2201 if (IS_PF(sc)) 2202 tx_start_bd->vlan_or_ethertype = 2203 rte_cpu_to_le_16(pkt_prod); 2204 else { 2205 struct rte_ether_hdr *eh = 2206 rte_pktmbuf_mtod(m0, struct rte_ether_hdr *); 2207 2208 tx_start_bd->vlan_or_ethertype = 2209 rte_cpu_to_le_16(rte_be_to_cpu_16(eh->ether_type)); 2210 } 2211 } 2212 2213 bd_prod = NEXT_TX_BD(bd_prod); 2214 if (IS_VF(sc)) { 2215 struct eth_tx_parse_bd_e2 *tx_parse_bd; 2216 const struct rte_ether_hdr *eh = 2217 rte_pktmbuf_mtod(m0, struct rte_ether_hdr *); 2218 uint8_t mac_type = UNICAST_ADDRESS; 2219 2220 tx_parse_bd = 2221 &txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2; 2222 if (rte_is_multicast_ether_addr(&eh->d_addr)) { 2223 if (rte_is_broadcast_ether_addr(&eh->d_addr)) 2224 mac_type = BROADCAST_ADDRESS; 2225 else 2226 mac_type = MULTICAST_ADDRESS; 2227 } 2228 tx_parse_bd->parsing_data = 2229 (mac_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT); 2230 2231 rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi, 2232 &eh->d_addr.addr_bytes[0], 2); 2233 rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid, 2234 &eh->d_addr.addr_bytes[2], 2); 2235 rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo, 2236 &eh->d_addr.addr_bytes[4], 2); 2237 rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi, 2238 &eh->s_addr.addr_bytes[0], 2); 2239 rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid, 2240 &eh->s_addr.addr_bytes[2], 2); 2241 rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo, 2242 &eh->s_addr.addr_bytes[4], 2); 2243 2244 tx_parse_bd->data.mac_addr.dst_hi = 2245 rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_hi); 2246 tx_parse_bd->data.mac_addr.dst_mid = 2247 rte_cpu_to_be_16(tx_parse_bd->data. 2248 mac_addr.dst_mid); 2249 tx_parse_bd->data.mac_addr.dst_lo = 2250 rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_lo); 2251 tx_parse_bd->data.mac_addr.src_hi = 2252 rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_hi); 2253 tx_parse_bd->data.mac_addr.src_mid = 2254 rte_cpu_to_be_16(tx_parse_bd->data. 
2255 mac_addr.src_mid); 2256 tx_parse_bd->data.mac_addr.src_lo = 2257 rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_lo); 2258 2259 PMD_TX_LOG(DEBUG, 2260 "PBD dst %x %x %x src %x %x %x p_data %x", 2261 tx_parse_bd->data.mac_addr.dst_hi, 2262 tx_parse_bd->data.mac_addr.dst_mid, 2263 tx_parse_bd->data.mac_addr.dst_lo, 2264 tx_parse_bd->data.mac_addr.src_hi, 2265 tx_parse_bd->data.mac_addr.src_mid, 2266 tx_parse_bd->data.mac_addr.src_lo, 2267 tx_parse_bd->parsing_data); 2268 } 2269 2270 PMD_TX_LOG(DEBUG, 2271 "start bd: nbytes %d flags %x vlan %x", 2272 tx_start_bd->nbytes, 2273 tx_start_bd->bd_flags.as_bitfield, 2274 tx_start_bd->vlan_or_ethertype); 2275 2276 bd_prod = NEXT_TX_BD(bd_prod); 2277 pkt_prod++; 2278 2279 if (TX_IDX(bd_prod) < 2) 2280 nbds++; 2281 2282 txq->nb_tx_avail -= 2; 2283 txq->tx_bd_tail = bd_prod; 2284 txq->tx_pkt_tail = pkt_prod; 2285 2286 return nbds + 2; 2287 } 2288 2289 static uint16_t bnx2x_cid_ilt_lines(struct bnx2x_softc *sc) 2290 { 2291 return L2_ILT_LINES(sc); 2292 } 2293 2294 static void bnx2x_ilt_set_info(struct bnx2x_softc *sc) 2295 { 2296 struct ilt_client_info *ilt_client; 2297 struct ecore_ilt *ilt = sc->ilt; 2298 uint16_t line = 0; 2299 2300 PMD_INIT_FUNC_TRACE(sc); 2301 2302 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); 2303 2304 /* CDU */ 2305 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 2306 ilt_client->client_num = ILT_CLIENT_CDU; 2307 ilt_client->page_size = CDU_ILT_PAGE_SZ; 2308 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 2309 ilt_client->start = line; 2310 line += bnx2x_cid_ilt_lines(sc); 2311 2312 if (CNIC_SUPPORT(sc)) { 2313 line += CNIC_ILT_LINES; 2314 } 2315 2316 ilt_client->end = (line - 1); 2317 2318 /* QM */ 2319 if (QM_INIT(sc->qm_cid_count)) { 2320 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 2321 ilt_client->client_num = ILT_CLIENT_QM; 2322 ilt_client->page_size = QM_ILT_PAGE_SZ; 2323 ilt_client->flags = 0; 2324 ilt_client->start = line; 2325 2326 /* 4 bytes for each cid */ 2327 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 2328 QM_ILT_PAGE_SZ); 2329 2330 ilt_client->end = (line - 1); 2331 } 2332 2333 if (CNIC_SUPPORT(sc)) { 2334 /* SRC */ 2335 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 2336 ilt_client->client_num = ILT_CLIENT_SRC; 2337 ilt_client->page_size = SRC_ILT_PAGE_SZ; 2338 ilt_client->flags = 0; 2339 ilt_client->start = line; 2340 line += SRC_ILT_LINES; 2341 ilt_client->end = (line - 1); 2342 2343 /* TM */ 2344 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 2345 ilt_client->client_num = ILT_CLIENT_TM; 2346 ilt_client->page_size = TM_ILT_PAGE_SZ; 2347 ilt_client->flags = 0; 2348 ilt_client->start = line; 2349 line += TM_ILT_LINES; 2350 ilt_client->end = (line - 1); 2351 } 2352 2353 assert((line <= ILT_MAX_LINES)); 2354 } 2355 2356 static void bnx2x_set_fp_rx_buf_size(struct bnx2x_softc *sc) 2357 { 2358 int i; 2359 2360 for (i = 0; i < sc->num_queues; i++) { 2361 /* get the Rx buffer size for RX frames */ 2362 sc->fp[i].rx_buf_size = 2363 (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); 2364 } 2365 } 2366 2367 int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc) 2368 { 2369 2370 sc->ilt = rte_malloc("", sizeof(struct ecore_ilt), RTE_CACHE_LINE_SIZE); 2371 2372 return sc->ilt == NULL; 2373 } 2374 2375 static int bnx2x_alloc_ilt_lines_mem(struct bnx2x_softc *sc) 2376 { 2377 sc->ilt->lines = rte_calloc("", 2378 sizeof(struct ilt_line), ILT_MAX_LINES, 2379 RTE_CACHE_LINE_SIZE); 2380 return sc->ilt->lines == NULL; 2381 } 2382 2383 void bnx2x_free_ilt_mem(struct bnx2x_softc *sc) 2384 { 2385 rte_free(sc->ilt); 2386 sc->ilt = 
NULL; 2387 } 2388 2389 static void bnx2x_free_ilt_lines_mem(struct bnx2x_softc *sc) 2390 { 2391 if (sc->ilt->lines != NULL) { 2392 rte_free(sc->ilt->lines); 2393 sc->ilt->lines = NULL; 2394 } 2395 } 2396 2397 static void bnx2x_free_mem(struct bnx2x_softc *sc) 2398 { 2399 uint32_t i; 2400 2401 for (i = 0; i < L2_ILT_LINES(sc); i++) { 2402 sc->context[i].vcxt = NULL; 2403 sc->context[i].size = 0; 2404 } 2405 2406 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); 2407 2408 bnx2x_free_ilt_lines_mem(sc); 2409 } 2410 2411 static int bnx2x_alloc_mem(struct bnx2x_softc *sc) 2412 { 2413 int context_size; 2414 int allocated; 2415 int i; 2416 char cdu_name[RTE_MEMZONE_NAMESIZE]; 2417 2418 /* 2419 * Allocate memory for CDU context: 2420 * This memory is allocated separately and not in the generic ILT 2421 * functions because CDU differs in few aspects: 2422 * 1. There can be multiple entities allocating memory for context - 2423 * regular L2, CNIC, and SRIOV drivers. Each separately controls 2424 * its own ILT lines. 2425 * 2. Since CDU page-size is not a single 4KB page (which is the case 2426 * for the other ILT clients), to be efficient we want to support 2427 * allocation of sub-page-size in the last entry. 2428 * 3. Context pointers are used by the driver to pass to FW / update 2429 * the context (for the other ILT clients the pointers are used just to 2430 * free the memory during unload). 2431 */ 2432 context_size = (sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(sc)); 2433 for (i = 0, allocated = 0; allocated < context_size; i++) { 2434 sc->context[i].size = min(CDU_ILT_PAGE_SZ, 2435 (context_size - allocated)); 2436 2437 snprintf(cdu_name, sizeof(cdu_name), "cdu_%d", i); 2438 if (bnx2x_dma_alloc(sc, sc->context[i].size, 2439 &sc->context[i].vcxt_dma, 2440 cdu_name, BNX2X_PAGE_SIZE) != 0) { 2441 bnx2x_free_mem(sc); 2442 return -1; 2443 } 2444 2445 sc->context[i].vcxt = 2446 (union cdu_context *)sc->context[i].vcxt_dma.vaddr; 2447 2448 allocated += sc->context[i].size; 2449 } 2450 2451 bnx2x_alloc_ilt_lines_mem(sc); 2452 2453 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { 2454 PMD_DRV_LOG(NOTICE, sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed"); 2455 bnx2x_free_mem(sc); 2456 return -1; 2457 } 2458 2459 return 0; 2460 } 2461 2462 static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc) 2463 { 2464 bnx2x_dma_free(&sc->fw_stats_dma); 2465 sc->fw_stats_num = 0; 2466 2467 sc->fw_stats_req_size = 0; 2468 sc->fw_stats_req = NULL; 2469 sc->fw_stats_req_mapping = 0; 2470 2471 sc->fw_stats_data_size = 0; 2472 sc->fw_stats_data = NULL; 2473 sc->fw_stats_data_mapping = 0; 2474 } 2475 2476 static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc) 2477 { 2478 uint8_t num_queue_stats; 2479 int num_groups, vf_headroom = 0; 2480 2481 /* number of queues for statistics is number of eth queues */ 2482 num_queue_stats = BNX2X_NUM_ETH_QUEUES(sc); 2483 2484 /* 2485 * Total number of FW statistics requests = 2486 * 1 for port stats + 1 for PF stats + num of queues 2487 */ 2488 sc->fw_stats_num = (2 + num_queue_stats); 2489 2490 /* 2491 * Request is built from stats_query_header and an array of 2492 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT 2493 * rules. The real number or requests is configured in the 2494 * stats_query_header. 
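 * For example (illustrative numbers only), with 4 ETH queues the driver
 * issues 4 + 2 = 6 requests, which the computation below packs into
 * ceil(6 / STATS_QUERY_CMD_COUNT) command groups.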
2495 */ 2496 num_groups = (sc->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT; 2497 if ((sc->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) 2498 num_groups++; 2499 2500 sc->fw_stats_req_size = 2501 (sizeof(struct stats_query_header) + 2502 (num_groups * sizeof(struct stats_query_cmd_group))); 2503 2504 /* 2505 * Data for statistics requests + stats_counter. 2506 * stats_counter holds per-STORM counters that are incremented when 2507 * STORM has finished with the current request. Memory for FCoE 2508 * offloaded statistics are counted anyway, even if they will not be sent. 2509 * VF stats are not accounted for here as the data of VF stats is stored 2510 * in memory allocated by the VF, not here. 2511 */ 2512 sc->fw_stats_data_size = 2513 (sizeof(struct stats_counter) + 2514 sizeof(struct per_port_stats) + sizeof(struct per_pf_stats) + 2515 /* sizeof(struct fcoe_statistics_params) + */ 2516 (sizeof(struct per_queue_stats) * num_queue_stats)); 2517 2518 if (bnx2x_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), 2519 &sc->fw_stats_dma, "fw_stats", 2520 RTE_CACHE_LINE_SIZE) != 0) { 2521 bnx2x_free_fw_stats_mem(sc); 2522 return -1; 2523 } 2524 2525 /* set up the shortcuts */ 2526 2527 sc->fw_stats_req = (struct bnx2x_fw_stats_req *)sc->fw_stats_dma.vaddr; 2528 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; 2529 2530 sc->fw_stats_data = 2531 (struct bnx2x_fw_stats_data *)((uint8_t *) sc->fw_stats_dma.vaddr + 2532 sc->fw_stats_req_size); 2533 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + 2534 sc->fw_stats_req_size); 2535 2536 return 0; 2537 } 2538 2539 /* 2540 * Bits map: 2541 * 0-7 - Engine0 load counter. 2542 * 8-15 - Engine1 load counter. 2543 * 16 - Engine0 RESET_IN_PROGRESS bit. 2544 * 17 - Engine1 RESET_IN_PROGRESS bit. 2545 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active 2546 * function on the engine 2547 * 19 - Engine1 ONE_IS_LOADED. 2548 * 20 - Chip reset flow bit. When set none-leader must wait for both engines 2549 * leader to complete (check for both RESET_IN_PROGRESS bits and not 2550 * for just the one belonging to its engine). 
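 *
 * Note: the per-engine "load counter" fields are maintained as per-PF
 * bitmasks - bnx2x_set_pf_load()/bnx2x_clear_pf_load() below set and
 * clear bit (1 << SC_ABS_FUNC(sc)) within the corresponding byte.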
2551 */ 2552 #define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 2553 #define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff 2554 #define BNX2X_PATH0_LOAD_CNT_SHIFT 0 2555 #define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00 2556 #define BNX2X_PATH1_LOAD_CNT_SHIFT 8 2557 #define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000 2558 #define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000 2559 #define BNX2X_GLOBAL_RESET_BIT 0x00040000 2560 2561 /* set the GLOBAL_RESET bit, should be run under rtnl lock */ 2562 static void bnx2x_set_reset_global(struct bnx2x_softc *sc) 2563 { 2564 uint32_t val; 2565 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2566 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2567 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); 2568 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2569 } 2570 2571 /* clear the GLOBAL_RESET bit, should be run under rtnl lock */ 2572 static void bnx2x_clear_reset_global(struct bnx2x_softc *sc) 2573 { 2574 uint32_t val; 2575 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2576 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2577 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); 2578 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2579 } 2580 2581 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */ 2582 static uint8_t bnx2x_reset_is_global(struct bnx2x_softc *sc) 2583 { 2584 return REG_RD(sc, BNX2X_RECOVERY_GLOB_REG) & BNX2X_GLOBAL_RESET_BIT; 2585 } 2586 2587 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */ 2588 static void bnx2x_set_reset_done(struct bnx2x_softc *sc) 2589 { 2590 uint32_t val; 2591 uint32_t bit = SC_PATH(sc) ? BNX2X_PATH1_RST_IN_PROG_BIT : 2592 BNX2X_PATH0_RST_IN_PROG_BIT; 2593 2594 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2595 2596 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2597 /* Clear the bit */ 2598 val &= ~bit; 2599 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); 2600 2601 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2602 } 2603 2604 /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ 2605 static void bnx2x_set_reset_in_progress(struct bnx2x_softc *sc) 2606 { 2607 uint32_t val; 2608 uint32_t bit = SC_PATH(sc) ? BNX2X_PATH1_RST_IN_PROG_BIT : 2609 BNX2X_PATH0_RST_IN_PROG_BIT; 2610 2611 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2612 2613 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2614 /* Set the bit */ 2615 val |= bit; 2616 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); 2617 2618 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2619 } 2620 2621 /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ 2622 static uint8_t bnx2x_reset_is_done(struct bnx2x_softc *sc, int engine) 2623 { 2624 uint32_t val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2625 uint32_t bit = engine ? BNX2X_PATH1_RST_IN_PROG_BIT : 2626 BNX2X_PATH0_RST_IN_PROG_BIT; 2627 2628 /* return false if bit is set */ 2629 return (val & bit) ? FALSE : TRUE; 2630 } 2631 2632 /* get the load status for an engine, should be run under rtnl lock */ 2633 static uint8_t bnx2x_get_load_status(struct bnx2x_softc *sc, int engine) 2634 { 2635 uint32_t mask = engine ? BNX2X_PATH1_LOAD_CNT_MASK : 2636 BNX2X_PATH0_LOAD_CNT_MASK; 2637 uint32_t shift = engine ? 
BNX2X_PATH1_LOAD_CNT_SHIFT : 2638 BNX2X_PATH0_LOAD_CNT_SHIFT; 2639 uint32_t val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2640 2641 val = ((val & mask) >> shift); 2642 2643 return val != 0; 2644 } 2645 2646 /* set pf load mark */ 2647 static void bnx2x_set_pf_load(struct bnx2x_softc *sc) 2648 { 2649 uint32_t val; 2650 uint32_t val1; 2651 uint32_t mask = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_MASK : 2652 BNX2X_PATH0_LOAD_CNT_MASK; 2653 uint32_t shift = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 2654 BNX2X_PATH0_LOAD_CNT_SHIFT; 2655 2656 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2657 2658 PMD_INIT_FUNC_TRACE(sc); 2659 2660 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2661 2662 /* get the current counter value */ 2663 val1 = ((val & mask) >> shift); 2664 2665 /* set bit of this PF */ 2666 val1 |= (1 << SC_ABS_FUNC(sc)); 2667 2668 /* clear the old value */ 2669 val &= ~mask; 2670 2671 /* set the new one */ 2672 val |= ((val1 << shift) & mask); 2673 2674 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); 2675 2676 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2677 } 2678 2679 /* clear pf load mark */ 2680 static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc) 2681 { 2682 uint32_t val1, val; 2683 uint32_t mask = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_MASK : 2684 BNX2X_PATH0_LOAD_CNT_MASK; 2685 uint32_t shift = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 2686 BNX2X_PATH0_LOAD_CNT_SHIFT; 2687 2688 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2689 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2690 2691 /* get the current counter value */ 2692 val1 = (val & mask) >> shift; 2693 2694 /* clear bit of that PF */ 2695 val1 &= ~(1 << SC_ABS_FUNC(sc)); 2696 2697 /* clear the old value */ 2698 val &= ~mask; 2699 2700 /* set the new one */ 2701 val |= ((val1 << shift) & mask); 2702 2703 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); 2704 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2705 return val1 != 0; 2706 } 2707 2708 /* send load requrest to mcp and analyze response */ 2709 static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t * load_code) 2710 { 2711 PMD_INIT_FUNC_TRACE(sc); 2712 2713 /* init fw_seq */ 2714 sc->fw_seq = 2715 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 2716 DRV_MSG_SEQ_NUMBER_MASK); 2717 2718 PMD_DRV_LOG(DEBUG, sc, "initial fw_seq 0x%04x", sc->fw_seq); 2719 2720 #ifdef BNX2X_PULSE 2721 /* get the current FW pulse sequence */ 2722 sc->fw_drv_pulse_wr_seq = 2723 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) & 2724 DRV_PULSE_SEQ_MASK); 2725 #else 2726 /* set ALWAYS_ALIVE bit in shmem */ 2727 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 2728 bnx2x_drv_pulse(sc); 2729 #endif 2730 2731 /* load request */ 2732 (*load_code) = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 2733 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 2734 2735 /* if the MCP fails to respond we must abort */ 2736 if (!(*load_code)) { 2737 PMD_DRV_LOG(NOTICE, sc, "MCP response failure!"); 2738 return -1; 2739 } 2740 2741 /* if MCP refused then must abort */ 2742 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { 2743 PMD_DRV_LOG(NOTICE, sc, "MCP refused load request"); 2744 return -1; 2745 } 2746 2747 return 0; 2748 } 2749 2750 /* 2751 * Check whether another PF has already loaded FW to chip. In virtualized 2752 * environments a pf from anoth VM may have already initialized the device 2753 * including loading FW. 
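 * The check below builds the driver's expected FW version dword and
 * compares it with the version read back from XSEM_REG_PRAM; a mismatch
 * aborts the NIC load.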
2754 */ 2755 static int bnx2x_nic_load_analyze_req(struct bnx2x_softc *sc, uint32_t load_code) 2756 { 2757 uint32_t my_fw, loaded_fw; 2758 2759 /* is another pf loaded on this engine? */ 2760 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 2761 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 2762 /* build my FW version dword */ 2763 my_fw = (BNX2X_5710_FW_MAJOR_VERSION + 2764 (BNX2X_5710_FW_MINOR_VERSION << 8) + 2765 (BNX2X_5710_FW_REVISION_VERSION << 16) + 2766 (BNX2X_5710_FW_ENGINEERING_VERSION << 24)); 2767 2768 /* read loaded FW from chip */ 2769 loaded_fw = REG_RD(sc, XSEM_REG_PRAM); 2770 PMD_DRV_LOG(DEBUG, sc, "loaded FW 0x%08x / my FW 0x%08x", 2771 loaded_fw, my_fw); 2772 2773 /* abort nic load if version mismatch */ 2774 if (my_fw != loaded_fw) { 2775 PMD_DRV_LOG(NOTICE, sc, 2776 "FW 0x%08x already loaded (mine is 0x%08x)", 2777 loaded_fw, my_fw); 2778 return -1; 2779 } 2780 } 2781 2782 return 0; 2783 } 2784 2785 /* mark PMF if applicable */ 2786 static void bnx2x_nic_load_pmf(struct bnx2x_softc *sc, uint32_t load_code) 2787 { 2788 uint32_t ncsi_oem_data_addr; 2789 2790 PMD_INIT_FUNC_TRACE(sc); 2791 2792 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 2793 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || 2794 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { 2795 /* 2796 * Barrier here for ordering between the writing to sc->port.pmf here 2797 * and reading it from the periodic task. 2798 */ 2799 sc->port.pmf = 1; 2800 mb(); 2801 } else { 2802 sc->port.pmf = 0; 2803 } 2804 2805 PMD_DRV_LOG(DEBUG, sc, "pmf %d", sc->port.pmf); 2806 2807 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { 2808 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { 2809 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); 2810 if (ncsi_oem_data_addr) { 2811 REG_WR(sc, 2812 (ncsi_oem_data_addr + 2813 offsetof(struct glob_ncsi_oem_data, 2814 driver_version)), 0); 2815 } 2816 } 2817 } 2818 } 2819 2820 static void bnx2x_read_mf_cfg(struct bnx2x_softc *sc) 2821 { 2822 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); 2823 int abs_func; 2824 int vn; 2825 2826 if (BNX2X_NOMCP(sc)) { 2827 return; /* what should be the default bvalue in this case */ 2828 } 2829 2830 /* 2831 * The formula for computing the absolute function number is... 
2832 * For 2 port configuration (4 functions per port): 2833 * abs_func = 2 * vn + SC_PORT + SC_PATH 2834 * For 4 port configuration (2 functions per port): 2835 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH 2836 */ 2837 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 2838 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); 2839 if (abs_func >= E1H_FUNC_MAX) { 2840 break; 2841 } 2842 sc->devinfo.mf_info.mf_config[vn] = 2843 MFCFG_RD(sc, func_mf_config[abs_func].config); 2844 } 2845 2846 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & 2847 FUNC_MF_CFG_FUNC_DISABLED) { 2848 PMD_DRV_LOG(DEBUG, sc, "mf_cfg function disabled"); 2849 sc->flags |= BNX2X_MF_FUNC_DIS; 2850 } else { 2851 PMD_DRV_LOG(DEBUG, sc, "mf_cfg function enabled"); 2852 sc->flags &= ~BNX2X_MF_FUNC_DIS; 2853 } 2854 } 2855 2856 /* acquire split MCP access lock register */ 2857 static int bnx2x_acquire_alr(struct bnx2x_softc *sc) 2858 { 2859 uint32_t j, val; 2860 2861 for (j = 0; j < 1000; j++) { 2862 val = (1UL << 31); 2863 REG_WR(sc, GRCBASE_MCP + 0x9c, val); 2864 val = REG_RD(sc, GRCBASE_MCP + 0x9c); 2865 if (val & (1L << 31)) 2866 break; 2867 2868 DELAY(5000); 2869 } 2870 2871 if (!(val & (1L << 31))) { 2872 PMD_DRV_LOG(NOTICE, sc, "Cannot acquire MCP access lock register"); 2873 return -1; 2874 } 2875 2876 return 0; 2877 } 2878 2879 /* release split MCP access lock register */ 2880 static void bnx2x_release_alr(struct bnx2x_softc *sc) 2881 { 2882 REG_WR(sc, GRCBASE_MCP + 0x9c, 0); 2883 } 2884 2885 static void bnx2x_fan_failure(struct bnx2x_softc *sc) 2886 { 2887 int port = SC_PORT(sc); 2888 uint32_t ext_phy_config; 2889 2890 /* mark the failure */ 2891 ext_phy_config = 2892 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); 2893 2894 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 2895 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 2896 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, 2897 ext_phy_config); 2898 2899 /* log the failure */ 2900 PMD_DRV_LOG(INFO, sc, 2901 "Fan Failure has caused the driver to shutdown " 2902 "the card to prevent permanent damage. 
" 2903 "Please contact OEM Support for assistance"); 2904 2905 rte_panic("Schedule task to handle fan failure"); 2906 } 2907 2908 /* this function is called upon a link interrupt */ 2909 static void bnx2x_link_attn(struct bnx2x_softc *sc) 2910 { 2911 uint32_t pause_enabled = 0; 2912 struct host_port_stats *pstats; 2913 int cmng_fns; 2914 2915 /* Make sure that we are synced with the current statistics */ 2916 bnx2x_stats_handle(sc, STATS_EVENT_STOP); 2917 2918 elink_link_update(&sc->link_params, &sc->link_vars); 2919 2920 if (sc->link_vars.link_up) { 2921 2922 /* dropless flow control */ 2923 if (sc->dropless_fc) { 2924 pause_enabled = 0; 2925 2926 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 2927 pause_enabled = 1; 2928 } 2929 2930 REG_WR(sc, 2931 (BAR_USTRORM_INTMEM + 2932 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), 2933 pause_enabled); 2934 } 2935 2936 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { 2937 pstats = BNX2X_SP(sc, port_stats); 2938 /* reset old mac stats */ 2939 memset(&(pstats->mac_stx[0]), 0, 2940 sizeof(struct mac_stx)); 2941 } 2942 2943 if (sc->state == BNX2X_STATE_OPEN) { 2944 bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); 2945 } 2946 } 2947 2948 if (sc->link_vars.link_up && sc->link_vars.line_speed) { 2949 cmng_fns = bnx2x_get_cmng_fns_mode(sc); 2950 2951 if (cmng_fns != CMNG_FNS_NONE) { 2952 bnx2x_cmng_fns_init(sc, FALSE, cmng_fns); 2953 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 2954 } 2955 } 2956 2957 bnx2x_link_report_locked(sc); 2958 2959 if (IS_MF(sc)) { 2960 bnx2x_link_sync_notify(sc); 2961 } 2962 } 2963 2964 static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted) 2965 { 2966 int port = SC_PORT(sc); 2967 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 2968 MISC_REG_AEU_MASK_ATTN_FUNC_0; 2969 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 2970 NIG_REG_MASK_INTERRUPT_PORT0; 2971 uint32_t aeu_mask; 2972 uint32_t nig_mask = 0; 2973 uint32_t reg_addr; 2974 uint32_t igu_acked; 2975 uint32_t cnt; 2976 2977 if (sc->attn_state & asserted) { 2978 PMD_DRV_LOG(ERR, sc, "IGU ERROR attn=0x%08x", asserted); 2979 } 2980 2981 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 2982 2983 aeu_mask = REG_RD(sc, aeu_addr); 2984 2985 aeu_mask &= ~(asserted & 0x3ff); 2986 2987 REG_WR(sc, aeu_addr, aeu_mask); 2988 2989 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 2990 2991 sc->attn_state |= asserted; 2992 2993 if (asserted & ATTN_HARD_WIRED_MASK) { 2994 if (asserted & ATTN_NIG_FOR_FUNC) { 2995 2996 bnx2x_acquire_phy_lock(sc); 2997 /* save nig interrupt mask */ 2998 nig_mask = REG_RD(sc, nig_int_mask_addr); 2999 3000 /* If nig_mask is not set, no need to call the update function */ 3001 if (nig_mask) { 3002 REG_WR(sc, nig_int_mask_addr, 0); 3003 3004 bnx2x_link_attn(sc); 3005 } 3006 3007 /* handle unicore attn? 
*/ 3008 } 3009 3010 if (asserted & ATTN_SW_TIMER_4_FUNC) { 3011 PMD_DRV_LOG(DEBUG, sc, "ATTN_SW_TIMER_4_FUNC!"); 3012 } 3013 3014 if (asserted & GPIO_2_FUNC) { 3015 PMD_DRV_LOG(DEBUG, sc, "GPIO_2_FUNC!"); 3016 } 3017 3018 if (asserted & GPIO_3_FUNC) { 3019 PMD_DRV_LOG(DEBUG, sc, "GPIO_3_FUNC!"); 3020 } 3021 3022 if (asserted & GPIO_4_FUNC) { 3023 PMD_DRV_LOG(DEBUG, sc, "GPIO_4_FUNC!"); 3024 } 3025 3026 if (port == 0) { 3027 if (asserted & ATTN_GENERAL_ATTN_1) { 3028 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_1!"); 3029 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 3030 } 3031 if (asserted & ATTN_GENERAL_ATTN_2) { 3032 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_2!"); 3033 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 3034 } 3035 if (asserted & ATTN_GENERAL_ATTN_3) { 3036 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_3!"); 3037 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 3038 } 3039 } else { 3040 if (asserted & ATTN_GENERAL_ATTN_4) { 3041 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_4!"); 3042 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 3043 } 3044 if (asserted & ATTN_GENERAL_ATTN_5) { 3045 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_5!"); 3046 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 3047 } 3048 if (asserted & ATTN_GENERAL_ATTN_6) { 3049 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_6!"); 3050 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 3051 } 3052 } 3053 } 3054 /* hardwired */ 3055 if (sc->devinfo.int_block == INT_BLOCK_HC) { 3056 reg_addr = 3057 (HC_REG_COMMAND_REG + port * 32 + 3058 COMMAND_REG_ATTN_BITS_SET); 3059 } else { 3060 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER * 8); 3061 } 3062 3063 PMD_DRV_LOG(DEBUG, sc, "about to mask 0x%08x at %s addr 0x%08x", 3064 asserted, 3065 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", 3066 reg_addr); 3067 REG_WR(sc, reg_addr, asserted); 3068 3069 /* now set back the mask */ 3070 if (asserted & ATTN_NIG_FOR_FUNC) { 3071 /* 3072 * Verify that IGU ack through BAR was written before restoring 3073 * NIG mask. This loop should exit after 2-3 iterations max. 3074 */ 3075 if (sc->devinfo.int_block != INT_BLOCK_HC) { 3076 cnt = 0; 3077 3078 do { 3079 igu_acked = 3080 REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); 3081 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) 3082 && (++cnt < MAX_IGU_ATTN_ACK_TO)); 3083 3084 if (!igu_acked) { 3085 PMD_DRV_LOG(ERR, sc, 3086 "Failed to verify IGU ack on time"); 3087 } 3088 3089 mb(); 3090 } 3091 3092 REG_WR(sc, nig_int_mask_addr, nig_mask); 3093 3094 bnx2x_release_phy_lock(sc); 3095 } 3096 } 3097 3098 static void 3099 bnx2x_print_next_block(__rte_unused struct bnx2x_softc *sc, __rte_unused int idx, 3100 __rte_unused const char *blk) 3101 { 3102 PMD_DRV_LOG(INFO, sc, "%s%s", idx ? 
", " : "", blk); 3103 } 3104 3105 static int 3106 bnx2x_check_blocks_with_parity0(struct bnx2x_softc *sc, uint32_t sig, int par_num, 3107 uint8_t print) 3108 { 3109 uint32_t cur_bit = 0; 3110 int i = 0; 3111 3112 for (i = 0; sig; i++) { 3113 cur_bit = ((uint32_t) 0x1 << i); 3114 if (sig & cur_bit) { 3115 switch (cur_bit) { 3116 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 3117 if (print) 3118 bnx2x_print_next_block(sc, par_num++, 3119 "BRB"); 3120 break; 3121 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 3122 if (print) 3123 bnx2x_print_next_block(sc, par_num++, 3124 "PARSER"); 3125 break; 3126 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 3127 if (print) 3128 bnx2x_print_next_block(sc, par_num++, 3129 "TSDM"); 3130 break; 3131 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 3132 if (print) 3133 bnx2x_print_next_block(sc, par_num++, 3134 "SEARCHER"); 3135 break; 3136 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 3137 if (print) 3138 bnx2x_print_next_block(sc, par_num++, 3139 "TCM"); 3140 break; 3141 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 3142 if (print) 3143 bnx2x_print_next_block(sc, par_num++, 3144 "TSEMI"); 3145 break; 3146 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 3147 if (print) 3148 bnx2x_print_next_block(sc, par_num++, 3149 "XPB"); 3150 break; 3151 } 3152 3153 /* Clear the bit */ 3154 sig &= ~cur_bit; 3155 } 3156 } 3157 3158 return par_num; 3159 } 3160 3161 static int 3162 bnx2x_check_blocks_with_parity1(struct bnx2x_softc *sc, uint32_t sig, int par_num, 3163 uint8_t * global, uint8_t print) 3164 { 3165 int i = 0; 3166 uint32_t cur_bit = 0; 3167 for (i = 0; sig; i++) { 3168 cur_bit = ((uint32_t) 0x1 << i); 3169 if (sig & cur_bit) { 3170 switch (cur_bit) { 3171 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 3172 if (print) 3173 bnx2x_print_next_block(sc, par_num++, 3174 "PBF"); 3175 break; 3176 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 3177 if (print) 3178 bnx2x_print_next_block(sc, par_num++, 3179 "QM"); 3180 break; 3181 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 3182 if (print) 3183 bnx2x_print_next_block(sc, par_num++, 3184 "TM"); 3185 break; 3186 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 3187 if (print) 3188 bnx2x_print_next_block(sc, par_num++, 3189 "XSDM"); 3190 break; 3191 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 3192 if (print) 3193 bnx2x_print_next_block(sc, par_num++, 3194 "XCM"); 3195 break; 3196 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 3197 if (print) 3198 bnx2x_print_next_block(sc, par_num++, 3199 "XSEMI"); 3200 break; 3201 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 3202 if (print) 3203 bnx2x_print_next_block(sc, par_num++, 3204 "DOORBELLQ"); 3205 break; 3206 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 3207 if (print) 3208 bnx2x_print_next_block(sc, par_num++, 3209 "NIG"); 3210 break; 3211 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 3212 if (print) 3213 bnx2x_print_next_block(sc, par_num++, 3214 "VAUX PCI CORE"); 3215 *global = TRUE; 3216 break; 3217 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 3218 if (print) 3219 bnx2x_print_next_block(sc, par_num++, 3220 "DEBUG"); 3221 break; 3222 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 3223 if (print) 3224 bnx2x_print_next_block(sc, par_num++, 3225 "USDM"); 3226 break; 3227 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 3228 if (print) 3229 bnx2x_print_next_block(sc, par_num++, 3230 "UCM"); 3231 break; 3232 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 3233 if (print) 3234 bnx2x_print_next_block(sc, par_num++, 3235 "USEMI"); 3236 break; 3237 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 3238 
if (print) 3239 bnx2x_print_next_block(sc, par_num++, 3240 "UPB"); 3241 break; 3242 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 3243 if (print) 3244 bnx2x_print_next_block(sc, par_num++, 3245 "CSDM"); 3246 break; 3247 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 3248 if (print) 3249 bnx2x_print_next_block(sc, par_num++, 3250 "CCM"); 3251 break; 3252 } 3253 3254 /* Clear the bit */ 3255 sig &= ~cur_bit; 3256 } 3257 } 3258 3259 return par_num; 3260 } 3261 3262 static int 3263 bnx2x_check_blocks_with_parity2(struct bnx2x_softc *sc, uint32_t sig, int par_num, 3264 uint8_t print) 3265 { 3266 uint32_t cur_bit = 0; 3267 int i = 0; 3268 3269 for (i = 0; sig; i++) { 3270 cur_bit = ((uint32_t) 0x1 << i); 3271 if (sig & cur_bit) { 3272 switch (cur_bit) { 3273 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 3274 if (print) 3275 bnx2x_print_next_block(sc, par_num++, 3276 "CSEMI"); 3277 break; 3278 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 3279 if (print) 3280 bnx2x_print_next_block(sc, par_num++, 3281 "PXP"); 3282 break; 3283 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 3284 if (print) 3285 bnx2x_print_next_block(sc, par_num++, 3286 "PXPPCICLOCKCLIENT"); 3287 break; 3288 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 3289 if (print) 3290 bnx2x_print_next_block(sc, par_num++, 3291 "CFC"); 3292 break; 3293 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 3294 if (print) 3295 bnx2x_print_next_block(sc, par_num++, 3296 "CDU"); 3297 break; 3298 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 3299 if (print) 3300 bnx2x_print_next_block(sc, par_num++, 3301 "DMAE"); 3302 break; 3303 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 3304 if (print) 3305 bnx2x_print_next_block(sc, par_num++, 3306 "IGU"); 3307 break; 3308 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 3309 if (print) 3310 bnx2x_print_next_block(sc, par_num++, 3311 "MISC"); 3312 break; 3313 } 3314 3315 /* Clear the bit */ 3316 sig &= ~cur_bit; 3317 } 3318 } 3319 3320 return par_num; 3321 } 3322 3323 static int 3324 bnx2x_check_blocks_with_parity3(struct bnx2x_softc *sc, uint32_t sig, int par_num, 3325 uint8_t * global, uint8_t print) 3326 { 3327 uint32_t cur_bit = 0; 3328 int i = 0; 3329 3330 for (i = 0; sig; i++) { 3331 cur_bit = ((uint32_t) 0x1 << i); 3332 if (sig & cur_bit) { 3333 switch (cur_bit) { 3334 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 3335 if (print) 3336 bnx2x_print_next_block(sc, par_num++, 3337 "MCP ROM"); 3338 *global = TRUE; 3339 break; 3340 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 3341 if (print) 3342 bnx2x_print_next_block(sc, par_num++, 3343 "MCP UMP RX"); 3344 *global = TRUE; 3345 break; 3346 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 3347 if (print) 3348 bnx2x_print_next_block(sc, par_num++, 3349 "MCP UMP TX"); 3350 *global = TRUE; 3351 break; 3352 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 3353 if (print) 3354 bnx2x_print_next_block(sc, par_num++, 3355 "MCP SCPAD"); 3356 *global = TRUE; 3357 break; 3358 } 3359 3360 /* Clear the bit */ 3361 sig &= ~cur_bit; 3362 } 3363 } 3364 3365 return par_num; 3366 } 3367 3368 static int 3369 bnx2x_check_blocks_with_parity4(struct bnx2x_softc *sc, uint32_t sig, int par_num, 3370 uint8_t print) 3371 { 3372 uint32_t cur_bit = 0; 3373 int i = 0; 3374 3375 for (i = 0; sig; i++) { 3376 cur_bit = ((uint32_t) 0x1 << i); 3377 if (sig & cur_bit) { 3378 switch (cur_bit) { 3379 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 3380 if (print) 3381 bnx2x_print_next_block(sc, par_num++, 3382 "PGLUE_B"); 3383 break; 3384 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 3385 if 
(print) 3386 bnx2x_print_next_block(sc, par_num++, 3387 "ATC"); 3388 break; 3389 } 3390 3391 /* Clear the bit */ 3392 sig &= ~cur_bit; 3393 } 3394 } 3395 3396 return par_num; 3397 } 3398 3399 static uint8_t 3400 bnx2x_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print, 3401 uint32_t * sig) 3402 { 3403 int par_num = 0; 3404 3405 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 3406 (sig[1] & HW_PRTY_ASSERT_SET_1) || 3407 (sig[2] & HW_PRTY_ASSERT_SET_2) || 3408 (sig[3] & HW_PRTY_ASSERT_SET_3) || 3409 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 3410 PMD_DRV_LOG(ERR, sc, 3411 "Parity error: HW block parity attention:" 3412 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x", 3413 (uint32_t) (sig[0] & HW_PRTY_ASSERT_SET_0), 3414 (uint32_t) (sig[1] & HW_PRTY_ASSERT_SET_1), 3415 (uint32_t) (sig[2] & HW_PRTY_ASSERT_SET_2), 3416 (uint32_t) (sig[3] & HW_PRTY_ASSERT_SET_3), 3417 (uint32_t) (sig[4] & HW_PRTY_ASSERT_SET_4)); 3418 3419 if (print) 3420 PMD_DRV_LOG(INFO, sc, "Parity errors detected in blocks: "); 3421 3422 par_num = 3423 bnx2x_check_blocks_with_parity0(sc, sig[0] & 3424 HW_PRTY_ASSERT_SET_0, 3425 par_num, print); 3426 par_num = 3427 bnx2x_check_blocks_with_parity1(sc, sig[1] & 3428 HW_PRTY_ASSERT_SET_1, 3429 par_num, global, print); 3430 par_num = 3431 bnx2x_check_blocks_with_parity2(sc, sig[2] & 3432 HW_PRTY_ASSERT_SET_2, 3433 par_num, print); 3434 par_num = 3435 bnx2x_check_blocks_with_parity3(sc, sig[3] & 3436 HW_PRTY_ASSERT_SET_3, 3437 par_num, global, print); 3438 par_num = 3439 bnx2x_check_blocks_with_parity4(sc, sig[4] & 3440 HW_PRTY_ASSERT_SET_4, 3441 par_num, print); 3442 3443 if (print) 3444 PMD_DRV_LOG(INFO, sc, ""); 3445 3446 return TRUE; 3447 } 3448 3449 return FALSE; 3450 } 3451 3452 static uint8_t 3453 bnx2x_chk_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print) 3454 { 3455 struct attn_route attn = { {0} }; 3456 int port = SC_PORT(sc); 3457 3458 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4); 3459 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4); 3460 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4); 3461 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4); 3462 3463 if (!CHIP_IS_E1x(sc)) 3464 attn.sig[4] = 3465 REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port * 4); 3466 3467 return bnx2x_parity_attn(sc, global, print, attn.sig); 3468 } 3469 3470 static void bnx2x_attn_int_deasserted4(struct bnx2x_softc *sc, uint32_t attn) 3471 { 3472 uint32_t val; 3473 3474 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 3475 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 3476 PMD_DRV_LOG(INFO, sc, "ERROR: PGLUE hw attention 0x%08x", val); 3477 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 3478 PMD_DRV_LOG(INFO, sc, 3479 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR"); 3480 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 3481 PMD_DRV_LOG(INFO, sc, 3482 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR"); 3483 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 3484 PMD_DRV_LOG(INFO, sc, 3485 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN"); 3486 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 3487 PMD_DRV_LOG(INFO, sc, 3488 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN"); 3489 if (val & 3490 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 3491 PMD_DRV_LOG(INFO, sc, 3492 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN"); 3493 if (val & 3494 
PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 3495 PMD_DRV_LOG(INFO, sc, 3496 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN"); 3497 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 3498 PMD_DRV_LOG(INFO, sc, 3499 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN"); 3500 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 3501 PMD_DRV_LOG(INFO, sc, 3502 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN"); 3503 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 3504 PMD_DRV_LOG(INFO, sc, 3505 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW"); 3506 } 3507 3508 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 3509 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); 3510 PMD_DRV_LOG(INFO, sc, "ERROR: ATC hw attention 0x%08x", val); 3511 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 3512 PMD_DRV_LOG(INFO, sc, 3513 "ERROR: ATC_ATC_INT_STS_REG_ADDRESS_ERROR"); 3514 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 3515 PMD_DRV_LOG(INFO, sc, 3516 "ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND"); 3517 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 3518 PMD_DRV_LOG(INFO, sc, 3519 "ERROR: ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS"); 3520 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 3521 PMD_DRV_LOG(INFO, sc, 3522 "ERROR: ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT"); 3523 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 3524 PMD_DRV_LOG(INFO, sc, 3525 "ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR"); 3526 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 3527 PMD_DRV_LOG(INFO, sc, 3528 "ERROR: ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU"); 3529 } 3530 3531 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 3532 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 3533 PMD_DRV_LOG(INFO, sc, 3534 "ERROR: FATAL parity attention set4 0x%08x", 3535 (uint32_t) (attn & 3536 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR 3537 | 3538 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 3539 } 3540 } 3541 3542 static void bnx2x_e1h_disable(struct bnx2x_softc *sc) 3543 { 3544 int port = SC_PORT(sc); 3545 3546 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0); 3547 } 3548 3549 static void bnx2x_e1h_enable(struct bnx2x_softc *sc) 3550 { 3551 int port = SC_PORT(sc); 3552 3553 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1); 3554 } 3555 3556 /* 3557 * called due to MCP event (on pmf): 3558 * reread new bandwidth configuration 3559 * configure FW 3560 * notify others function about the change 3561 */ 3562 static void bnx2x_config_mf_bw(struct bnx2x_softc *sc) 3563 { 3564 if (sc->link_vars.link_up) { 3565 bnx2x_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); 3566 bnx2x_link_sync_notify(sc); 3567 } 3568 3569 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 3570 } 3571 3572 static void bnx2x_set_mf_bw(struct bnx2x_softc *sc) 3573 { 3574 bnx2x_config_mf_bw(sc); 3575 bnx2x_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 3576 } 3577 3578 static void bnx2x_handle_eee_event(struct bnx2x_softc *sc) 3579 { 3580 bnx2x_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 3581 } 3582 3583 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 3584 3585 static void bnx2x_drv_info_ether_stat(struct bnx2x_softc *sc) 3586 { 3587 struct eth_stats_info *ether_stat = &sc->sp->drv_info_to_mcp.ether_stat; 3588 3589 strncpy(ether_stat->version, BNX2X_DRIVER_VERSION, 3590 ETH_STAT_INFO_VERSION_LEN); 3591 3592 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, 3593 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 3594 ether_stat->mac_local + MAC_PAD, 3595 MAC_PAD, ETH_ALEN); 3596 3597 ether_stat->mtu_size = 
sc->mtu; 3598 3599 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 3600 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0; 3601 3602 ether_stat->txq_size = sc->tx_ring_size; 3603 ether_stat->rxq_size = sc->rx_ring_size; 3604 } 3605 3606 static void bnx2x_handle_drv_info_req(struct bnx2x_softc *sc) 3607 { 3608 enum drv_info_opcode op_code; 3609 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); 3610 3611 /* if drv_info version supported by MFW doesn't match - send NACK */ 3612 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 3613 bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3614 return; 3615 } 3616 3617 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 3618 DRV_INFO_CONTROL_OP_CODE_SHIFT); 3619 3620 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); 3621 3622 switch (op_code) { 3623 case ETH_STATS_OPCODE: 3624 bnx2x_drv_info_ether_stat(sc); 3625 break; 3626 case FCOE_STATS_OPCODE: 3627 case ISCSI_STATS_OPCODE: 3628 default: 3629 /* if op code isn't supported - send NACK */ 3630 bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3631 return; 3632 } 3633 3634 /* 3635 * If we got drv_info attn from MFW then these fields are defined in 3636 * shmem2 for sure 3637 */ 3638 SHMEM2_WR(sc, drv_info_host_addr_lo, 3639 U64_LO(BNX2X_SP_MAPPING(sc, drv_info_to_mcp))); 3640 SHMEM2_WR(sc, drv_info_host_addr_hi, 3641 U64_HI(BNX2X_SP_MAPPING(sc, drv_info_to_mcp))); 3642 3643 bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); 3644 } 3645 3646 static void bnx2x_dcc_event(struct bnx2x_softc *sc, uint32_t dcc_event) 3647 { 3648 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 3649 /* 3650 * This is the only place besides the function initialization 3651 * where the sc->flags can change so it is done without any 3652 * locks 3653 */ 3654 if (sc->devinfo. 3655 mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { 3656 PMD_DRV_LOG(DEBUG, sc, "mf_cfg function disabled"); 3657 sc->flags |= BNX2X_MF_FUNC_DIS; 3658 bnx2x_e1h_disable(sc); 3659 } else { 3660 PMD_DRV_LOG(DEBUG, sc, "mf_cfg function enabled"); 3661 sc->flags &= ~BNX2X_MF_FUNC_DIS; 3662 bnx2x_e1h_enable(sc); 3663 } 3664 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 3665 } 3666 3667 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 3668 bnx2x_config_mf_bw(sc); 3669 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 3670 } 3671 3672 /* Report results to MCP */ 3673 if (dcc_event) 3674 bnx2x_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); 3675 else 3676 bnx2x_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); 3677 } 3678 3679 static void bnx2x_pmf_update(struct bnx2x_softc *sc) 3680 { 3681 int port = SC_PORT(sc); 3682 uint32_t val; 3683 3684 sc->port.pmf = 1; 3685 3686 /* 3687 * We need the mb() to ensure the ordering between the writing to 3688 * sc->port.pmf here and reading it from the bnx2x_periodic_task(). 
3689 */ 3690 mb(); 3691 3692 /* enable nig attention */ 3693 val = (0xff0f | (1 << (SC_VN(sc) + 4))); 3694 if (sc->devinfo.int_block == INT_BLOCK_HC) { 3695 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, val); 3696 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, val); 3697 } else if (!CHIP_IS_E1x(sc)) { 3698 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 3699 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 3700 } 3701 3702 bnx2x_stats_handle(sc, STATS_EVENT_PMF); 3703 } 3704 3705 static int bnx2x_mc_assert(struct bnx2x_softc *sc) 3706 { 3707 char last_idx; 3708 int i, rc = 0; 3709 __rte_unused uint32_t row0, row1, row2, row3; 3710 3711 /* XSTORM */ 3712 last_idx = 3713 REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); 3714 if (last_idx) 3715 PMD_DRV_LOG(ERR, sc, "XSTORM_ASSERT_LIST_INDEX 0x%x", last_idx); 3716 3717 /* print the asserts */ 3718 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 3719 3720 row0 = 3721 REG_RD(sc, 3722 BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); 3723 row1 = 3724 REG_RD(sc, 3725 BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 3726 4); 3727 row2 = 3728 REG_RD(sc, 3729 BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 3730 8); 3731 row3 = 3732 REG_RD(sc, 3733 BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 3734 12); 3735 3736 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 3737 PMD_DRV_LOG(ERR, sc, 3738 "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", 3739 i, row3, row2, row1, row0); 3740 rc++; 3741 } else { 3742 break; 3743 } 3744 } 3745 3746 /* TSTORM */ 3747 last_idx = 3748 REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); 3749 if (last_idx) { 3750 PMD_DRV_LOG(ERR, sc, "TSTORM_ASSERT_LIST_INDEX 0x%x", last_idx); 3751 } 3752 3753 /* print the asserts */ 3754 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 3755 3756 row0 = 3757 REG_RD(sc, 3758 BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); 3759 row1 = 3760 REG_RD(sc, 3761 BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 3762 4); 3763 row2 = 3764 REG_RD(sc, 3765 BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 3766 8); 3767 row3 = 3768 REG_RD(sc, 3769 BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 3770 12); 3771 3772 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 3773 PMD_DRV_LOG(ERR, sc, 3774 "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", 3775 i, row3, row2, row1, row0); 3776 rc++; 3777 } else { 3778 break; 3779 } 3780 } 3781 3782 /* CSTORM */ 3783 last_idx = 3784 REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); 3785 if (last_idx) { 3786 PMD_DRV_LOG(ERR, sc, "CSTORM_ASSERT_LIST_INDEX 0x%x", last_idx); 3787 } 3788 3789 /* print the asserts */ 3790 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 3791 3792 row0 = 3793 REG_RD(sc, 3794 BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); 3795 row1 = 3796 REG_RD(sc, 3797 BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 3798 4); 3799 row2 = 3800 REG_RD(sc, 3801 BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 3802 8); 3803 row3 = 3804 REG_RD(sc, 3805 BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 3806 12); 3807 3808 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 3809 PMD_DRV_LOG(ERR, sc, 3810 "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", 3811 i, row3, row2, row1, row0); 3812 rc++; 3813 } else { 3814 break; 3815 } 3816 } 3817 3818 /* USTORM */ 3819 last_idx = 3820 REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); 3821 if (last_idx) { 3822 PMD_DRV_LOG(ERR, sc, "USTORM_ASSERT_LIST_INDEX 0x%x", last_idx); 3823 } 3824 3825 /* print the 
asserts */ 3826 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 3827 3828 row0 = 3829 REG_RD(sc, 3830 BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); 3831 row1 = 3832 REG_RD(sc, 3833 BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 3834 4); 3835 row2 = 3836 REG_RD(sc, 3837 BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 3838 8); 3839 row3 = 3840 REG_RD(sc, 3841 BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 3842 12); 3843 3844 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 3845 PMD_DRV_LOG(ERR, sc, 3846 "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", 3847 i, row3, row2, row1, row0); 3848 rc++; 3849 } else { 3850 break; 3851 } 3852 } 3853 3854 return rc; 3855 } 3856 3857 static void bnx2x_attn_int_deasserted3(struct bnx2x_softc *sc, uint32_t attn) 3858 { 3859 int func = SC_FUNC(sc); 3860 uint32_t val; 3861 3862 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 3863 3864 if (attn & BNX2X_PMF_LINK_ASSERT(sc)) { 3865 3866 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0); 3867 bnx2x_read_mf_cfg(sc); 3868 sc->devinfo.mf_info.mf_config[SC_VN(sc)] = 3869 MFCFG_RD(sc, 3870 func_mf_config[SC_ABS_FUNC(sc)].config); 3871 val = 3872 SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); 3873 3874 if (val & DRV_STATUS_DCC_EVENT_MASK) 3875 bnx2x_dcc_event(sc, 3876 (val & 3877 DRV_STATUS_DCC_EVENT_MASK)); 3878 3879 if (val & DRV_STATUS_SET_MF_BW) 3880 bnx2x_set_mf_bw(sc); 3881 3882 if (val & DRV_STATUS_DRV_INFO_REQ) 3883 bnx2x_handle_drv_info_req(sc); 3884 3885 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) 3886 bnx2x_pmf_update(sc); 3887 3888 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 3889 bnx2x_handle_eee_event(sc); 3890 3891 if (sc->link_vars.periodic_flags & 3892 ELINK_PERIODIC_FLAGS_LINK_EVENT) { 3893 /* sync with link */ 3894 bnx2x_acquire_phy_lock(sc); 3895 sc->link_vars.periodic_flags &= 3896 ~ELINK_PERIODIC_FLAGS_LINK_EVENT; 3897 bnx2x_release_phy_lock(sc); 3898 if (IS_MF(sc)) { 3899 bnx2x_link_sync_notify(sc); 3900 } 3901 bnx2x_link_report(sc); 3902 } 3903 3904 /* 3905 * Always call it here: bnx2x_link_report() will 3906 * prevent the link indication duplication. 3907 */ 3908 bnx2x_link_status_update(sc); 3909 3910 } else if (attn & BNX2X_MC_ASSERT_BITS) { 3911 3912 PMD_DRV_LOG(ERR, sc, "MC assert!"); 3913 bnx2x_mc_assert(sc); 3914 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); 3915 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); 3916 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); 3917 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); 3918 rte_panic("MC assert!"); 3919 3920 } else if (attn & BNX2X_MCP_ASSERT) { 3921 3922 PMD_DRV_LOG(ERR, sc, "MCP assert!"); 3923 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); 3924 3925 } else { 3926 PMD_DRV_LOG(ERR, sc, 3927 "Unknown HW assert! 
(attn 0x%08x)", attn); 3928 } 3929 } 3930 3931 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 3932 PMD_DRV_LOG(ERR, sc, "LATCHED attention 0x%08x (masked)", attn); 3933 if (attn & BNX2X_GRC_TIMEOUT) { 3934 val = REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); 3935 PMD_DRV_LOG(ERR, sc, "GRC time-out 0x%08x", val); 3936 } 3937 if (attn & BNX2X_GRC_RSV) { 3938 val = REG_RD(sc, MISC_REG_GRC_RSV_ATTN); 3939 PMD_DRV_LOG(ERR, sc, "GRC reserved 0x%08x", val); 3940 } 3941 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 3942 } 3943 } 3944 3945 static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn) 3946 { 3947 int port = SC_PORT(sc); 3948 int reg_offset; 3949 uint32_t val0, mask0, val1, mask1; 3950 uint32_t val; 3951 3952 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 3953 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR); 3954 PMD_DRV_LOG(ERR, sc, "CFC hw attention 0x%08x", val); 3955 /* CFC error attention */ 3956 if (val & 0x2) { 3957 PMD_DRV_LOG(ERR, sc, "FATAL error from CFC"); 3958 } 3959 } 3960 3961 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 3962 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0); 3963 PMD_DRV_LOG(ERR, sc, "PXP hw attention-0 0x%08x", val); 3964 /* RQ_USDMDP_FIFO_OVERFLOW */ 3965 if (val & 0x18000) { 3966 PMD_DRV_LOG(ERR, sc, "FATAL error from PXP"); 3967 } 3968 3969 if (!CHIP_IS_E1x(sc)) { 3970 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1); 3971 PMD_DRV_LOG(ERR, sc, "PXP hw attention-1 0x%08x", val); 3972 } 3973 } 3974 #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR 3975 #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT 3976 3977 if (attn & AEU_PXP2_HW_INT_BIT) { 3978 /* CQ47854 workaround do not panic on 3979 * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 3980 */ 3981 if (!CHIP_IS_E1x(sc)) { 3982 mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0); 3983 val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1); 3984 mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1); 3985 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0); 3986 /* 3987 * If the only PXP2_EOP_ERROR_BIT is set in 3988 * STS0 and STS1 - clear it 3989 * 3990 * probably we lose additional attentions between 3991 * STS0 and STS_CLR0, in this case user will not 3992 * be notified about them 3993 */ 3994 if (val0 & mask0 & PXP2_EOP_ERROR_BIT && 3995 !(val1 & mask1)) 3996 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 3997 3998 /* print the register, since no one can restore it */ 3999 PMD_DRV_LOG(ERR, sc, 4000 "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x", val0); 4001 4002 /* 4003 * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 4004 * then notify 4005 */ 4006 if (val0 & PXP2_EOP_ERROR_BIT) { 4007 PMD_DRV_LOG(ERR, sc, "PXP2_WR_PGLUE_EOP_ERROR"); 4008 4009 /* 4010 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is 4011 * set then clear attention from PXP2 block without panic 4012 */ 4013 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) && 4014 ((val1 & mask1) == 0)) 4015 attn &= ~AEU_PXP2_HW_INT_BIT; 4016 } 4017 } 4018 } 4019 4020 if (attn & HW_INTERRUT_ASSERT_SET_2) { 4021 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 4022 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 4023 4024 val = REG_RD(sc, reg_offset); 4025 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 4026 REG_WR(sc, reg_offset, val); 4027 4028 PMD_DRV_LOG(ERR, sc, 4029 "FATAL HW block attention set2 0x%x", 4030 (uint32_t) (attn & HW_INTERRUT_ASSERT_SET_2)); 4031 rte_panic("HW block attention set2"); 4032 } 4033 } 4034 4035 static void bnx2x_attn_int_deasserted1(struct bnx2x_softc *sc, uint32_t attn) 4036 { 4037 int port = SC_PORT(sc); 4038 int reg_offset; 4039 uint32_t val; 4040 4041 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 4042 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); 4043 PMD_DRV_LOG(ERR, sc, "DB hw attention 0x%08x", val); 4044 /* DORQ discard attention */ 4045 if (val & 0x2) { 4046 PMD_DRV_LOG(ERR, sc, "FATAL error from DORQ"); 4047 } 4048 } 4049 4050 if (attn & HW_INTERRUT_ASSERT_SET_1) { 4051 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 4052 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 4053 4054 val = REG_RD(sc, reg_offset); 4055 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 4056 REG_WR(sc, reg_offset, val); 4057 4058 PMD_DRV_LOG(ERR, sc, 4059 "FATAL HW block attention set1 0x%08x", 4060 (uint32_t) (attn & HW_INTERRUT_ASSERT_SET_1)); 4061 rte_panic("HW block attention set1"); 4062 } 4063 } 4064 4065 static void bnx2x_attn_int_deasserted0(struct bnx2x_softc *sc, uint32_t attn) 4066 { 4067 int port = SC_PORT(sc); 4068 int reg_offset; 4069 uint32_t val; 4070 4071 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4072 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 4073 4074 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 4075 val = REG_RD(sc, reg_offset); 4076 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 4077 REG_WR(sc, reg_offset, val); 4078 4079 PMD_DRV_LOG(WARNING, sc, "SPIO5 hw attention"); 4080 4081 /* Fan failure attention */ 4082 elink_hw_reset_phy(&sc->link_params); 4083 bnx2x_fan_failure(sc); 4084 } 4085 4086 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { 4087 bnx2x_acquire_phy_lock(sc); 4088 elink_handle_module_detect_int(&sc->link_params); 4089 bnx2x_release_phy_lock(sc); 4090 } 4091 4092 if (attn & HW_INTERRUT_ASSERT_SET_0) { 4093 val = REG_RD(sc, reg_offset); 4094 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 4095 REG_WR(sc, reg_offset, val); 4096 4097 rte_panic("FATAL HW block attention set0 0x%lx", 4098 (attn & HW_INTERRUT_ASSERT_SET_0)); 4099 } 4100 } 4101 4102 static void bnx2x_attn_int_deasserted(struct bnx2x_softc *sc, uint32_t deasserted) 4103 { 4104 struct attn_route attn; 4105 struct attn_route *group_mask; 4106 int port = SC_PORT(sc); 4107 int index; 4108 uint32_t reg_addr; 4109 uint32_t val; 4110 uint32_t aeu_mask; 4111 uint8_t global = FALSE; 4112 4113 /* 4114 * Need to take HW lock because MCP or other port might also 4115 * try to handle this event. 
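 * The lock taken via bnx2x_acquire_alr() below serializes this flow; if a parity attention is detected the driver enters BNX2X_RECOVERY_INIT, disables interrupts and returns without the normal deassert handling.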
4116 */ 4117 bnx2x_acquire_alr(sc); 4118 4119 if (bnx2x_chk_parity_attn(sc, &global, TRUE)) { 4120 sc->recovery_state = BNX2X_RECOVERY_INIT; 4121 4122 /* disable HW interrupts */ 4123 bnx2x_int_disable(sc); 4124 bnx2x_release_alr(sc); 4125 return; 4126 } 4127 4128 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4); 4129 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4); 4130 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4); 4131 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4); 4132 if (!CHIP_IS_E1x(sc)) { 4133 attn.sig[4] = 4134 REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port * 4); 4135 } else { 4136 attn.sig[4] = 0; 4137 } 4138 4139 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 4140 if (deasserted & (1 << index)) { 4141 group_mask = &sc->attn_group[index]; 4142 4143 bnx2x_attn_int_deasserted4(sc, 4144 attn. 4145 sig[4] & group_mask->sig[4]); 4146 bnx2x_attn_int_deasserted3(sc, 4147 attn. 4148 sig[3] & group_mask->sig[3]); 4149 bnx2x_attn_int_deasserted1(sc, 4150 attn. 4151 sig[1] & group_mask->sig[1]); 4152 bnx2x_attn_int_deasserted2(sc, 4153 attn. 4154 sig[2] & group_mask->sig[2]); 4155 bnx2x_attn_int_deasserted0(sc, 4156 attn. 4157 sig[0] & group_mask->sig[0]); 4158 } 4159 } 4160 4161 bnx2x_release_alr(sc); 4162 4163 if (sc->devinfo.int_block == INT_BLOCK_HC) { 4164 reg_addr = (HC_REG_COMMAND_REG + port * 32 + 4165 COMMAND_REG_ATTN_BITS_CLR); 4166 } else { 4167 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER * 8); 4168 } 4169 4170 val = ~deasserted; 4171 PMD_DRV_LOG(DEBUG, sc, 4172 "about to mask 0x%08x at %s addr 0x%08x", val, 4173 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", 4174 reg_addr); 4175 REG_WR(sc, reg_addr, val); 4176 4177 if (~sc->attn_state & deasserted) { 4178 PMD_DRV_LOG(ERR, sc, "IGU error"); 4179 } 4180 4181 reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4182 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4183 4184 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4185 4186 aeu_mask = REG_RD(sc, reg_addr); 4187 4188 aeu_mask |= (deasserted & 0x3ff); 4189 4190 REG_WR(sc, reg_addr, aeu_mask); 4191 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4192 4193 sc->attn_state &= ~deasserted; 4194 } 4195 4196 static void bnx2x_attn_int(struct bnx2x_softc *sc) 4197 { 4198 /* read local copy of bits */ 4199 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); 4200 uint32_t attn_ack = 4201 le32toh(sc->def_sb->atten_status_block.attn_bits_ack); 4202 uint32_t attn_state = sc->attn_state; 4203 4204 /* look for changed bits */ 4205 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; 4206 uint32_t deasserted = ~attn_bits & attn_ack & attn_state; 4207 4208 PMD_DRV_LOG(DEBUG, sc, 4209 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x", 4210 attn_bits, attn_ack, asserted, deasserted); 4211 4212 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { 4213 PMD_DRV_LOG(ERR, sc, "BAD attention state"); 4214 } 4215 4216 /* handle bits that were raised */ 4217 if (asserted) { 4218 bnx2x_attn_int_asserted(sc, asserted); 4219 } 4220 4221 if (deasserted) { 4222 bnx2x_attn_int_deasserted(sc, deasserted); 4223 } 4224 } 4225 4226 static uint16_t bnx2x_update_dsb_idx(struct bnx2x_softc *sc) 4227 { 4228 struct host_sp_status_block *def_sb = sc->def_sb; 4229 uint16_t rc = 0; 4230 4231 if (!def_sb) 4232 return 0; 4233 4234 mb(); /* status block is written to by the chip */ 4235 4236 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 4237 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; 4238 rc |= BNX2X_DEF_SB_ATT_IDX; 4239 } 4240 4241 if (sc->def_idx != def_sb->sp_sb.running_index) { 4242 sc->def_idx = def_sb->sp_sb.running_index; 4243 rc |= BNX2X_DEF_SB_IDX; 4244 } 4245 4246 mb(); 4247 4248 return rc; 4249 } 4250 4251 static struct ecore_queue_sp_obj *bnx2x_cid_to_q_obj(struct bnx2x_softc *sc, 4252 uint32_t cid) 4253 { 4254 return &sc->sp_objs[CID_TO_FP(cid, sc)].q_obj; 4255 } 4256 4257 static void bnx2x_handle_mcast_eqe(struct bnx2x_softc *sc) 4258 { 4259 struct ecore_mcast_ramrod_params rparam; 4260 int rc; 4261 4262 memset(&rparam, 0, sizeof(rparam)); 4263 4264 rparam.mcast_obj = &sc->mcast_obj; 4265 4266 /* clear pending state for the last command */ 4267 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); 4268 4269 /* if there are pending mcast commands - send them */ 4270 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { 4271 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 4272 if (rc < 0) { 4273 PMD_DRV_LOG(INFO, sc, 4274 "Failed to send pending mcast commands (%d)", 4275 rc); 4276 } 4277 } 4278 } 4279 4280 static void 4281 bnx2x_handle_classification_eqe(struct bnx2x_softc *sc, union event_ring_elem *elem) 4282 { 4283 unsigned long ramrod_flags = 0; 4284 int rc = 0; 4285 uint32_t cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; 4286 struct ecore_vlan_mac_obj *vlan_mac_obj; 4287 4288 /* always push next commands out, don't wait here */ 4289 bnx2x_set_bit(RAMROD_CONT, &ramrod_flags); 4290 4291 switch (le32toh(elem->message.data.eth_event.echo) >> BNX2X_SWCID_SHIFT) { 4292 case ECORE_FILTER_MAC_PENDING: 4293 PMD_DRV_LOG(DEBUG, sc, "Got SETUP_MAC completions"); 4294 vlan_mac_obj = &sc->sp_objs[cid].mac_obj; 4295 break; 4296 4297 case ECORE_FILTER_MCAST_PENDING: 4298 PMD_DRV_LOG(DEBUG, sc, "Got SETUP_MCAST completions"); 
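/* Multicast completions are not tied to a per-queue vlan_mac object; hand them to the dedicated handler, which also kicks any pending ECORE_MCAST_CMD_CONT work, then return early. */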
4299 bnx2x_handle_mcast_eqe(sc); 4300 return; 4301 4302 default: 4303 PMD_DRV_LOG(NOTICE, sc, "Unsupported classification command: %d", 4304 elem->message.data.eth_event.echo); 4305 return; 4306 } 4307 4308 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); 4309 4310 if (rc < 0) { 4311 PMD_DRV_LOG(NOTICE, sc, 4312 "Failed to schedule new commands (%d)", rc); 4313 } else if (rc > 0) { 4314 PMD_DRV_LOG(DEBUG, sc, "Scheduled next pending commands..."); 4315 } 4316 } 4317 4318 static void bnx2x_handle_rx_mode_eqe(struct bnx2x_softc *sc) 4319 { 4320 bnx2x_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 4321 4322 /* send rx_mode command again if was requested */ 4323 if (bnx2x_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state)) { 4324 bnx2x_set_storm_rx_mode(sc); 4325 } 4326 } 4327 4328 static void bnx2x_update_eq_prod(struct bnx2x_softc *sc, uint16_t prod) 4329 { 4330 storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); 4331 wmb(); /* keep prod updates ordered */ 4332 } 4333 4334 static void bnx2x_eq_int(struct bnx2x_softc *sc) 4335 { 4336 uint16_t hw_cons, sw_cons, sw_prod; 4337 union event_ring_elem *elem; 4338 uint8_t echo; 4339 uint32_t cid; 4340 uint8_t opcode; 4341 int spqe_cnt = 0; 4342 struct ecore_queue_sp_obj *q_obj; 4343 struct ecore_func_sp_obj *f_obj = &sc->func_obj; 4344 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; 4345 4346 hw_cons = le16toh(*sc->eq_cons_sb); 4347 4348 /* 4349 * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. 4350 * when we get to the next-page we need to adjust so the loop 4351 * condition below will be met. The next element is the size of a 4352 * regular element and hence incrementing by 1 4353 */ 4354 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { 4355 hw_cons++; 4356 } 4357 4358 /* 4359 * This function may never run in parallel with itself for a 4360 * specific sc and no need for a read memory barrier here. 
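 * Consequently, sc->eq_cons and sc->eq_prod are copied into locals below and written back only after the loop completes.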
4361 */ 4362 sw_cons = sc->eq_cons; 4363 sw_prod = sc->eq_prod; 4364 4365 for (; 4366 sw_cons != hw_cons; 4367 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 4368 4369 elem = &sc->eq[EQ_DESC(sw_cons)]; 4370 4371 /* elem CID originates from FW, actually LE */ 4372 cid = SW_CID(elem->message.data.cfc_del_event.cid); 4373 opcode = elem->message.opcode; 4374 4375 /* handle eq element */ 4376 switch (opcode) { 4377 case EVENT_RING_OPCODE_STAT_QUERY: 4378 PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "got statistics completion event %d", 4379 sc->stats_comp++); 4380 /* nothing to do with stats comp */ 4381 goto next_spqe; 4382 4383 case EVENT_RING_OPCODE_CFC_DEL: 4384 /* handle according to cid range */ 4385 /* we may want to verify here that the sc state is HALTING */ 4386 PMD_DRV_LOG(DEBUG, sc, "got delete ramrod for MULTI[%d]", 4387 cid); 4388 q_obj = bnx2x_cid_to_q_obj(sc, cid); 4389 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { 4390 break; 4391 } 4392 goto next_spqe; 4393 4394 case EVENT_RING_OPCODE_STOP_TRAFFIC: 4395 PMD_DRV_LOG(DEBUG, sc, "got STOP TRAFFIC"); 4396 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { 4397 break; 4398 } 4399 goto next_spqe; 4400 4401 case EVENT_RING_OPCODE_START_TRAFFIC: 4402 PMD_DRV_LOG(DEBUG, sc, "got START TRAFFIC"); 4403 if (f_obj->complete_cmd 4404 (sc, f_obj, ECORE_F_CMD_TX_START)) { 4405 break; 4406 } 4407 goto next_spqe; 4408 4409 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 4410 echo = elem->message.data.function_update_event.echo; 4411 if (echo == SWITCH_UPDATE) { 4412 PMD_DRV_LOG(DEBUG, sc, 4413 "got FUNC_SWITCH_UPDATE ramrod"); 4414 if (f_obj->complete_cmd(sc, f_obj, 4415 ECORE_F_CMD_SWITCH_UPDATE)) 4416 { 4417 break; 4418 } 4419 } else { 4420 PMD_DRV_LOG(DEBUG, sc, 4421 "AFEX: ramrod completed FUNCTION_UPDATE"); 4422 f_obj->complete_cmd(sc, f_obj, 4423 ECORE_F_CMD_AFEX_UPDATE); 4424 } 4425 goto next_spqe; 4426 4427 case EVENT_RING_OPCODE_FORWARD_SETUP: 4428 q_obj = &bnx2x_fwd_sp_obj(sc, q_obj); 4429 if (q_obj->complete_cmd(sc, q_obj, 4430 ECORE_Q_CMD_SETUP_TX_ONLY)) { 4431 break; 4432 } 4433 goto next_spqe; 4434 4435 case EVENT_RING_OPCODE_FUNCTION_START: 4436 PMD_DRV_LOG(DEBUG, sc, "got FUNC_START ramrod"); 4437 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { 4438 break; 4439 } 4440 goto next_spqe; 4441 4442 case EVENT_RING_OPCODE_FUNCTION_STOP: 4443 PMD_DRV_LOG(DEBUG, sc, "got FUNC_STOP ramrod"); 4444 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { 4445 break; 4446 } 4447 goto next_spqe; 4448 } 4449 4450 switch (opcode | sc->state) { 4451 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPEN): 4452 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPENING_WAITING_PORT): 4453 cid = 4454 elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; 4455 PMD_DRV_LOG(DEBUG, sc, "got RSS_UPDATE ramrod. 
CID %d", 4456 cid); 4457 rss_raw->clear_pending(rss_raw); 4458 break; 4459 4460 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): 4461 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): 4462 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_CLOSING_WAITING_HALT): 4463 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_OPEN): 4464 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_DIAG): 4465 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_CLOSING_WAITING_HALT): 4466 PMD_DRV_LOG(DEBUG, sc, 4467 "got (un)set mac ramrod"); 4468 bnx2x_handle_classification_eqe(sc, elem); 4469 break; 4470 4471 case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_OPEN): 4472 case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_DIAG): 4473 case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_CLOSING_WAITING_HALT): 4474 PMD_DRV_LOG(DEBUG, sc, 4475 "got mcast ramrod"); 4476 bnx2x_handle_mcast_eqe(sc); 4477 break; 4478 4479 case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_OPEN): 4480 case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_DIAG): 4481 case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_CLOSING_WAITING_HALT): 4482 PMD_DRV_LOG(DEBUG, sc, 4483 "got rx_mode ramrod"); 4484 bnx2x_handle_rx_mode_eqe(sc); 4485 break; 4486 4487 default: 4488 /* unknown event log error and continue */ 4489 PMD_DRV_LOG(INFO, sc, "Unknown EQ event %d, sc->state 0x%x", 4490 elem->message.opcode, sc->state); 4491 } 4492 4493 next_spqe: 4494 spqe_cnt++; 4495 } /* for */ 4496 4497 mb(); 4498 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); 4499 4500 sc->eq_cons = sw_cons; 4501 sc->eq_prod = sw_prod; 4502 4503 /* make sure that above mem writes were issued towards the memory */ 4504 wmb(); 4505 4506 /* update producer */ 4507 bnx2x_update_eq_prod(sc, sc->eq_prod); 4508 } 4509 4510 static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc) 4511 { 4512 uint16_t status; 4513 int rc = 0; 4514 4515 PMD_DRV_LOG(DEBUG, sc, "---> SP TASK <---"); 4516 4517 /* what work needs to be performed? */ 4518 status = bnx2x_update_dsb_idx(sc); 4519 4520 PMD_DRV_LOG(DEBUG, sc, "dsb status 0x%04x", status); 4521 4522 /* HW attentions */ 4523 if (status & BNX2X_DEF_SB_ATT_IDX) { 4524 PMD_DRV_LOG(DEBUG, sc, "---> ATTN INTR <---"); 4525 bnx2x_attn_int(sc); 4526 status &= ~BNX2X_DEF_SB_ATT_IDX; 4527 rc = 1; 4528 } 4529 4530 /* SP events: STAT_QUERY and others */ 4531 if (status & BNX2X_DEF_SB_IDX) { 4532 /* handle EQ completions */ 4533 PMD_DRV_LOG(DEBUG, sc, "---> EQ INTR <---"); 4534 bnx2x_eq_int(sc); 4535 bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 4536 le16toh(sc->def_idx), IGU_INT_NOP, 1); 4537 status &= ~BNX2X_DEF_SB_IDX; 4538 } 4539 4540 /* if status is non zero then something went wrong */ 4541 if (unlikely(status)) { 4542 PMD_DRV_LOG(INFO, sc, 4543 "Got an unknown SP interrupt! 
(0x%04x)", status); 4544 } 4545 4546 /* ack status block only if something was actually handled */ 4547 bnx2x_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 4548 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); 4549 4550 return rc; 4551 } 4552 4553 static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp) 4554 { 4555 struct bnx2x_softc *sc = fp->sc; 4556 uint8_t more_rx = FALSE; 4557 4558 /* Make sure FP is initialized */ 4559 if (!fp->sb_running_index) 4560 return; 4561 4562 PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, 4563 "---> FP TASK QUEUE (%d) <--", fp->index); 4564 4565 /* update the fastpath index */ 4566 bnx2x_update_fp_sb_idx(fp); 4567 4568 if (rte_atomic32_read(&sc->scan_fp) == 1) { 4569 if (bnx2x_has_rx_work(fp)) { 4570 more_rx = bnx2x_rxeof(sc, fp); 4571 } 4572 4573 if (more_rx) { 4574 /* still more work to do */ 4575 bnx2x_handle_fp_tq(fp); 4576 return; 4577 } 4578 } 4579 4580 /* Assuming we have completed slow path completion, clear the flag */ 4581 rte_atomic32_set(&sc->scan_fp, 0); 4582 bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 4583 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 4584 } 4585 4586 /* 4587 * Legacy interrupt entry point. 4588 * 4589 * Verifies that the controller generated the interrupt and 4590 * then calls a separate routine to handle the various 4591 * interrupt causes: link, RX, and TX. 4592 */ 4593 int bnx2x_intr_legacy(struct bnx2x_softc *sc) 4594 { 4595 struct bnx2x_fastpath *fp; 4596 uint32_t status, mask; 4597 int i, rc = 0; 4598 4599 /* 4600 * 0 for ustorm, 1 for cstorm 4601 * the bits returned from ack_int() are 0-15 4602 * bit 0 = attention status block 4603 * bit 1 = fast path status block 4604 * a mask of 0x2 or more = tx/rx event 4605 * a mask of 1 = slow path event 4606 */ 4607 4608 status = bnx2x_ack_int(sc); 4609 4610 /* the interrupt is not for us */ 4611 if (unlikely(status == 0)) { 4612 return 0; 4613 } 4614 4615 PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "Interrupt status 0x%04x", status); 4616 //bnx2x_dump_status_block(sc); 4617 4618 FOR_EACH_ETH_QUEUE(sc, i) { 4619 fp = &sc->fp[i]; 4620 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); 4621 if (status & mask) { 4622 /* acknowledge and disable further fastpath interrupts */ 4623 bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 4624 0, IGU_INT_DISABLE, 0); 4625 bnx2x_handle_fp_tq(fp); 4626 status &= ~mask; 4627 } 4628 } 4629 4630 if (unlikely(status & 0x1)) { 4631 /* acknowledge and disable further slowpath interrupts */ 4632 bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 4633 0, IGU_INT_DISABLE, 0); 4634 rc = bnx2x_handle_sp_tq(sc); 4635 status &= ~0x1; 4636 } 4637 4638 if (unlikely(status)) { 4639 PMD_DRV_LOG(WARNING, sc, 4640 "Unexpected fastpath status (0x%08x)!", status); 4641 } 4642 4643 return rc; 4644 } 4645 4646 static int bnx2x_init_hw_common_chip(struct bnx2x_softc *sc); 4647 static int bnx2x_init_hw_common(struct bnx2x_softc *sc); 4648 static int bnx2x_init_hw_port(struct bnx2x_softc *sc); 4649 static int bnx2x_init_hw_func(struct bnx2x_softc *sc); 4650 static void bnx2x_reset_common(struct bnx2x_softc *sc); 4651 static void bnx2x_reset_port(struct bnx2x_softc *sc); 4652 static void bnx2x_reset_func(struct bnx2x_softc *sc); 4653 static int bnx2x_init_firmware(struct bnx2x_softc *sc); 4654 static void bnx2x_release_firmware(struct bnx2x_softc *sc); 4655 4656 static struct 4657 ecore_func_sp_drv_ops bnx2x_func_sp_drv = { 4658 .init_hw_cmn_chip = bnx2x_init_hw_common_chip, 4659 .init_hw_cmn = bnx2x_init_hw_common, 4660 .init_hw_port = bnx2x_init_hw_port, 4661 .init_hw_func = bnx2x_init_hw_func, 4662 4663 .reset_hw_cmn = 
bnx2x_reset_common, 4664 .reset_hw_port = bnx2x_reset_port, 4665 .reset_hw_func = bnx2x_reset_func, 4666 4667 .init_fw = bnx2x_init_firmware, 4668 .release_fw = bnx2x_release_firmware, 4669 }; 4670 4671 static void bnx2x_init_func_obj(struct bnx2x_softc *sc) 4672 { 4673 sc->dmae_ready = 0; 4674 4675 PMD_INIT_FUNC_TRACE(sc); 4676 4677 ecore_init_func_obj(sc, 4678 &sc->func_obj, 4679 BNX2X_SP(sc, func_rdata), 4680 (rte_iova_t)BNX2X_SP_MAPPING(sc, func_rdata), 4681 BNX2X_SP(sc, func_afex_rdata), 4682 (rte_iova_t)BNX2X_SP_MAPPING(sc, func_afex_rdata), 4683 &bnx2x_func_sp_drv); 4684 } 4685 4686 static int bnx2x_init_hw(struct bnx2x_softc *sc, uint32_t load_code) 4687 { 4688 struct ecore_func_state_params func_params = { NULL }; 4689 int rc; 4690 4691 PMD_INIT_FUNC_TRACE(sc); 4692 4693 /* prepare the parameters for function state transitions */ 4694 bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 4695 4696 func_params.f_obj = &sc->func_obj; 4697 func_params.cmd = ECORE_F_CMD_HW_INIT; 4698 4699 func_params.params.hw_init.load_phase = load_code; 4700 4701 /* 4702 * Via a plethora of function pointers, we will eventually reach 4703 * bnx2x_init_hw_common(), bnx2x_init_hw_port(), or bnx2x_init_hw_func(). 4704 */ 4705 rc = ecore_func_state_change(sc, &func_params); 4706 4707 return rc; 4708 } 4709 4710 static void 4711 bnx2x_fill(struct bnx2x_softc *sc, uint32_t addr, int fill, uint32_t len) 4712 { 4713 uint32_t i; 4714 4715 if (!(len % 4) && !(addr % 4)) { 4716 for (i = 0; i < len; i += 4) { 4717 REG_WR(sc, (addr + i), fill); 4718 } 4719 } else { 4720 for (i = 0; i < len; i++) { 4721 REG_WR8(sc, (addr + i), fill); 4722 } 4723 } 4724 } 4725 4726 /* writes FP SP data to FW - data_size in dwords */ 4727 static void 4728 bnx2x_wr_fp_sb_data(struct bnx2x_softc *sc, int fw_sb_id, uint32_t * sb_data_p, 4729 uint32_t data_size) 4730 { 4731 uint32_t index; 4732 4733 for (index = 0; index < data_size; index++) { 4734 REG_WR(sc, 4735 (BAR_CSTRORM_INTMEM + 4736 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 4737 (sizeof(uint32_t) * index)), *(sb_data_p + index)); 4738 } 4739 } 4740 4741 static void bnx2x_zero_fp_sb(struct bnx2x_softc *sc, int fw_sb_id) 4742 { 4743 struct hc_status_block_data_e2 sb_data_e2; 4744 struct hc_status_block_data_e1x sb_data_e1x; 4745 uint32_t *sb_data_p; 4746 uint32_t data_size = 0; 4747 4748 if (!CHIP_IS_E1x(sc)) { 4749 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 4750 sb_data_e2.common.state = SB_DISABLED; 4751 sb_data_e2.common.p_func.vf_valid = FALSE; 4752 sb_data_p = (uint32_t *) & sb_data_e2; 4753 data_size = (sizeof(struct hc_status_block_data_e2) / 4754 sizeof(uint32_t)); 4755 } else { 4756 memset(&sb_data_e1x, 0, 4757 sizeof(struct hc_status_block_data_e1x)); 4758 sb_data_e1x.common.state = SB_DISABLED; 4759 sb_data_e1x.common.p_func.vf_valid = FALSE; 4760 sb_data_p = (uint32_t *) & sb_data_e1x; 4761 data_size = (sizeof(struct hc_status_block_data_e1x) / 4762 sizeof(uint32_t)); 4763 } 4764 4765 bnx2x_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 4766 4767 bnx2x_fill(sc, 4768 (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 0, 4769 CSTORM_STATUS_BLOCK_SIZE); 4770 bnx2x_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 4771 0, CSTORM_SYNC_BLOCK_SIZE); 4772 } 4773 4774 static void 4775 bnx2x_wr_sp_sb_data(struct bnx2x_softc *sc, 4776 struct hc_sp_status_block_data *sp_sb_data) 4777 { 4778 uint32_t i; 4779 4780 for (i = 0; 4781 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); 4782 i++) { 4783 
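/* copy the slow path status block data into CSTORM internal memory one dword at a time */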
REG_WR(sc, 4784 (BAR_CSTRORM_INTMEM + 4785 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + 4786 (i * sizeof(uint32_t))), 4787 *((uint32_t *) sp_sb_data + i)); 4788 } 4789 } 4790 4791 static void bnx2x_zero_sp_sb(struct bnx2x_softc *sc) 4792 { 4793 struct hc_sp_status_block_data sp_sb_data; 4794 4795 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 4796 4797 sp_sb_data.state = SB_DISABLED; 4798 sp_sb_data.p_func.vf_valid = FALSE; 4799 4800 bnx2x_wr_sp_sb_data(sc, &sp_sb_data); 4801 4802 bnx2x_fill(sc, 4803 (BAR_CSTRORM_INTMEM + 4804 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 4805 0, CSTORM_SP_STATUS_BLOCK_SIZE); 4806 bnx2x_fill(sc, 4807 (BAR_CSTRORM_INTMEM + 4808 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 4809 0, CSTORM_SP_SYNC_BLOCK_SIZE); 4810 } 4811 4812 static void 4813 bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, int igu_sb_id, 4814 int igu_seg_id) 4815 { 4816 hc_sm->igu_sb_id = igu_sb_id; 4817 hc_sm->igu_seg_id = igu_seg_id; 4818 hc_sm->timer_value = 0xFF; 4819 hc_sm->time_to_expire = 0xFFFFFFFF; 4820 } 4821 4822 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) 4823 { 4824 /* zero out state machine indices */ 4825 4826 /* rx indices */ 4827 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 4828 4829 /* tx indices */ 4830 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 4831 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 4832 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 4833 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 4834 4835 /* map indices */ 4836 4837 /* rx indices */ 4838 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 4839 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 4840 4841 /* tx indices */ 4842 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 4843 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 4844 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 4845 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 4846 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 4847 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 4848 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 4849 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 4850 } 4851 4852 static void 4853 bnx2x_init_sb(struct bnx2x_softc *sc, rte_iova_t busaddr, int vfid, 4854 uint8_t vf_valid, int fw_sb_id, int igu_sb_id) 4855 { 4856 struct hc_status_block_data_e2 sb_data_e2; 4857 struct hc_status_block_data_e1x sb_data_e1x; 4858 struct hc_status_block_sm *hc_sm_p; 4859 uint32_t *sb_data_p; 4860 int igu_seg_id; 4861 int data_size; 4862 4863 if (CHIP_INT_MODE_IS_BC(sc)) { 4864 igu_seg_id = HC_SEG_ACCESS_NORM; 4865 } else { 4866 igu_seg_id = IGU_SEG_ACCESS_NORM; 4867 } 4868 4869 bnx2x_zero_fp_sb(sc, fw_sb_id); 4870 4871 if (!CHIP_IS_E1x(sc)) { 4872 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 4873 sb_data_e2.common.state = SB_ENABLED; 4874 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); 4875 sb_data_e2.common.p_func.vf_id = vfid; 4876 sb_data_e2.common.p_func.vf_valid = vf_valid; 4877 sb_data_e2.common.p_func.vnic_id = SC_VN(sc); 4878 sb_data_e2.common.same_igu_sb_1b = TRUE; 4879 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); 4880 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); 4881 hc_sm_p = sb_data_e2.common.state_machine; 4882 sb_data_p = (uint32_t *) & sb_data_e2; 4883 data_size = (sizeof(struct hc_status_block_data_e2) / 4884 sizeof(uint32_t)); 4885 bnx2x_map_sb_state_machines(sb_data_e2.index_data); 4886 } else { 4887 memset(&sb_data_e1x, 0, 4888 sizeof(struct 
hc_status_block_data_e1x)); 4889 sb_data_e1x.common.state = SB_ENABLED; 4890 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); 4891 sb_data_e1x.common.p_func.vf_id = 0xff; 4892 sb_data_e1x.common.p_func.vf_valid = FALSE; 4893 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); 4894 sb_data_e1x.common.same_igu_sb_1b = TRUE; 4895 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); 4896 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); 4897 hc_sm_p = sb_data_e1x.common.state_machine; 4898 sb_data_p = (uint32_t *) & sb_data_e1x; 4899 data_size = (sizeof(struct hc_status_block_data_e1x) / 4900 sizeof(uint32_t)); 4901 bnx2x_map_sb_state_machines(sb_data_e1x.index_data); 4902 } 4903 4904 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); 4905 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); 4906 4907 /* write indices to HW - PCI guarantees endianity of regpairs */ 4908 bnx2x_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 4909 } 4910 4911 static uint8_t bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) 4912 { 4913 if (CHIP_IS_E1x(fp->sc)) { 4914 return fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H; 4915 } else { 4916 return fp->cl_id; 4917 } 4918 } 4919 4920 static uint32_t 4921 bnx2x_rx_ustorm_prods_offset(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp) 4922 { 4923 uint32_t offset = BAR_USTRORM_INTMEM; 4924 4925 if (IS_VF(sc)) { 4926 return PXP_VF_ADDR_USDM_QUEUES_START + 4927 (sc->acquire_resp.resc.hw_qid[fp->index] * 4928 sizeof(struct ustorm_queue_zone_data)); 4929 } else if (!CHIP_IS_E1x(sc)) { 4930 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); 4931 } else { 4932 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); 4933 } 4934 4935 return offset; 4936 } 4937 4938 static void bnx2x_init_eth_fp(struct bnx2x_softc *sc, int idx) 4939 { 4940 struct bnx2x_fastpath *fp = &sc->fp[idx]; 4941 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; 4942 unsigned long q_type = 0; 4943 int cos; 4944 4945 fp->sc = sc; 4946 fp->index = idx; 4947 4948 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); 4949 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); 4950 4951 if (CHIP_IS_E1x(sc)) 4952 fp->cl_id = SC_L_ID(sc) + idx; 4953 else 4954 /* want client ID same as IGU SB ID for non-E1 */ 4955 fp->cl_id = fp->igu_sb_id; 4956 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); 4957 4958 /* setup sb indices */ 4959 if (!CHIP_IS_E1x(sc)) { 4960 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; 4961 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; 4962 } else { 4963 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; 4964 fp->sb_running_index = 4965 fp->status_block.e1x_sb->sb.running_index; 4966 } 4967 4968 /* init shortcut */ 4969 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(sc, fp); 4970 4971 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; 4972 4973 for (cos = 0; cos < sc->max_cos; cos++) { 4974 cids[cos] = idx; 4975 } 4976 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]; 4977 4978 /* nothing more for a VF to do */ 4979 if (IS_VF(sc)) { 4980 return; 4981 } 4982 4983 bnx2x_init_sb(sc, fp->sb_dma.paddr, BNX2X_VF_ID_INVALID, FALSE, 4984 fp->fw_sb_id, fp->igu_sb_id); 4985 4986 bnx2x_update_fp_sb_idx(fp); 4987 4988 /* Configure Queue State object */ 4989 bnx2x_set_bit(ECORE_Q_TYPE_HAS_RX, &q_type); 4990 bnx2x_set_bit(ECORE_Q_TYPE_HAS_TX, &q_type); 4991 4992 ecore_init_queue_obj(sc, 4993 &sc->sp_objs[idx].q_obj, 4994 fp->cl_id, 4995 cids, 4996 sc->max_cos, 
4997 SC_FUNC(sc), 4998 BNX2X_SP(sc, q_rdata), 4999 (rte_iova_t)BNX2X_SP_MAPPING(sc, q_rdata), 5000 q_type); 5001 5002 /* configure classification DBs */ 5003 ecore_init_mac_obj(sc, 5004 &sc->sp_objs[idx].mac_obj, 5005 fp->cl_id, 5006 idx, 5007 SC_FUNC(sc), 5008 BNX2X_SP(sc, mac_rdata), 5009 (rte_iova_t)BNX2X_SP_MAPPING(sc, mac_rdata), 5010 ECORE_FILTER_MAC_PENDING, &sc->sp_state, 5011 ECORE_OBJ_TYPE_RX_TX, &sc->macs_pool); 5012 } 5013 5014 static void 5015 bnx2x_update_rx_prod(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, 5016 uint16_t rx_bd_prod, uint16_t rx_cq_prod) 5017 { 5018 union ustorm_eth_rx_producers rx_prods; 5019 uint32_t i; 5020 5021 /* update producers */ 5022 rx_prods.prod.bd_prod = rx_bd_prod; 5023 rx_prods.prod.cqe_prod = rx_cq_prod; 5024 rx_prods.prod.reserved = 0; 5025 5026 /* 5027 * Make sure that the BD and SGE data is updated before updating the 5028 * producers since FW might read the BD/SGE right after the producer 5029 * is updated. 5030 * This is only applicable for weak-ordered memory model archs such 5031 * as IA-64. The following barrier is also mandatory since FW will 5032 * assumes BDs must have buffers. 5033 */ 5034 wmb(); 5035 5036 for (i = 0; i < (sizeof(rx_prods) / 4); i++) { 5037 REG_WR(sc, 5038 (fp->ustorm_rx_prods_offset + (i * 4)), 5039 rx_prods.raw_data[i]); 5040 } 5041 5042 wmb(); /* keep prod updates ordered */ 5043 } 5044 5045 static void bnx2x_init_rx_rings(struct bnx2x_softc *sc) 5046 { 5047 struct bnx2x_fastpath *fp; 5048 int i; 5049 struct bnx2x_rx_queue *rxq; 5050 5051 for (i = 0; i < sc->num_queues; i++) { 5052 fp = &sc->fp[i]; 5053 rxq = sc->rx_queues[fp->index]; 5054 if (!rxq) { 5055 PMD_RX_LOG(ERR, "RX queue is NULL"); 5056 return; 5057 } 5058 5059 rxq->rx_bd_head = 0; 5060 rxq->rx_bd_tail = rxq->nb_rx_desc; 5061 rxq->rx_cq_head = 0; 5062 rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq); 5063 *fp->rx_cq_cons_sb = 0; 5064 5065 /* 5066 * Activate the BD ring... 
5067 * Warning, this will generate an interrupt (to the TSTORM) 5068 * so this can only be done after the chip is initialized 5069 */ 5070 bnx2x_update_rx_prod(sc, fp, rxq->rx_bd_tail, rxq->rx_cq_tail); 5071 5072 if (i != 0) { 5073 continue; 5074 } 5075 } 5076 } 5077 5078 static void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp) 5079 { 5080 struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; 5081 5082 fp->tx_db.data.header.header = 1 << DOORBELL_HDR_DB_TYPE_SHIFT; 5083 fp->tx_db.data.zero_fill1 = 0; 5084 fp->tx_db.data.prod = 0; 5085 5086 if (!txq) { 5087 PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); 5088 return; 5089 } 5090 5091 txq->tx_pkt_tail = 0; 5092 txq->tx_pkt_head = 0; 5093 txq->tx_bd_tail = 0; 5094 txq->tx_bd_head = 0; 5095 } 5096 5097 static void bnx2x_init_tx_rings(struct bnx2x_softc *sc) 5098 { 5099 int i; 5100 5101 for (i = 0; i < sc->num_queues; i++) { 5102 bnx2x_init_tx_ring_one(&sc->fp[i]); 5103 } 5104 } 5105 5106 static void bnx2x_init_def_sb(struct bnx2x_softc *sc) 5107 { 5108 struct host_sp_status_block *def_sb = sc->def_sb; 5109 rte_iova_t mapping = sc->def_sb_dma.paddr; 5110 int igu_sp_sb_index; 5111 int igu_seg_id; 5112 int port = SC_PORT(sc); 5113 int func = SC_FUNC(sc); 5114 int reg_offset, reg_offset_en5; 5115 uint64_t section; 5116 int index, sindex; 5117 struct hc_sp_status_block_data sp_sb_data; 5118 5119 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 5120 5121 if (CHIP_INT_MODE_IS_BC(sc)) { 5122 igu_sp_sb_index = DEF_SB_IGU_ID; 5123 igu_seg_id = HC_SEG_ACCESS_DEF; 5124 } else { 5125 igu_sp_sb_index = sc->igu_dsb_id; 5126 igu_seg_id = IGU_SEG_ACCESS_DEF; 5127 } 5128 5129 /* attentions */ 5130 section = ((uint64_t) mapping + 5131 offsetof(struct host_sp_status_block, atten_status_block)); 5132 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; 5133 sc->attn_state = 0; 5134 5135 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 5136 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 5137 5138 reg_offset_en5 = (port) ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 5139 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; 5140 5141 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 5142 /* take care of sig[0]..sig[4] */ 5143 for (sindex = 0; sindex < 4; sindex++) { 5144 sc->attn_group[index].sig[sindex] = 5145 REG_RD(sc, 5146 (reg_offset + (sindex * 0x4) + 5147 (0x10 * index))); 5148 } 5149 5150 if (!CHIP_IS_E1x(sc)) { 5151 /* 5152 * enable5 is separate from the rest of the registers, 5153 * and the address skip is 4 and not 16 between the 5154 * different groups 5155 */ 5156 sc->attn_group[index].sig[4] = 5157 REG_RD(sc, (reg_offset_en5 + (0x4 * index))); 5158 } else { 5159 sc->attn_group[index].sig[4] = 0; 5160 } 5161 } 5162 5163 if (sc->devinfo.int_block == INT_BLOCK_HC) { 5164 reg_offset = 5165 port ? 
HC_REG_ATTN_MSG1_ADDR_L : HC_REG_ATTN_MSG0_ADDR_L; 5166 REG_WR(sc, reg_offset, U64_LO(section)); 5167 REG_WR(sc, (reg_offset + 4), U64_HI(section)); 5168 } else if (!CHIP_IS_E1x(sc)) { 5169 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); 5170 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); 5171 } 5172 5173 section = ((uint64_t) mapping + 5174 offsetof(struct host_sp_status_block, sp_sb)); 5175 5176 bnx2x_zero_sp_sb(sc); 5177 5178 /* PCI guarantees endianity of regpair */ 5179 sp_sb_data.state = SB_ENABLED; 5180 sp_sb_data.host_sb_addr.lo = U64_LO(section); 5181 sp_sb_data.host_sb_addr.hi = U64_HI(section); 5182 sp_sb_data.igu_sb_id = igu_sp_sb_index; 5183 sp_sb_data.igu_seg_id = igu_seg_id; 5184 sp_sb_data.p_func.pf_id = func; 5185 sp_sb_data.p_func.vnic_id = SC_VN(sc); 5186 sp_sb_data.p_func.vf_id = 0xff; 5187 5188 bnx2x_wr_sp_sb_data(sc, &sp_sb_data); 5189 5190 bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 5191 } 5192 5193 static void bnx2x_init_sp_ring(struct bnx2x_softc *sc) 5194 { 5195 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); 5196 sc->spq_prod_idx = 0; 5197 sc->dsb_sp_prod = 5198 &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; 5199 sc->spq_prod_bd = sc->spq; 5200 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); 5201 } 5202 5203 static void bnx2x_init_eq_ring(struct bnx2x_softc *sc) 5204 { 5205 union event_ring_elem *elem; 5206 int i; 5207 5208 for (i = 1; i <= NUM_EQ_PAGES; i++) { 5209 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; 5210 5211 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + 5212 BNX2X_PAGE_SIZE * 5213 (i % NUM_EQ_PAGES))); 5214 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + 5215 BNX2X_PAGE_SIZE * 5216 (i % NUM_EQ_PAGES))); 5217 } 5218 5219 sc->eq_cons = 0; 5220 sc->eq_prod = NUM_EQ_DESC; 5221 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; 5222 5223 atomic_store_rel_long(&sc->eq_spq_left, 5224 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), 5225 NUM_EQ_DESC) - 1)); 5226 } 5227 5228 static void bnx2x_init_internal_common(struct bnx2x_softc *sc) 5229 { 5230 int i; 5231 5232 if (IS_MF_SI(sc)) { 5233 /* 5234 * In switch independent mode, the TSTORM needs to accept 5235 * packets that failed classification, since approximate match 5236 * mac addresses aren't written to NIG LLH. 5237 */ 5238 REG_WR8(sc, 5239 (BAR_TSTRORM_INTMEM + 5240 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 2); 5241 } else 5242 REG_WR8(sc, 5243 (BAR_TSTRORM_INTMEM + 5244 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 0); 5245 5246 /* 5247 * Zero this manually as its initialization is currently missing 5248 * in the initTool. 5249 */ 5250 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { 5251 REG_WR(sc, 5252 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), 5253 0); 5254 } 5255 5256 if (!CHIP_IS_E1x(sc)) { 5257 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), 5258 CHIP_INT_MODE_IS_BC(sc) ? 
HC_IGU_BC_MODE : 5259 HC_IGU_NBC_MODE); 5260 } 5261 } 5262 5263 static void bnx2x_init_internal(struct bnx2x_softc *sc, uint32_t load_code) 5264 { 5265 switch (load_code) { 5266 case FW_MSG_CODE_DRV_LOAD_COMMON: 5267 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 5268 bnx2x_init_internal_common(sc); 5269 /* no break */ 5270 5271 case FW_MSG_CODE_DRV_LOAD_PORT: 5272 /* nothing to do */ 5273 /* no break */ 5274 5275 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 5276 /* internal memory per function is initialized inside bnx2x_pf_init */ 5277 break; 5278 5279 default: 5280 PMD_DRV_LOG(NOTICE, sc, "Unknown load_code (0x%x) from MCP", 5281 load_code); 5282 break; 5283 } 5284 } 5285 5286 static void 5287 storm_memset_func_cfg(struct bnx2x_softc *sc, 5288 struct tstorm_eth_function_common_config *tcfg, 5289 uint16_t abs_fid) 5290 { 5291 uint32_t addr; 5292 size_t size; 5293 5294 addr = (BAR_TSTRORM_INTMEM + 5295 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid)); 5296 size = sizeof(struct tstorm_eth_function_common_config); 5297 ecore_storm_memset_struct(sc, addr, size, (uint32_t *) tcfg); 5298 } 5299 5300 static void bnx2x_func_init(struct bnx2x_softc *sc, struct bnx2x_func_init_params *p) 5301 { 5302 struct tstorm_eth_function_common_config tcfg = { 0 }; 5303 5304 if (CHIP_IS_E1x(sc)) { 5305 storm_memset_func_cfg(sc, &tcfg, p->func_id); 5306 } 5307 5308 /* Enable the function in the FW */ 5309 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id); 5310 storm_memset_func_en(sc, p->func_id, 1); 5311 5312 /* spq */ 5313 if (p->func_flgs & FUNC_FLG_SPQ) { 5314 storm_memset_spq_addr(sc, p->spq_map, p->func_id); 5315 REG_WR(sc, 5316 (XSEM_REG_FAST_MEMORY + 5317 XSTORM_SPQ_PROD_OFFSET(p->func_id)), p->spq_prod); 5318 } 5319 } 5320 5321 /* 5322 * Calculates the sum of vn_min_rates. 5323 * It's needed for further normalizing of the min_rates. 5324 * Returns: 5325 * sum of vn_min_rates. 5326 * or 5327 * 0 - if all the min_rates are 0. 5328 * In the latter case the fairness algorithm should be deactivated. 5329 * If not all min rates are zero, those that are zero will be set to 1. 
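 * Illustrative example (hypothetical config values): with min BW fields {25, 10, 0, 0} the computed rates are {2500, 1000, DEF_MIN_RATE, DEF_MIN_RATE} and fairness stays enabled; if every field is 0, every entry becomes DEF_MIN_RATE but CMNG_FLAGS_PER_PORT_FAIRNESS_VN is cleared.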
5330 */ 5331 static void bnx2x_calc_vn_min(struct bnx2x_softc *sc, struct cmng_init_input *input) 5332 { 5333 uint32_t vn_cfg; 5334 uint32_t vn_min_rate; 5335 int all_zero = 1; 5336 int vn; 5337 5338 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 5339 vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 5340 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 5341 FUNC_MF_CFG_MIN_BW_SHIFT) * 100); 5342 5343 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 5344 /* skip hidden VNs */ 5345 vn_min_rate = 0; 5346 } else if (!vn_min_rate) { 5347 /* If min rate is zero - set it to 100 */ 5348 vn_min_rate = DEF_MIN_RATE; 5349 } else { 5350 all_zero = 0; 5351 } 5352 5353 input->vnic_min_rate[vn] = vn_min_rate; 5354 } 5355 5356 /* if ETS or all min rates are zeros - disable fairness */ 5357 if (all_zero) { 5358 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 5359 } else { 5360 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 5361 } 5362 } 5363 5364 static uint16_t 5365 bnx2x_extract_max_cfg(__rte_unused struct bnx2x_softc *sc, uint32_t mf_cfg) 5366 { 5367 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 5368 FUNC_MF_CFG_MAX_BW_SHIFT); 5369 5370 if (!max_cfg) { 5371 PMD_DRV_LOG(DEBUG, sc, 5372 "Max BW configured to 0 - using 100 instead"); 5373 max_cfg = 100; 5374 } 5375 5376 return max_cfg; 5377 } 5378 5379 static void 5380 bnx2x_calc_vn_max(struct bnx2x_softc *sc, int vn, struct cmng_init_input *input) 5381 { 5382 uint16_t vn_max_rate; 5383 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 5384 uint32_t max_cfg; 5385 5386 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 5387 vn_max_rate = 0; 5388 } else { 5389 max_cfg = bnx2x_extract_max_cfg(sc, vn_cfg); 5390 5391 if (IS_MF_SI(sc)) { 5392 /* max_cfg in percents of linkspeed */ 5393 vn_max_rate = 5394 ((sc->link_vars.line_speed * max_cfg) / 100); 5395 } else { /* SD modes */ 5396 /* max_cfg is absolute in 100Mb units */ 5397 vn_max_rate = (max_cfg * 100); 5398 } 5399 } 5400 5401 input->vnic_max_rate[vn] = vn_max_rate; 5402 } 5403 5404 static void 5405 bnx2x_cmng_fns_init(struct bnx2x_softc *sc, uint8_t read_cfg, uint8_t cmng_type) 5406 { 5407 struct cmng_init_input input; 5408 int vn; 5409 5410 memset(&input, 0, sizeof(struct cmng_init_input)); 5411 5412 input.port_rate = sc->link_vars.line_speed; 5413 5414 if (cmng_type == CMNG_FNS_MINMAX) { 5415 /* read mf conf from shmem */ 5416 if (read_cfg) { 5417 bnx2x_read_mf_cfg(sc); 5418 } 5419 5420 /* get VN min rate and enable fairness if not 0 */ 5421 bnx2x_calc_vn_min(sc, &input); 5422 5423 /* get VN max rate */ 5424 if (sc->port.pmf) { 5425 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 5426 bnx2x_calc_vn_max(sc, vn, &input); 5427 } 5428 } 5429 5430 /* always enable rate shaping and fairness */ 5431 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 5432 5433 ecore_init_cmng(&input, &sc->cmng); 5434 return; 5435 } 5436 } 5437 5438 static int bnx2x_get_cmng_fns_mode(struct bnx2x_softc *sc) 5439 { 5440 if (CHIP_REV_IS_SLOW(sc)) { 5441 return CMNG_FNS_NONE; 5442 } 5443 5444 if (IS_MF(sc)) { 5445 return CMNG_FNS_MINMAX; 5446 } 5447 5448 return CMNG_FNS_NONE; 5449 } 5450 5451 static void 5452 storm_memset_cmng(struct bnx2x_softc *sc, struct cmng_init *cmng, uint8_t port) 5453 { 5454 int vn; 5455 int func; 5456 uint32_t addr; 5457 size_t size; 5458 5459 addr = (BAR_XSTRORM_INTMEM + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port)); 5460 size = sizeof(struct cmng_struct_per_port); 5461 ecore_storm_memset_struct(sc, addr, size, (uint32_t *) & cmng->port); 5462 5463 for (vn = VN_0; vn < 
SC_MAX_VN_NUM(sc); vn++) { 5464 func = func_by_vn(sc, vn); 5465 5466 addr = (BAR_XSTRORM_INTMEM + 5467 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func)); 5468 size = sizeof(struct rate_shaping_vars_per_vn); 5469 ecore_storm_memset_struct(sc, addr, size, 5470 (uint32_t *) & cmng-> 5471 vnic.vnic_max_rate[vn]); 5472 5473 addr = (BAR_XSTRORM_INTMEM + 5474 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func)); 5475 size = sizeof(struct fairness_vars_per_vn); 5476 ecore_storm_memset_struct(sc, addr, size, 5477 (uint32_t *) & cmng-> 5478 vnic.vnic_min_rate[vn]); 5479 } 5480 } 5481 5482 static void bnx2x_pf_init(struct bnx2x_softc *sc) 5483 { 5484 struct bnx2x_func_init_params func_init; 5485 struct event_ring_data eq_data; 5486 uint16_t flags; 5487 5488 memset(&eq_data, 0, sizeof(struct event_ring_data)); 5489 memset(&func_init, 0, sizeof(struct bnx2x_func_init_params)); 5490 5491 if (!CHIP_IS_E1x(sc)) { 5492 /* reset IGU PF statistics: MSIX + ATTN */ 5493 /* PF */ 5494 REG_WR(sc, 5495 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 5496 (BNX2X_IGU_STAS_MSG_VF_CNT * 4) + 5497 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 5498 4)), 0); 5499 /* ATTN */ 5500 REG_WR(sc, 5501 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 5502 (BNX2X_IGU_STAS_MSG_VF_CNT * 4) + 5503 (BNX2X_IGU_STAS_MSG_PF_CNT * 4) + 5504 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 5505 4)), 0); 5506 } 5507 5508 /* function setup flags */ 5509 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); 5510 5511 func_init.func_flgs = flags; 5512 func_init.pf_id = SC_FUNC(sc); 5513 func_init.func_id = SC_FUNC(sc); 5514 func_init.spq_map = sc->spq_dma.paddr; 5515 func_init.spq_prod = sc->spq_prod_idx; 5516 5517 bnx2x_func_init(sc, &func_init); 5518 5519 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port)); 5520 5521 /* 5522 * Congestion management values depend on the link rate. 5523 * There is no active link, so the initial link rate is set to 10Gbps. 5524 * When the link comes up the congestion management values are 5525 * re-calculated according to the actual link rate. 5526 */ 5527 sc->link_vars.line_speed = SPEED_10000; 5528 bnx2x_cmng_fns_init(sc, TRUE, bnx2x_get_cmng_fns_mode(sc)); 5529 5530 /* Only the PMF sets the HW */ 5531 if (sc->port.pmf) { 5532 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 5533 } 5534 5535 /* init Event Queue - PCI bus guarantees correct endianity */ 5536 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr); 5537 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr); 5538 eq_data.producer = sc->eq_prod; 5539 eq_data.index_id = HC_SP_INDEX_EQ_CONS; 5540 eq_data.sb_id = DEF_SB_ID; 5541 storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc)); 5542 } 5543 5544 static void bnx2x_hc_int_enable(struct bnx2x_softc *sc) 5545 { 5546 int port = SC_PORT(sc); 5547 uint32_t addr = (port) ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 5548 uint32_t val = REG_RD(sc, addr); 5549 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) 5550 || (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); 5551 uint8_t single_msix = (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); 5552 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI); 5553 5554 if (msix) { 5555 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 5556 HC_CONFIG_0_REG_INT_LINE_EN_0); 5557 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 5558 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 5559 if (single_msix) { 5560 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; 5561 } 5562 } else if (msi) { 5563 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 5564 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 5565 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 5566 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 5567 } else { 5568 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 5569 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 5570 HC_CONFIG_0_REG_INT_LINE_EN_0 | 5571 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 5572 5573 REG_WR(sc, addr, val); 5574 5575 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 5576 } 5577 5578 REG_WR(sc, addr, val); 5579 5580 /* ensure that HC_CONFIG is written before leading/trailing edge config */ 5581 mb(); 5582 5583 /* init leading/trailing edge */ 5584 if (IS_MF(sc)) { 5585 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 5586 if (sc->port.pmf) { 5587 /* enable nig and gpio3 attention */ 5588 val |= 0x1100; 5589 } 5590 } else { 5591 val = 0xffff; 5592 } 5593 5594 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port * 8), val); 5595 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port * 8), val); 5596 5597 /* make sure that interrupts are indeed enabled from here on */ 5598 mb(); 5599 } 5600 5601 static void bnx2x_igu_int_enable(struct bnx2x_softc *sc) 5602 { 5603 uint32_t val; 5604 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) 5605 || (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); 5606 uint8_t single_msix = (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); 5607 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI); 5608 5609 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 5610 5611 if (msix) { 5612 val &= ~(IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_SINGLE_ISR_EN); 5613 val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN); 5614 if (single_msix) { 5615 val |= IGU_PF_CONF_SINGLE_ISR_EN; 5616 } 5617 } else if (msi) { 5618 val &= ~IGU_PF_CONF_INT_LINE_EN; 5619 val |= (IGU_PF_CONF_MSI_MSIX_EN | 5620 IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); 5621 } else { 5622 val &= ~IGU_PF_CONF_MSI_MSIX_EN; 5623 val |= (IGU_PF_CONF_INT_LINE_EN | 5624 IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); 5625 } 5626 5627 /* clean previous status - need to configure igu prior to ack */ 5628 if ((!msix) || single_msix) { 5629 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 5630 bnx2x_ack_int(sc); 5631 } 5632 5633 val |= IGU_PF_CONF_FUNC_EN; 5634 5635 PMD_DRV_LOG(DEBUG, sc, "write 0x%x to IGU mode %s", 5636 val, ((msix) ? "MSI-X" : ((msi) ? 
"MSI" : "INTx"))); 5637 5638 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 5639 5640 mb(); 5641 5642 /* init leading/trailing edge */ 5643 if (IS_MF(sc)) { 5644 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 5645 if (sc->port.pmf) { 5646 /* enable nig and gpio3 attention */ 5647 val |= 0x1100; 5648 } 5649 } else { 5650 val = 0xffff; 5651 } 5652 5653 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 5654 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 5655 5656 /* make sure that interrupts are indeed enabled from here on */ 5657 mb(); 5658 } 5659 5660 static void bnx2x_int_enable(struct bnx2x_softc *sc) 5661 { 5662 if (sc->devinfo.int_block == INT_BLOCK_HC) { 5663 bnx2x_hc_int_enable(sc); 5664 } else { 5665 bnx2x_igu_int_enable(sc); 5666 } 5667 } 5668 5669 static void bnx2x_hc_int_disable(struct bnx2x_softc *sc) 5670 { 5671 int port = SC_PORT(sc); 5672 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 5673 uint32_t val = REG_RD(sc, addr); 5674 5675 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 5676 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 5677 HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); 5678 /* flush all outstanding writes */ 5679 mb(); 5680 5681 REG_WR(sc, addr, val); 5682 if (REG_RD(sc, addr) != val) { 5683 PMD_DRV_LOG(ERR, sc, "proper val not read from HC IGU!"); 5684 } 5685 } 5686 5687 static void bnx2x_igu_int_disable(struct bnx2x_softc *sc) 5688 { 5689 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 5690 5691 val &= ~(IGU_PF_CONF_MSI_MSIX_EN | 5692 IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN); 5693 5694 PMD_DRV_LOG(DEBUG, sc, "write %x to IGU", val); 5695 5696 /* flush all outstanding writes */ 5697 mb(); 5698 5699 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 5700 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { 5701 PMD_DRV_LOG(ERR, sc, "proper val not read from IGU!"); 5702 } 5703 } 5704 5705 static void bnx2x_int_disable(struct bnx2x_softc *sc) 5706 { 5707 if (sc->devinfo.int_block == INT_BLOCK_HC) { 5708 bnx2x_hc_int_disable(sc); 5709 } else { 5710 bnx2x_igu_int_disable(sc); 5711 } 5712 } 5713 5714 static void bnx2x_nic_init(struct bnx2x_softc *sc, int load_code) 5715 { 5716 int i; 5717 5718 PMD_INIT_FUNC_TRACE(sc); 5719 5720 for (i = 0; i < sc->num_queues; i++) { 5721 bnx2x_init_eth_fp(sc, i); 5722 } 5723 5724 rmb(); /* ensure status block indices were read */ 5725 5726 bnx2x_init_rx_rings(sc); 5727 bnx2x_init_tx_rings(sc); 5728 5729 if (IS_VF(sc)) { 5730 bnx2x_memset_stats(sc); 5731 return; 5732 } 5733 5734 /* initialize MOD_ABS interrupts */ 5735 elink_init_mod_abs_int(sc, &sc->link_vars, 5736 sc->devinfo.chip_id, 5737 sc->devinfo.shmem_base, 5738 sc->devinfo.shmem2_base, SC_PORT(sc)); 5739 5740 bnx2x_init_def_sb(sc); 5741 bnx2x_update_dsb_idx(sc); 5742 bnx2x_init_sp_ring(sc); 5743 bnx2x_init_eq_ring(sc); 5744 bnx2x_init_internal(sc, load_code); 5745 bnx2x_pf_init(sc); 5746 bnx2x_stats_init(sc); 5747 5748 /* flush all before enabling interrupts */ 5749 mb(); 5750 5751 bnx2x_int_enable(sc); 5752 5753 /* check for SPIO5 */ 5754 bnx2x_attn_int_deasserted0(sc, 5755 REG_RD(sc, 5756 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 5757 SC_PORT(sc) * 4)) & 5758 AEU_INPUTS_ATTN_BITS_SPIO5); 5759 } 5760 5761 static void bnx2x_init_objs(struct bnx2x_softc *sc) 5762 { 5763 /* mcast rules must be added to tx if tx switching is enabled */ 5764 ecore_obj_type o_type; 5765 if (sc->flags & BNX2X_TX_SWITCHING) 5766 o_type = ECORE_OBJ_TYPE_RX_TX; 5767 else 5768 o_type = ECORE_OBJ_TYPE_RX; 5769 5770 /* RX_MODE controlling object */ 5771 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); 5772 
5773 /* multicast configuration controlling object */ 5774 ecore_init_mcast_obj(sc, 5775 &sc->mcast_obj, 5776 sc->fp[0].cl_id, 5777 sc->fp[0].index, 5778 SC_FUNC(sc), 5779 SC_FUNC(sc), 5780 BNX2X_SP(sc, mcast_rdata), 5781 (rte_iova_t)BNX2X_SP_MAPPING(sc, mcast_rdata), 5782 ECORE_FILTER_MCAST_PENDING, 5783 &sc->sp_state, o_type); 5784 5785 /* Setup CAM credit pools */ 5786 ecore_init_mac_credit_pool(sc, 5787 &sc->macs_pool, 5788 SC_FUNC(sc), 5789 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 5790 VNICS_PER_PATH(sc)); 5791 5792 ecore_init_vlan_credit_pool(sc, 5793 &sc->vlans_pool, 5794 SC_ABS_FUNC(sc) >> 1, 5795 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 5796 VNICS_PER_PATH(sc)); 5797 5798 /* RSS configuration object */ 5799 ecore_init_rss_config_obj(&sc->rss_conf_obj, 5800 sc->fp[0].cl_id, 5801 sc->fp[0].index, 5802 SC_FUNC(sc), 5803 SC_FUNC(sc), 5804 BNX2X_SP(sc, rss_rdata), 5805 (rte_iova_t)BNX2X_SP_MAPPING(sc, rss_rdata), 5806 ECORE_FILTER_RSS_CONF_PENDING, 5807 &sc->sp_state, ECORE_OBJ_TYPE_RX); 5808 } 5809 5810 /* 5811 * Initialize the function. This must be called before sending CLIENT_SETUP 5812 * for the first client. 5813 */ 5814 static int bnx2x_func_start(struct bnx2x_softc *sc) 5815 { 5816 struct ecore_func_state_params func_params = { NULL }; 5817 struct ecore_func_start_params *start_params = 5818 &func_params.params.start; 5819 5820 /* Prepare parameters for function state transitions */ 5821 bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 5822 5823 func_params.f_obj = &sc->func_obj; 5824 func_params.cmd = ECORE_F_CMD_START; 5825 5826 /* Function parameters */ 5827 start_params->mf_mode = sc->devinfo.mf_info.mf_mode; 5828 start_params->sd_vlan_tag = OVLAN(sc); 5829 5830 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { 5831 start_params->network_cos_mode = STATIC_COS; 5832 } else { /* CHIP_IS_E1X */ 5833 start_params->network_cos_mode = FW_WRR; 5834 } 5835 5836 start_params->gre_tunnel_mode = 0; 5837 start_params->gre_tunnel_rss = 0; 5838 5839 return ecore_func_state_change(sc, &func_params); 5840 } 5841 5842 static int bnx2x_set_power_state(struct bnx2x_softc *sc, uint8_t state) 5843 { 5844 uint16_t pmcsr; 5845 5846 /* If there is no power capability, silently succeed */ 5847 if (!(sc->devinfo.pcie_cap_flags & BNX2X_PM_CAPABLE_FLAG)) { 5848 PMD_DRV_LOG(INFO, sc, "No power capability"); 5849 return 0; 5850 } 5851 5852 pci_read(sc, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), &pmcsr, 5853 2); 5854 5855 switch (state) { 5856 case PCI_PM_D0: 5857 pci_write_word(sc, 5858 (sc->devinfo.pcie_pm_cap_reg + 5859 PCIR_POWER_STATUS), 5860 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME)); 5861 5862 if (pmcsr & PCIM_PSTAT_DMASK) { 5863 /* delay required during transition out of D3hot */ 5864 DELAY(20000); 5865 } 5866 5867 break; 5868 5869 case PCI_PM_D3hot: 5870 /* don't shut down the power for emulation and FPGA */ 5871 if (CHIP_REV_IS_SLOW(sc)) { 5872 return 0; 5873 } 5874 5875 pmcsr &= ~PCIM_PSTAT_DMASK; 5876 pmcsr |= PCIM_PSTAT_D3; 5877 5878 if (sc->wol) { 5879 pmcsr |= PCIM_PSTAT_PMEENABLE; 5880 } 5881 5882 pci_write_long(sc, 5883 (sc->devinfo.pcie_pm_cap_reg + 5884 PCIR_POWER_STATUS), pmcsr); 5885 5886 /* 5887 * No more memory access after this point until device is brought back 5888 * to D0 state. 
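 * The caller is expected to bring the device back to D0 (e.g. a later bnx2x_set_power_state(sc, PCI_PM_D0) call) before any further register access.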
5889 */ 5890 break; 5891 5892 default: 5893 PMD_DRV_LOG(NOTICE, sc, "Can't support PCI power state = %d", 5894 state); 5895 return -1; 5896 } 5897 5898 return 0; 5899 } 5900 5901 /* return true if succeeded to acquire the lock */ 5902 static uint8_t bnx2x_trylock_hw_lock(struct bnx2x_softc *sc, uint32_t resource) 5903 { 5904 uint32_t lock_status; 5905 uint32_t resource_bit = (1 << resource); 5906 int func = SC_FUNC(sc); 5907 uint32_t hw_lock_control_reg; 5908 5909 /* Validating that the resource is within range */ 5910 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 5911 PMD_DRV_LOG(INFO, sc, 5912 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)", 5913 resource, HW_LOCK_MAX_RESOURCE_VALUE); 5914 return FALSE; 5915 } 5916 5917 if (func <= 5) { 5918 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func * 8); 5919 } else { 5920 hw_lock_control_reg = 5921 (MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8); 5922 } 5923 5924 /* try to acquire the lock */ 5925 REG_WR(sc, hw_lock_control_reg + 4, resource_bit); 5926 lock_status = REG_RD(sc, hw_lock_control_reg); 5927 if (lock_status & resource_bit) { 5928 return TRUE; 5929 } 5930 5931 PMD_DRV_LOG(NOTICE, sc, "Failed to get a resource lock 0x%x", resource); 5932 5933 return FALSE; 5934 } 5935 5936 /* 5937 * Get the recovery leader resource id according to the engine this function 5938 * belongs to. Currently only only 2 engines is supported. 5939 */ 5940 static int bnx2x_get_leader_lock_resource(struct bnx2x_softc *sc) 5941 { 5942 if (SC_PATH(sc)) { 5943 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; 5944 } else { 5945 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0; 5946 } 5947 } 5948 5949 /* try to acquire a leader lock for current engine */ 5950 static uint8_t bnx2x_trylock_leader_lock(struct bnx2x_softc *sc) 5951 { 5952 return bnx2x_trylock_hw_lock(sc, bnx2x_get_leader_lock_resource(sc)); 5953 } 5954 5955 static int bnx2x_release_leader_lock(struct bnx2x_softc *sc) 5956 { 5957 return bnx2x_release_hw_lock(sc, bnx2x_get_leader_lock_resource(sc)); 5958 } 5959 5960 /* close gates #2, #3 and #4 */ 5961 static void bnx2x_set_234_gates(struct bnx2x_softc *sc, uint8_t close) 5962 { 5963 uint32_t val; 5964 5965 /* gates #2 and #4a are closed/opened */ 5966 /* #4 */ 5967 REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, ! !close); 5968 /* #2 */ 5969 REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, ! 
!close); 5970 5971 /* #3 */ 5972 if (CHIP_IS_E1x(sc)) { 5973 /* prevent interrupts from HC on both ports */ 5974 val = REG_RD(sc, HC_REG_CONFIG_1); 5975 if (close) 5976 REG_WR(sc, HC_REG_CONFIG_1, (val & ~(uint32_t) 5977 HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 5978 else 5979 REG_WR(sc, HC_REG_CONFIG_1, 5980 (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 5981 5982 val = REG_RD(sc, HC_REG_CONFIG_0); 5983 if (close) 5984 REG_WR(sc, HC_REG_CONFIG_0, (val & ~(uint32_t) 5985 HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 5986 else 5987 REG_WR(sc, HC_REG_CONFIG_0, 5988 (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 5989 5990 } else { 5991 /* Prevent incoming interrupts in IGU */ 5992 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); 5993 5994 if (close) 5995 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, 5996 (val & ~(uint32_t) 5997 IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 5998 else 5999 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, 6000 (val | 6001 IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 6002 } 6003 6004 wmb(); 6005 } 6006 6007 /* poll for pending writes bit, it should get cleared in no more than 1s */ 6008 static int bnx2x_er_poll_igu_vq(struct bnx2x_softc *sc) 6009 { 6010 uint32_t cnt = 1000; 6011 uint32_t pend_bits = 0; 6012 6013 do { 6014 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); 6015 6016 if (pend_bits == 0) { 6017 break; 6018 } 6019 6020 DELAY(1000); 6021 } while (cnt-- > 0); 6022 6023 if (cnt <= 0) { 6024 PMD_DRV_LOG(NOTICE, sc, "Still pending IGU requests bits=0x%08x!", 6025 pend_bits); 6026 return -1; 6027 } 6028 6029 return 0; 6030 } 6031 6032 #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ 6033 6034 static void bnx2x_clp_reset_prep(struct bnx2x_softc *sc, uint32_t * magic_val) 6035 { 6036 /* Do some magic... */ 6037 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 6038 *magic_val = val & SHARED_MF_CLP_MAGIC; 6039 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 6040 } 6041 6042 /* restore the value of the 'magic' bit */ 6043 static void bnx2x_clp_reset_done(struct bnx2x_softc *sc, uint32_t magic_val) 6044 { 6045 /* Restore the 'magic' bit value... 
*/ 6046 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 6047 MFCFG_WR(sc, shared_mf_config.clp_mb, 6048 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 6049 } 6050 6051 /* prepare for MCP reset, takes care of CLP configurations */ 6052 static void bnx2x_reset_mcp_prep(struct bnx2x_softc *sc, uint32_t * magic_val) 6053 { 6054 uint32_t shmem; 6055 uint32_t validity_offset; 6056 6057 /* set `magic' bit in order to save MF config */ 6058 bnx2x_clp_reset_prep(sc, magic_val); 6059 6060 /* get shmem offset */ 6061 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 6062 validity_offset = 6063 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); 6064 6065 /* Clear validity map flags */ 6066 if (shmem > 0) { 6067 REG_WR(sc, shmem + validity_offset, 0); 6068 } 6069 } 6070 6071 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 6072 #define MCP_ONE_TIMEOUT 100 /* 100 ms */ 6073 6074 static void bnx2x_mcp_wait_one(struct bnx2x_softc *sc) 6075 { 6076 /* special handling for emulation and FPGA (10 times longer) */ 6077 if (CHIP_REV_IS_SLOW(sc)) { 6078 DELAY((MCP_ONE_TIMEOUT * 10) * 1000); 6079 } else { 6080 DELAY((MCP_ONE_TIMEOUT) * 1000); 6081 } 6082 } 6083 6084 /* initialize shmem_base and waits for validity signature to appear */ 6085 static int bnx2x_init_shmem(struct bnx2x_softc *sc) 6086 { 6087 int cnt = 0; 6088 uint32_t val = 0; 6089 6090 do { 6091 sc->devinfo.shmem_base = 6092 sc->link_params.shmem_base = 6093 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 6094 6095 if (sc->devinfo.shmem_base) { 6096 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 6097 if (val & SHR_MEM_VALIDITY_MB) 6098 return 0; 6099 } 6100 6101 bnx2x_mcp_wait_one(sc); 6102 6103 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 6104 6105 PMD_DRV_LOG(NOTICE, sc, "BAD MCP validity signature"); 6106 6107 return -1; 6108 } 6109 6110 static int bnx2x_reset_mcp_comp(struct bnx2x_softc *sc, uint32_t magic_val) 6111 { 6112 int rc = bnx2x_init_shmem(sc); 6113 6114 /* Restore the `magic' bit value */ 6115 bnx2x_clp_reset_done(sc, magic_val); 6116 6117 return rc; 6118 } 6119 6120 static void bnx2x_pxp_prep(struct bnx2x_softc *sc) 6121 { 6122 REG_WR(sc, PXP2_REG_RD_START_INIT, 0); 6123 REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0); 6124 wmb(); 6125 } 6126 6127 /* 6128 * Reset the whole chip except for: 6129 * - PCIE core 6130 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit) 6131 * - IGU 6132 * - MISC (including AEU) 6133 * - GRC 6134 * - RBCN, RBCP 6135 */ 6136 static void bnx2x_process_kill_chip_reset(struct bnx2x_softc *sc, uint8_t global) 6137 { 6138 uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; 6139 uint32_t global_bits2, stay_reset2; 6140 6141 /* 6142 * Bits that have to be set in reset_mask2 if we want to reset 'global' 6143 * (per chip) blocks. 6144 */ 6145 global_bits2 = 6146 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | 6147 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; 6148 6149 /* 6150 * Don't reset the following blocks. 6151 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be 6152 * reset, as in 4 port device they might still be owned 6153 * by the MCP (there is only one leader per path). 
6154 */ 6155 not_reset_mask1 = 6156 MISC_REGISTERS_RESET_REG_1_RST_HC | 6157 MISC_REGISTERS_RESET_REG_1_RST_PXPV | 6158 MISC_REGISTERS_RESET_REG_1_RST_PXP; 6159 6160 not_reset_mask2 = 6161 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | 6162 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | 6163 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | 6164 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | 6165 MISC_REGISTERS_RESET_REG_2_RST_RBCN | 6166 MISC_REGISTERS_RESET_REG_2_RST_GRC | 6167 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | 6168 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | 6169 MISC_REGISTERS_RESET_REG_2_RST_ATC | 6170 MISC_REGISTERS_RESET_REG_2_PGLC | 6171 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | 6172 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | 6173 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | 6174 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | 6175 MISC_REGISTERS_RESET_REG_2_UMAC0 | MISC_REGISTERS_RESET_REG_2_UMAC1; 6176 6177 /* 6178 * Keep the following blocks in reset: 6179 * - all xxMACs are handled by the elink code. 6180 */ 6181 stay_reset2 = 6182 MISC_REGISTERS_RESET_REG_2_XMAC | 6183 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; 6184 6185 /* Full reset masks according to the chip */ 6186 reset_mask1 = 0xffffffff; 6187 6188 if (CHIP_IS_E1H(sc)) 6189 reset_mask2 = 0x1ffff; 6190 else if (CHIP_IS_E2(sc)) 6191 reset_mask2 = 0xfffff; 6192 else /* CHIP_IS_E3 */ 6193 reset_mask2 = 0x3ffffff; 6194 6195 /* Don't reset global blocks unless we need to */ 6196 if (!global) 6197 reset_mask2 &= ~global_bits2; 6198 6199 /* 6200 * In case of attention in the QM, we need to reset PXP 6201 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM 6202 * because otherwise QM reset would release 'close the gates' shortly 6203 * before resetting the PXP, then the PSWRQ would send a write 6204 * request to PGLUE. Then when PXP is reset, PGLUE would try to 6205 * read the payload data from PSWWR, but PSWWR would not 6206 * respond. The write queue in PGLUE would stuck, dmae commands 6207 * would not return. Therefore it's important to reset the second 6208 * reset register (containing the 6209 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the 6210 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM 6211 * bit). 
6212 */ 6213 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 6214 reset_mask2 & (~not_reset_mask2)); 6215 6216 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6217 reset_mask1 & (~not_reset_mask1)); 6218 6219 mb(); 6220 wmb(); 6221 6222 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 6223 reset_mask2 & (~stay_reset2)); 6224 6225 mb(); 6226 wmb(); 6227 6228 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); 6229 wmb(); 6230 } 6231 6232 static int bnx2x_process_kill(struct bnx2x_softc *sc, uint8_t global) 6233 { 6234 int cnt = 1000; 6235 uint32_t val = 0; 6236 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 6237 uint32_t tags_63_32 = 0; 6238 6239 /* Empty the Tetris buffer, wait for 1s */ 6240 do { 6241 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); 6242 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); 6243 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); 6244 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); 6245 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); 6246 if (CHIP_IS_E3(sc)) { 6247 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); 6248 } 6249 6250 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 6251 ((port_is_idle_0 & 0x1) == 0x1) && 6252 ((port_is_idle_1 & 0x1) == 0x1) && 6253 (pgl_exp_rom2 == 0xffffffff) && 6254 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) 6255 break; 6256 DELAY(1000); 6257 } while (cnt-- > 0); 6258 6259 if (cnt <= 0) { 6260 PMD_DRV_LOG(NOTICE, sc, 6261 "ERROR: Tetris buffer didn't get empty or there " 6262 "are still outstanding read requests after 1s! " 6263 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " 6264 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x", 6265 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, 6266 pgl_exp_rom2); 6267 return -1; 6268 } 6269 6270 mb(); 6271 6272 /* Close gates #2, #3 and #4 */ 6273 bnx2x_set_234_gates(sc, TRUE); 6274 6275 /* Poll for IGU VQs for 57712 and newer chips */ 6276 if (!CHIP_IS_E1x(sc) && bnx2x_er_poll_igu_vq(sc)) { 6277 return -1; 6278 } 6279 6280 /* clear "unprepared" bit */ 6281 REG_WR(sc, MISC_REG_UNPREPARED, 0); 6282 mb(); 6283 6284 /* Make sure all is written to the chip before the reset */ 6285 wmb(); 6286 6287 /* 6288 * Wait for 1ms to empty GLUE and PCI-E core queues, 6289 * PSWHST, GRC and PSWRD Tetris buffer. 6290 */ 6291 DELAY(1000); 6292 6293 /* Prepare to chip reset: */ 6294 /* MCP */ 6295 if (global) { 6296 bnx2x_reset_mcp_prep(sc, &val); 6297 } 6298 6299 /* PXP */ 6300 bnx2x_pxp_prep(sc); 6301 mb(); 6302 6303 /* reset the chip */ 6304 bnx2x_process_kill_chip_reset(sc, global); 6305 mb(); 6306 6307 /* Recover after reset: */ 6308 /* MCP */ 6309 if (global && bnx2x_reset_mcp_comp(sc, val)) { 6310 return -1; 6311 } 6312 6313 /* Open the gates #2, #3 and #4 */ 6314 bnx2x_set_234_gates(sc, FALSE); 6315 6316 return 0; 6317 } 6318 6319 static int bnx2x_leader_reset(struct bnx2x_softc *sc) 6320 { 6321 int rc = 0; 6322 uint8_t global = bnx2x_reset_is_global(sc); 6323 uint32_t load_code; 6324 6325 /* 6326 * If not going to reset MCP, load "fake" driver to reset HW while 6327 * driver is owner of the HW. 
6328 */ 6329 if (!global && !BNX2X_NOMCP(sc)) { 6330 load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 6331 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 6332 if (!load_code) { 6333 PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); 6334 rc = -1; 6335 goto exit_leader_reset; 6336 } 6337 6338 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 6339 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 6340 PMD_DRV_LOG(NOTICE, sc, 6341 "MCP unexpected response, aborting"); 6342 rc = -1; 6343 goto exit_leader_reset2; 6344 } 6345 6346 load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 6347 if (!load_code) { 6348 PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); 6349 rc = -1; 6350 goto exit_leader_reset2; 6351 } 6352 } 6353 6354 /* try to recover after the failure */ 6355 if (bnx2x_process_kill(sc, global)) { 6356 PMD_DRV_LOG(NOTICE, sc, "Something bad occurred on engine %d!", 6357 SC_PATH(sc)); 6358 rc = -1; 6359 goto exit_leader_reset2; 6360 } 6361 6362 /* 6363 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver 6364 * state. 6365 */ 6366 bnx2x_set_reset_done(sc); 6367 if (global) { 6368 bnx2x_clear_reset_global(sc); 6369 } 6370 6371 exit_leader_reset2: 6372 6373 /* unload "fake driver" if it was loaded */ 6374 if (!global &&!BNX2X_NOMCP(sc)) { 6375 bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 6376 bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 6377 } 6378 6379 exit_leader_reset: 6380 6381 sc->is_leader = 0; 6382 bnx2x_release_leader_lock(sc); 6383 6384 mb(); 6385 return rc; 6386 } 6387 6388 /* 6389 * prepare INIT transition, parameters configured: 6390 * - HC configuration 6391 * - Queue's CDU context 6392 */ 6393 static void 6394 bnx2x_pf_q_prep_init(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, 6395 struct ecore_queue_init_params *init_params) 6396 { 6397 uint8_t cos; 6398 int cxt_index, cxt_offset; 6399 6400 bnx2x_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags); 6401 bnx2x_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags); 6402 6403 bnx2x_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); 6404 bnx2x_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); 6405 6406 /* HC rate */ 6407 init_params->rx.hc_rate = 6408 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; 6409 init_params->tx.hc_rate = 6410 sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0; 6411 6412 /* FW SB ID */ 6413 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; 6414 6415 /* CQ index among the SB indices */ 6416 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 6417 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 6418 6419 /* set maximum number of COSs supported by this queue */ 6420 init_params->max_cos = sc->max_cos; 6421 6422 /* set the context pointers queue object */ 6423 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 6424 cxt_index = fp->index / ILT_PAGE_CIDS; 6425 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); 6426 init_params->cxts[cos] = 6427 &sc->context[cxt_index].vcxt[cxt_offset].eth; 6428 } 6429 } 6430 6431 /* set flags that are common for the Tx-only and not normal connections */ 6432 static unsigned long 6433 bnx2x_get_common_flags(struct bnx2x_softc *sc, uint8_t zero_stats) 6434 { 6435 unsigned long flags = 0; 6436 6437 /* PF driver will always initialize the Queue to an ACTIVE state */ 6438 bnx2x_set_bit(ECORE_Q_FLG_ACTIVE, &flags); 6439 6440 /* 6441 * tx only connections collect statistics (on the same index as the 6442 * parent connection). 
The statistics are zeroed when the parent 6443 * connection is initialized. 6444 */ 6445 6446 bnx2x_set_bit(ECORE_Q_FLG_STATS, &flags); 6447 if (zero_stats) { 6448 bnx2x_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); 6449 } 6450 6451 /* 6452 * tx only connections can support tx-switching, though their 6453 * CoS-ness doesn't survive the loopback 6454 */ 6455 if (sc->flags & BNX2X_TX_SWITCHING) { 6456 bnx2x_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); 6457 } 6458 6459 bnx2x_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); 6460 6461 return flags; 6462 } 6463 6464 static unsigned long bnx2x_get_q_flags(struct bnx2x_softc *sc, uint8_t leading) 6465 { 6466 unsigned long flags = 0; 6467 6468 if (IS_MF_SD(sc)) { 6469 bnx2x_set_bit(ECORE_Q_FLG_OV, &flags); 6470 } 6471 6472 if (leading) { 6473 bnx2x_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags); 6474 bnx2x_set_bit(ECORE_Q_FLG_MCAST, &flags); 6475 } 6476 6477 bnx2x_set_bit(ECORE_Q_FLG_VLAN, &flags); 6478 6479 /* merge with common flags */ 6480 return flags | bnx2x_get_common_flags(sc, TRUE); 6481 } 6482 6483 static void 6484 bnx2x_pf_q_prep_general(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, 6485 struct ecore_general_setup_params *gen_init, uint8_t cos) 6486 { 6487 gen_init->stat_id = bnx2x_stats_id(fp); 6488 gen_init->spcl_id = fp->cl_id; 6489 gen_init->mtu = sc->mtu; 6490 gen_init->cos = cos; 6491 } 6492 6493 static void 6494 bnx2x_pf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, 6495 struct rxq_pause_params *pause, 6496 struct ecore_rxq_setup_params *rxq_init) 6497 { 6498 struct bnx2x_rx_queue *rxq; 6499 6500 rxq = sc->rx_queues[fp->index]; 6501 if (!rxq) { 6502 PMD_RX_LOG(ERR, "RX queue is NULL"); 6503 return; 6504 } 6505 /* pause */ 6506 pause->bd_th_lo = BD_TH_LO(sc); 6507 pause->bd_th_hi = BD_TH_HI(sc); 6508 6509 pause->rcq_th_lo = RCQ_TH_LO(sc); 6510 pause->rcq_th_hi = RCQ_TH_HI(sc); 6511 6512 /* validate rings have enough entries to cross high thresholds */ 6513 if (sc->dropless_fc && 6514 pause->bd_th_hi + FW_PREFETCH_CNT > sc->rx_ring_size) { 6515 PMD_DRV_LOG(WARNING, sc, "rx bd ring threshold limit"); 6516 } 6517 6518 if (sc->dropless_fc && 6519 pause->rcq_th_hi + FW_PREFETCH_CNT > USABLE_RCQ_ENTRIES(rxq)) { 6520 PMD_DRV_LOG(WARNING, sc, "rcq ring threshold limit"); 6521 } 6522 6523 pause->pri_map = 1; 6524 6525 /* rxq setup */ 6526 rxq_init->dscr_map = (rte_iova_t)rxq->rx_ring_phys_addr; 6527 rxq_init->rcq_map = (rte_iova_t)rxq->cq_ring_phys_addr; 6528 rxq_init->rcq_np_map = (rte_iova_t)(rxq->cq_ring_phys_addr + 6529 BNX2X_PAGE_SIZE); 6530 6531 /* 6532 * This should be a maximum number of data bytes that may be 6533 * placed on the BD (not including paddings). 
6534 */ 6535 rxq_init->buf_sz = (fp->rx_buf_size - IP_HEADER_ALIGNMENT_PADDING); 6536 6537 rxq_init->cl_qzone_id = fp->cl_qzone_id; 6538 rxq_init->rss_engine_id = SC_FUNC(sc); 6539 rxq_init->mcast_engine_id = SC_FUNC(sc); 6540 6541 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; 6542 rxq_init->fw_sb_id = fp->fw_sb_id; 6543 6544 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 6545 6546 /* 6547 * configure silent vlan removal 6548 * if multi function mode is afex, then mask default vlan 6549 */ 6550 if (IS_MF_AFEX(sc)) { 6551 rxq_init->silent_removal_value = 6552 sc->devinfo.mf_info.afex_def_vlan_tag; 6553 rxq_init->silent_removal_mask = EVL_VLID_MASK; 6554 } 6555 } 6556 6557 static void 6558 bnx2x_pf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, 6559 struct ecore_txq_setup_params *txq_init, uint8_t cos) 6560 { 6561 struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; 6562 6563 if (!txq) { 6564 PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); 6565 return; 6566 } 6567 txq_init->dscr_map = (rte_iova_t)txq->tx_ring_phys_addr; 6568 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 6569 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 6570 txq_init->fw_sb_id = fp->fw_sb_id; 6571 6572 /* 6573 * set the TSS leading client id for TX classification to the 6574 * leading RSS client id 6575 */ 6576 txq_init->tss_leading_cl_id = BNX2X_FP(sc, 0, cl_id); 6577 } 6578 6579 /* 6580 * This function performs 2 steps in a queue state machine: 6581 * 1) RESET->INIT 6582 * 2) INIT->SETUP 6583 */ 6584 static int 6585 bnx2x_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, uint8_t leading) 6586 { 6587 struct ecore_queue_state_params q_params = { NULL }; 6588 struct ecore_queue_setup_params *setup_params = &q_params.params.setup; 6589 int rc; 6590 6591 PMD_DRV_LOG(DEBUG, sc, "setting up queue %d", fp->index); 6592 6593 bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 6594 6595 q_params.q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj; 6596 6597 /* we want to wait for completion in this context */ 6598 bnx2x_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 6599 6600 /* prepare the INIT parameters */ 6601 bnx2x_pf_q_prep_init(sc, fp, &q_params.params.init); 6602 6603 /* Set the command */ 6604 q_params.cmd = ECORE_Q_CMD_INIT; 6605 6606 /* Change the state to INIT */ 6607 rc = ecore_queue_state_change(sc, &q_params); 6608 if (rc) { 6609 PMD_DRV_LOG(NOTICE, sc, "Queue(%d) INIT failed", fp->index); 6610 return rc; 6611 } 6612 6613 PMD_DRV_LOG(DEBUG, sc, "init complete"); 6614 6615 /* now move the Queue to the SETUP state */ 6616 memset(setup_params, 0, sizeof(*setup_params)); 6617 6618 /* set Queue flags */ 6619 setup_params->flags = bnx2x_get_q_flags(sc, leading); 6620 6621 /* set general SETUP parameters */ 6622 bnx2x_pf_q_prep_general(sc, fp, &setup_params->gen_params, 6623 FIRST_TX_COS_INDEX); 6624 6625 bnx2x_pf_rx_q_prep(sc, fp, 6626 &setup_params->pause_params, 6627 &setup_params->rxq_params); 6628 6629 bnx2x_pf_tx_q_prep(sc, fp, &setup_params->txq_params, FIRST_TX_COS_INDEX); 6630 6631 /* Set the command */ 6632 q_params.cmd = ECORE_Q_CMD_SETUP; 6633 6634 /* change the state to SETUP */ 6635 rc = ecore_queue_state_change(sc, &q_params); 6636 if (rc) { 6637 PMD_DRV_LOG(NOTICE, sc, "Queue(%d) SETUP failed", fp->index); 6638 return rc; 6639 } 6640 6641 return rc; 6642 } 6643 6644 static int bnx2x_setup_leading(struct bnx2x_softc *sc) 6645 { 6646 if (IS_PF(sc)) 6647 return bnx2x_setup_queue(sc, &sc->fp[0], TRUE); 6648 else /* VF */ 6649 return bnx2x_vf_setup_queue(sc,
&sc->fp[0], TRUE); 6650 } 6651 6652 static int 6653 bnx2x_config_rss_pf(struct bnx2x_softc *sc, struct ecore_rss_config_obj *rss_obj, 6654 uint8_t config_hash) 6655 { 6656 struct ecore_config_rss_params params = { NULL }; 6657 uint32_t i; 6658 6659 /* 6660 * Although RSS is meaningless when there is a single HW queue we 6661 * still need it enabled in order to have HW Rx hash generated. 6662 */ 6663 6664 params.rss_obj = rss_obj; 6665 6666 bnx2x_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 6667 6668 bnx2x_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags); 6669 6670 /* RSS configuration */ 6671 bnx2x_set_bit(ECORE_RSS_IPV4, &params.rss_flags); 6672 bnx2x_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags); 6673 bnx2x_set_bit(ECORE_RSS_IPV6, &params.rss_flags); 6674 bnx2x_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags); 6675 if (rss_obj->udp_rss_v4) { 6676 bnx2x_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags); 6677 } 6678 if (rss_obj->udp_rss_v6) { 6679 bnx2x_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags); 6680 } 6681 6682 /* Hash bits */ 6683 params.rss_result_mask = MULTI_MASK; 6684 6685 rte_memcpy(params.ind_table, rss_obj->ind_table, 6686 sizeof(params.ind_table)); 6687 6688 if (config_hash) { 6689 /* RSS keys */ 6690 for (i = 0; i < sizeof(params.rss_key) / 4; i++) { 6691 params.rss_key[i] = (uint32_t) rte_rand(); 6692 } 6693 6694 bnx2x_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags); 6695 } 6696 6697 if (IS_PF(sc)) 6698 return ecore_config_rss(sc, &params); 6699 else 6700 return bnx2x_vf_config_rss(sc, &params); 6701 } 6702 6703 static int bnx2x_config_rss_eth(struct bnx2x_softc *sc, uint8_t config_hash) 6704 { 6705 return bnx2x_config_rss_pf(sc, &sc->rss_conf_obj, config_hash); 6706 } 6707 6708 static int bnx2x_init_rss_pf(struct bnx2x_softc *sc) 6709 { 6710 uint8_t num_eth_queues = BNX2X_NUM_ETH_QUEUES(sc); 6711 uint32_t i; 6712 6713 /* 6714 * Prepare the initial contents of the indirection table if 6715 * RSS is enabled 6716 */ 6717 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { 6718 sc->rss_conf_obj.ind_table[i] = 6719 (sc->fp->cl_id + (i % num_eth_queues)); 6720 } 6721 6722 if (sc->udp_rss) { 6723 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; 6724 } 6725 6726 /* 6727 * For 57711 SEARCHER configuration (rss_keys) is 6728 * per-port, so if explicit configuration is needed, do it only 6729 * for a PMF. 6730 * 6731 * For 57712 and newer it's a per-function configuration. 6732 */ 6733 return bnx2x_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)); 6734 } 6735 6736 static int 6737 bnx2x_set_mac_one(struct bnx2x_softc *sc, uint8_t * mac, 6738 struct ecore_vlan_mac_obj *obj, uint8_t set, int mac_type, 6739 unsigned long *ramrod_flags) 6740 { 6741 struct ecore_vlan_mac_ramrod_params ramrod_param; 6742 int rc; 6743 6744 memset(&ramrod_param, 0, sizeof(ramrod_param)); 6745 6746 /* fill in general parameters */ 6747 ramrod_param.vlan_mac_obj = obj; 6748 ramrod_param.ramrod_flags = *ramrod_flags; 6749 6750 /* fill a user request section if needed */ 6751 if (!bnx2x_test_bit(RAMROD_CONT, ramrod_flags)) { 6752 rte_memcpy(ramrod_param.user_req.u.mac.mac, mac, 6753 ETH_ALEN); 6754 6755 bnx2x_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); 6756 6757 /* Set the command: ADD or DEL */ 6758 ramrod_param.user_req.cmd = (set) ?
ECORE_VLAN_MAC_ADD : 6759 ECORE_VLAN_MAC_DEL; 6760 } 6761 6762 rc = ecore_config_vlan_mac(sc, &ramrod_param); 6763 6764 if (rc == ECORE_EXISTS) { 6765 PMD_DRV_LOG(INFO, sc, "Failed to schedule ADD operations (EEXIST)"); 6766 /* do not treat adding same MAC as error */ 6767 rc = 0; 6768 } else if (rc < 0) { 6769 PMD_DRV_LOG(ERR, sc, 6770 "%s MAC failed (%d)", (set ? "Set" : "Delete"), rc); 6771 } 6772 6773 return rc; 6774 } 6775 6776 static int bnx2x_set_eth_mac(struct bnx2x_softc *sc, uint8_t set) 6777 { 6778 unsigned long ramrod_flags = 0; 6779 6780 PMD_DRV_LOG(DEBUG, sc, "Adding Ethernet MAC"); 6781 6782 bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 6783 6784 /* Eth MAC is set on RSS leading client (fp[0]) */ 6785 return bnx2x_set_mac_one(sc, sc->link_params.mac_addr, 6786 &sc->sp_objs->mac_obj, 6787 set, ECORE_ETH_MAC, &ramrod_flags); 6788 } 6789 6790 static int bnx2x_get_cur_phy_idx(struct bnx2x_softc *sc) 6791 { 6792 uint32_t sel_phy_idx = 0; 6793 6794 if (sc->link_params.num_phys <= 1) { 6795 return ELINK_INT_PHY; 6796 } 6797 6798 if (sc->link_vars.link_up) { 6799 sel_phy_idx = ELINK_EXT_PHY1; 6800 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ 6801 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && 6802 (sc->link_params.phy[ELINK_EXT_PHY2].supported & 6803 ELINK_SUPPORTED_FIBRE)) 6804 sel_phy_idx = ELINK_EXT_PHY2; 6805 } else { 6806 switch (elink_phy_selection(&sc->link_params)) { 6807 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 6808 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: 6809 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 6810 sel_phy_idx = ELINK_EXT_PHY1; 6811 break; 6812 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: 6813 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 6814 sel_phy_idx = ELINK_EXT_PHY2; 6815 break; 6816 } 6817 } 6818 6819 return sel_phy_idx; 6820 } 6821 6822 static int bnx2x_get_link_cfg_idx(struct bnx2x_softc *sc) 6823 { 6824 uint32_t sel_phy_idx = bnx2x_get_cur_phy_idx(sc); 6825 6826 /* 6827 * The selected activated PHY is always after swapping (in case PHY 6828 * swapping is enabled). 
So when swapping is enabled, we need to reverse 6829 * the configuration 6830 */ 6831 6832 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 6833 if (sel_phy_idx == ELINK_EXT_PHY1) 6834 sel_phy_idx = ELINK_EXT_PHY2; 6835 else if (sel_phy_idx == ELINK_EXT_PHY2) 6836 sel_phy_idx = ELINK_EXT_PHY1; 6837 } 6838 6839 return ELINK_LINK_CONFIG_IDX(sel_phy_idx); 6840 } 6841 6842 static void bnx2x_set_requested_fc(struct bnx2x_softc *sc) 6843 { 6844 /* 6845 * Initialize link parameters structure variables 6846 * It is recommended to turn off RX FC for jumbo frames 6847 * for better performance 6848 */ 6849 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { 6850 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; 6851 } else { 6852 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; 6853 } 6854 } 6855 6856 static void bnx2x_calc_fc_adv(struct bnx2x_softc *sc) 6857 { 6858 uint8_t cfg_idx = bnx2x_get_link_cfg_idx(sc); 6859 switch (sc->link_vars.ieee_fc & 6860 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 6861 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 6862 default: 6863 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 6864 ADVERTISED_Pause); 6865 break; 6866 6867 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 6868 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 6869 ADVERTISED_Pause); 6870 break; 6871 6872 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 6873 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 6874 break; 6875 } 6876 } 6877 6878 static uint16_t bnx2x_get_mf_speed(struct bnx2x_softc *sc) 6879 { 6880 uint16_t line_speed = sc->link_vars.line_speed; 6881 if (IS_MF(sc)) { 6882 uint16_t maxCfg = bnx2x_extract_max_cfg(sc, 6883 sc->devinfo. 6884 mf_info.mf_config[SC_VN 6885 (sc)]); 6886 6887 /* calculate the current MAX line speed limit for the MF devices */ 6888 if (IS_MF_SI(sc)) { 6889 line_speed = (line_speed * maxCfg) / 100; 6890 } else { /* SD mode */ 6891 uint16_t vn_max_rate = maxCfg * 100; 6892 6893 if (vn_max_rate < line_speed) { 6894 line_speed = vn_max_rate; 6895 } 6896 } 6897 } 6898 6899 return line_speed; 6900 } 6901 6902 static void 6903 bnx2x_fill_report_data(struct bnx2x_softc *sc, struct bnx2x_link_report_data *data) 6904 { 6905 uint16_t line_speed = bnx2x_get_mf_speed(sc); 6906 6907 memset(data, 0, sizeof(*data)); 6908 6909 /* fill the report data with the effective line speed */ 6910 data->line_speed = line_speed; 6911 6912 /* Link is down */ 6913 if (!sc->link_vars.link_up || (sc->flags & BNX2X_MF_FUNC_DIS)) { 6914 bnx2x_set_bit(BNX2X_LINK_REPORT_LINK_DOWN, 6915 &data->link_report_flags); 6916 } 6917 6918 /* Full DUPLEX */ 6919 if (sc->link_vars.duplex == DUPLEX_FULL) { 6920 bnx2x_set_bit(BNX2X_LINK_REPORT_FULL_DUPLEX, 6921 &data->link_report_flags); 6922 } 6923 6924 /* Rx Flow Control is ON */ 6925 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { 6926 bnx2x_set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags); 6927 } 6928 6929 /* Tx Flow Control is ON */ 6930 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 6931 bnx2x_set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags); 6932 } 6933 } 6934 6935 /* report link status to OS, should be called under phy_lock */ 6936 static void bnx2x_link_report_locked(struct bnx2x_softc *sc) 6937 { 6938 struct bnx2x_link_report_data cur_data; 6939 6940 /* reread mf_cfg */ 6941 if (IS_PF(sc)) { 6942 bnx2x_read_mf_cfg(sc); 6943 } 6944 6945 /* Read the current link report info */ 6946 bnx2x_fill_report_data(sc, &cur_data); 6947 6948 /* Don't report link down or exactly 
the same link status twice */ 6949 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || 6950 (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN, 6951 &sc->last_reported_link.link_report_flags) && 6952 bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN, 6953 &cur_data.link_report_flags))) { 6954 return; 6955 } 6956 6957 ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %lx, last_reported_link = %lx", 6958 cur_data.link_report_flags, 6959 sc->last_reported_link.link_report_flags); 6960 6961 sc->link_cnt++; 6962 6963 ELINK_DEBUG_P1(sc, "link status change count = %x", sc->link_cnt); 6964 /* report new link params and remember the state for the next time */ 6965 rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); 6966 6967 if (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN, 6968 &cur_data.link_report_flags)) { 6969 ELINK_DEBUG_P0(sc, "NIC Link is Down"); 6970 } else { 6971 __rte_unused const char *duplex; 6972 __rte_unused const char *flow; 6973 6974 if (bnx2x_test_and_clear_bit(BNX2X_LINK_REPORT_FULL_DUPLEX, 6975 &cur_data.link_report_flags)) { 6976 duplex = "full"; 6977 ELINK_DEBUG_P0(sc, "link set to full duplex"); 6978 } else { 6979 duplex = "half"; 6980 ELINK_DEBUG_P0(sc, "link set to half duplex"); 6981 } 6982 6983 /* 6984 * Handle the FC at the end so that only these flags would be 6985 * possibly set. This way we may easily check if there is no FC 6986 * enabled. 6987 */ 6988 if (cur_data.link_report_flags) { 6989 if (bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON, 6990 &cur_data.link_report_flags) && 6991 bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON, 6992 &cur_data.link_report_flags)) { 6993 flow = "ON - receive & transmit"; 6994 } else if (bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON, 6995 &cur_data.link_report_flags) && 6996 !bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON, 6997 &cur_data.link_report_flags)) { 6998 flow = "ON - receive"; 6999 } else if (!bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON, 7000 &cur_data.link_report_flags) && 7001 bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON, 7002 &cur_data.link_report_flags)) { 7003 flow = "ON - transmit"; 7004 } else { 7005 flow = "none"; /* possible? 
*/ 7006 } 7007 } else { 7008 flow = "none"; 7009 } 7010 7011 PMD_DRV_LOG(INFO, sc, 7012 "NIC Link is Up, %d Mbps %s duplex, Flow control: %s", 7013 cur_data.line_speed, duplex, flow); 7014 } 7015 } 7016 7017 static void 7018 bnx2x_link_report(struct bnx2x_softc *sc) 7019 { 7020 bnx2x_acquire_phy_lock(sc); 7021 bnx2x_link_report_locked(sc); 7022 bnx2x_release_phy_lock(sc); 7023 } 7024 7025 void bnx2x_link_status_update(struct bnx2x_softc *sc) 7026 { 7027 if (sc->state != BNX2X_STATE_OPEN) { 7028 return; 7029 } 7030 7031 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { 7032 elink_link_status_update(&sc->link_params, &sc->link_vars); 7033 } else { 7034 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | 7035 ELINK_SUPPORTED_10baseT_Full | 7036 ELINK_SUPPORTED_100baseT_Half | 7037 ELINK_SUPPORTED_100baseT_Full | 7038 ELINK_SUPPORTED_1000baseT_Full | 7039 ELINK_SUPPORTED_2500baseX_Full | 7040 ELINK_SUPPORTED_10000baseT_Full | 7041 ELINK_SUPPORTED_TP | 7042 ELINK_SUPPORTED_FIBRE | 7043 ELINK_SUPPORTED_Autoneg | 7044 ELINK_SUPPORTED_Pause | 7045 ELINK_SUPPORTED_Asym_Pause); 7046 sc->port.advertising[0] = sc->port.supported[0]; 7047 7048 sc->link_params.sc = sc; 7049 sc->link_params.port = SC_PORT(sc); 7050 sc->link_params.req_duplex[0] = DUPLEX_FULL; 7051 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; 7052 sc->link_params.req_line_speed[0] = SPEED_10000; 7053 sc->link_params.speed_cap_mask[0] = 0x7f0000; 7054 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; 7055 7056 if (CHIP_REV_IS_FPGA(sc)) { 7057 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; 7058 sc->link_vars.line_speed = ELINK_SPEED_1000; 7059 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 7060 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); 7061 } else { 7062 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; 7063 sc->link_vars.line_speed = ELINK_SPEED_10000; 7064 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 7065 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 7066 } 7067 7068 sc->link_vars.link_up = 1; 7069 7070 sc->link_vars.duplex = DUPLEX_FULL; 7071 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; 7072 7073 if (IS_PF(sc)) { 7074 REG_WR(sc, 7075 NIG_REG_EGRESS_DRAIN0_MODE + 7076 sc->link_params.port * 4, 0); 7077 bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); 7078 bnx2x_link_report(sc); 7079 } 7080 } 7081 7082 if (IS_PF(sc)) { 7083 if (sc->link_vars.link_up) { 7084 bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); 7085 } else { 7086 bnx2x_stats_handle(sc, STATS_EVENT_STOP); 7087 } 7088 bnx2x_link_report(sc); 7089 } else { 7090 bnx2x_link_report_locked(sc); 7091 bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); 7092 } 7093 } 7094 7095 static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode) 7096 { 7097 int rc, cfg_idx = bnx2x_get_link_cfg_idx(sc); 7098 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; 7099 struct elink_params *lp = &sc->link_params; 7100 7101 bnx2x_set_requested_fc(sc); 7102 7103 bnx2x_acquire_phy_lock(sc); 7104 7105 if (load_mode == LOAD_DIAG) { 7106 lp->loopback_mode = ELINK_LOOPBACK_XGXS; 7107 /* Prefer doing PHY loopback at 10G speed, if possible */ 7108 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { 7109 if (lp->speed_cap_mask[cfg_idx] & 7110 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 7111 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; 7112 } else { 7113 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; 7114 } 7115 } 7116 } 7117 7118 if (load_mode == LOAD_LOOPBACK_EXT) { 7119 lp->loopback_mode = ELINK_LOOPBACK_EXT; 7120 } 7121 7122 rc = elink_phy_init(&sc->link_params, &sc->link_vars); 7123 7124 
bnx2x_release_phy_lock(sc); 7125 7126 bnx2x_calc_fc_adv(sc); 7127 7128 if (sc->link_vars.link_up) { 7129 bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); 7130 bnx2x_link_report(sc); 7131 } 7132 7133 sc->link_params.req_line_speed[cfg_idx] = req_line_speed; 7134 return rc; 7135 } 7136 7137 /* update flags in shmem */ 7138 static void 7139 bnx2x_update_drv_flags(struct bnx2x_softc *sc, uint32_t flags, uint32_t set) 7140 { 7141 uint32_t drv_flags; 7142 7143 if (SHMEM2_HAS(sc, drv_flags)) { 7144 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 7145 drv_flags = SHMEM2_RD(sc, drv_flags); 7146 7147 if (set) { 7148 drv_flags |= flags; 7149 } else { 7150 drv_flags &= ~flags; 7151 } 7152 7153 SHMEM2_WR(sc, drv_flags, drv_flags); 7154 7155 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 7156 } 7157 } 7158 7159 /* periodic timer callout routine, only runs when the interface is up */ 7160 void bnx2x_periodic_callout(struct bnx2x_softc *sc) 7161 { 7162 if ((sc->state != BNX2X_STATE_OPEN) || 7163 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { 7164 PMD_DRV_LOG(DEBUG, sc, "periodic callout exit (state=0x%x)", 7165 sc->state); 7166 return; 7167 } 7168 if (!CHIP_REV_IS_SLOW(sc)) { 7169 /* 7170 * This barrier is needed to ensure the ordering between the writing 7171 * to the sc->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and 7172 * the reading here. 7173 */ 7174 mb(); 7175 if (sc->port.pmf) { 7176 bnx2x_acquire_phy_lock(sc); 7177 elink_period_func(&sc->link_params, &sc->link_vars); 7178 bnx2x_release_phy_lock(sc); 7179 } 7180 } 7181 #ifdef BNX2X_PULSE 7182 if (IS_PF(sc) && !BNX2X_NOMCP(sc)) { 7183 int mb_idx = SC_FW_MB_IDX(sc); 7184 uint32_t drv_pulse; 7185 uint32_t mcp_pulse; 7186 7187 ++sc->fw_drv_pulse_wr_seq; 7188 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 7189 7190 drv_pulse = sc->fw_drv_pulse_wr_seq; 7191 bnx2x_drv_pulse(sc); 7192 7193 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & 7194 MCP_PULSE_SEQ_MASK); 7195 7196 /* 7197 * The delta between driver pulse and mcp response should 7198 * be 1 (before mcp response) or 0 (after mcp response). 7199 */ 7200 if ((drv_pulse != mcp_pulse) && 7201 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 7202 /* someone lost a heartbeat... 
*/ 7203 PMD_DRV_LOG(ERR, sc, 7204 "drv_pulse (0x%x) != mcp_pulse (0x%x)", 7205 drv_pulse, mcp_pulse); 7206 } 7207 } 7208 #endif 7209 } 7210 7211 /* start the controller */ 7212 static __rte_noinline 7213 int bnx2x_nic_load(struct bnx2x_softc *sc) 7214 { 7215 uint32_t val; 7216 uint32_t load_code = 0; 7217 int i, rc = 0; 7218 7219 PMD_INIT_FUNC_TRACE(sc); 7220 7221 sc->state = BNX2X_STATE_OPENING_WAITING_LOAD; 7222 7223 if (IS_PF(sc)) { 7224 /* must be called before memory allocation and HW init */ 7225 bnx2x_ilt_set_info(sc); 7226 } 7227 7228 bnx2x_set_fp_rx_buf_size(sc); 7229 7230 if (IS_PF(sc)) { 7231 if (bnx2x_alloc_mem(sc) != 0) { 7232 sc->state = BNX2X_STATE_CLOSED; 7233 rc = -ENOMEM; 7234 goto bnx2x_nic_load_error0; 7235 } 7236 } 7237 7238 /* allocate the host hardware/software hsi structures */ 7239 if (bnx2x_alloc_hsi_mem(sc) != 0) { 7240 PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_hsi_mem was failed"); 7241 sc->state = BNX2X_STATE_CLOSED; 7242 rc = -ENOMEM; 7243 goto bnx2x_nic_load_error0; 7244 } 7245 7246 if (bnx2x_alloc_fw_stats_mem(sc) != 0) { 7247 sc->state = BNX2X_STATE_CLOSED; 7248 rc = -ENOMEM; 7249 goto bnx2x_nic_load_error0; 7250 } 7251 7252 if (IS_VF(sc)) { 7253 rc = bnx2x_vf_init(sc); 7254 if (rc) { 7255 sc->state = BNX2X_STATE_ERROR; 7256 goto bnx2x_nic_load_error0; 7257 } 7258 } 7259 7260 if (IS_PF(sc)) { 7261 /* set pf load just before approaching the MCP */ 7262 bnx2x_set_pf_load(sc); 7263 7264 /* if MCP exists send load request and analyze response */ 7265 if (!BNX2X_NOMCP(sc)) { 7266 /* attempt to load pf */ 7267 if (bnx2x_nic_load_request(sc, &load_code) != 0) { 7268 sc->state = BNX2X_STATE_CLOSED; 7269 rc = -ENXIO; 7270 goto bnx2x_nic_load_error1; 7271 } 7272 7273 /* what did the MCP say? */ 7274 if (bnx2x_nic_load_analyze_req(sc, load_code) != 0) { 7275 bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 7276 sc->state = BNX2X_STATE_CLOSED; 7277 rc = -ENXIO; 7278 goto bnx2x_nic_load_error2; 7279 } 7280 } else { 7281 PMD_DRV_LOG(INFO, sc, "Device has no MCP!"); 7282 load_code = bnx2x_nic_load_no_mcp(sc); 7283 } 7284 7285 /* mark PMF if applicable */ 7286 bnx2x_nic_load_pmf(sc, load_code); 7287 7288 /* Init Function state controlling object */ 7289 bnx2x_init_func_obj(sc); 7290 7291 /* Initialize HW */ 7292 if (bnx2x_init_hw(sc, load_code) != 0) { 7293 PMD_DRV_LOG(NOTICE, sc, "HW init failed"); 7294 bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 7295 sc->state = BNX2X_STATE_CLOSED; 7296 rc = -ENXIO; 7297 goto bnx2x_nic_load_error2; 7298 } 7299 } 7300 7301 bnx2x_nic_init(sc, load_code); 7302 7303 /* Init per-function objects */ 7304 if (IS_PF(sc)) { 7305 bnx2x_init_objs(sc); 7306 7307 /* set AFEX default VLAN tag to an invalid value */ 7308 sc->devinfo.mf_info.afex_def_vlan_tag = -1; 7309 7310 sc->state = BNX2X_STATE_OPENING_WAITING_PORT; 7311 rc = bnx2x_func_start(sc); 7312 if (rc) { 7313 PMD_DRV_LOG(NOTICE, sc, "Function start failed!"); 7314 bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 7315 sc->state = BNX2X_STATE_ERROR; 7316 goto bnx2x_nic_load_error3; 7317 } 7318 7319 /* send LOAD_DONE command to MCP */ 7320 if (!BNX2X_NOMCP(sc)) { 7321 load_code = 7322 bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 7323 if (!load_code) { 7324 PMD_DRV_LOG(NOTICE, sc, 7325 "MCP response failure, aborting"); 7326 sc->state = BNX2X_STATE_ERROR; 7327 rc = -ENXIO; 7328 goto bnx2x_nic_load_error3; 7329 } 7330 } 7331 } 7332 7333 rc = bnx2x_setup_leading(sc); 7334 if (rc) { 7335 PMD_DRV_LOG(NOTICE, sc, "Setup leading failed!"); 7336 sc->state = BNX2X_STATE_ERROR; 7337 goto 
bnx2x_nic_load_error3; 7338 } 7339 7340 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { 7341 if (IS_PF(sc)) 7342 rc = bnx2x_setup_queue(sc, &sc->fp[i], FALSE); 7343 else /* IS_VF(sc) */ 7344 rc = bnx2x_vf_setup_queue(sc, &sc->fp[i], FALSE); 7345 7346 if (rc) { 7347 PMD_DRV_LOG(NOTICE, sc, "Queue(%d) setup failed", i); 7348 sc->state = BNX2X_STATE_ERROR; 7349 goto bnx2x_nic_load_error3; 7350 } 7351 } 7352 7353 rc = bnx2x_init_rss_pf(sc); 7354 if (rc) { 7355 PMD_DRV_LOG(NOTICE, sc, "PF RSS init failed"); 7356 sc->state = BNX2X_STATE_ERROR; 7357 goto bnx2x_nic_load_error3; 7358 } 7359 7360 /* now when Clients are configured we are ready to work */ 7361 sc->state = BNX2X_STATE_OPEN; 7362 7363 /* Configure a ucast MAC */ 7364 if (IS_PF(sc)) { 7365 rc = bnx2x_set_eth_mac(sc, TRUE); 7366 } else { /* IS_VF(sc) */ 7367 rc = bnx2x_vf_set_mac(sc, TRUE); 7368 } 7369 7370 if (rc) { 7371 PMD_DRV_LOG(NOTICE, sc, "Setting Ethernet MAC failed"); 7372 sc->state = BNX2X_STATE_ERROR; 7373 goto bnx2x_nic_load_error3; 7374 } 7375 7376 if (sc->port.pmf) { 7377 rc = bnx2x_initial_phy_init(sc, LOAD_OPEN); 7378 if (rc) { 7379 sc->state = BNX2X_STATE_ERROR; 7380 goto bnx2x_nic_load_error3; 7381 } 7382 } 7383 7384 sc->link_params.feature_config_flags &= 7385 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; 7386 7387 /* start the Tx */ 7388 switch (LOAD_OPEN) { 7389 case LOAD_NORMAL: 7390 case LOAD_OPEN: 7391 break; 7392 7393 case LOAD_DIAG: 7394 case LOAD_LOOPBACK_EXT: 7395 sc->state = BNX2X_STATE_DIAG; 7396 break; 7397 7398 default: 7399 break; 7400 } 7401 7402 if (sc->port.pmf) { 7403 bnx2x_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); 7404 } else { 7405 bnx2x_link_status_update(sc); 7406 } 7407 7408 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 7409 /* mark driver is loaded in shmem2 */ 7410 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 7411 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 7412 (val | 7413 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | 7414 DRV_FLAGS_CAPABILITIES_LOADED_L2)); 7415 } 7416 7417 /* start fast path */ 7418 /* Initialize Rx filter */ 7419 bnx2x_set_rx_mode(sc); 7420 7421 /* wait for all pending SP commands to complete */ 7422 if (IS_PF(sc) && !bnx2x_wait_sp_comp(sc, ~0x0UL)) { 7423 PMD_DRV_LOG(NOTICE, sc, "Timeout waiting for all SPs to complete!"); 7424 bnx2x_periodic_stop(sc); 7425 bnx2x_nic_unload(sc, UNLOAD_CLOSE, FALSE); 7426 return -ENXIO; 7427 } 7428 7429 PMD_DRV_LOG(DEBUG, sc, "NIC successfully loaded"); 7430 7431 return 0; 7432 7433 bnx2x_nic_load_error3: 7434 7435 if (IS_PF(sc)) { 7436 bnx2x_int_disable_sync(sc, 1); 7437 7438 /* clean out queued objects */ 7439 bnx2x_squeeze_objects(sc); 7440 } 7441 7442 bnx2x_nic_load_error2: 7443 7444 if (IS_PF(sc) && !BNX2X_NOMCP(sc)) { 7445 bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 7446 bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 7447 } 7448 7449 sc->port.pmf = 0; 7450 7451 bnx2x_nic_load_error1: 7452 7453 /* clear pf_load status, as it was already set */ 7454 if (IS_PF(sc)) { 7455 bnx2x_clear_pf_load(sc); 7456 } 7457 7458 bnx2x_nic_load_error0: 7459 7460 bnx2x_free_fw_stats_mem(sc); 7461 bnx2x_free_hsi_mem(sc); 7462 bnx2x_free_mem(sc); 7463 7464 return rc; 7465 } 7466 7467 /* 7468 * Handles controller initialization. 7469 */ 7470 int bnx2x_init(struct bnx2x_softc *sc) 7471 { 7472 int other_engine = SC_PATH(sc) ? 0 : 1; 7473 uint8_t other_load_status, load_status; 7474 uint8_t global = FALSE; 7475 int rc; 7476 7477 /* Check if the driver is still running and bail out if it is. 
*/ 7478 if (sc->state != BNX2X_STATE_CLOSED) { 7479 PMD_DRV_LOG(DEBUG, sc, "Init called while driver is running!"); 7480 rc = 0; 7481 goto bnx2x_init_done; 7482 } 7483 7484 bnx2x_set_power_state(sc, PCI_PM_D0); 7485 7486 /* 7487 * If parity occurred during the unload, then attentions and/or 7488 * RECOVERY_IN_PROGRESS may still be set. If so we want the first function 7489 * loaded on the current engine to complete the recovery. Parity recovery 7490 * is only relevant for PF driver. 7491 */ 7492 if (IS_PF(sc)) { 7493 other_load_status = bnx2x_get_load_status(sc, other_engine); 7494 load_status = bnx2x_get_load_status(sc, SC_PATH(sc)); 7495 7496 if (!bnx2x_reset_is_done(sc, SC_PATH(sc)) || 7497 bnx2x_chk_parity_attn(sc, &global, TRUE)) { 7498 do { 7499 /* 7500 * If there are attentions and they are in global blocks, set 7501 * the GLOBAL_RESET bit regardless whether it will be this 7502 * function that will complete the recovery or not. 7503 */ 7504 if (global) { 7505 bnx2x_set_reset_global(sc); 7506 } 7507 7508 /* 7509 * Only the first function on the current engine should try 7510 * to recover in open. In case of attentions in global blocks 7511 * only the first in the chip should try to recover. 7512 */ 7513 if ((!load_status 7514 && (!global ||!other_load_status)) 7515 && bnx2x_trylock_leader_lock(sc) 7516 && !bnx2x_leader_reset(sc)) { 7517 PMD_DRV_LOG(INFO, sc, 7518 "Recovered during init"); 7519 break; 7520 } 7521 7522 /* recovery has failed... */ 7523 bnx2x_set_power_state(sc, PCI_PM_D3hot); 7524 7525 sc->recovery_state = BNX2X_RECOVERY_FAILED; 7526 7527 PMD_DRV_LOG(NOTICE, sc, 7528 "Recovery flow hasn't properly " 7529 "completed yet, try again later. " 7530 "If you still see this message after a " 7531 "few retries then power cycle is required."); 7532 7533 rc = -ENXIO; 7534 goto bnx2x_init_done; 7535 } while (0); 7536 } 7537 } 7538 7539 sc->recovery_state = BNX2X_RECOVERY_DONE; 7540 7541 rc = bnx2x_nic_load(sc); 7542 7543 bnx2x_init_done: 7544 7545 if (rc) { 7546 PMD_DRV_LOG(NOTICE, sc, "Initialization failed, " 7547 "stack notified driver is NOT running!"); 7548 } 7549 7550 return rc; 7551 } 7552 7553 static void bnx2x_get_function_num(struct bnx2x_softc *sc) 7554 { 7555 uint32_t val = 0; 7556 7557 /* 7558 * Read the ME register to get the function number. The ME register 7559 * holds the relative-function number and absolute-function number. The 7560 * absolute-function number appears only in E2 and above. Before that 7561 * these bits always contained zero, therefore we cannot blindly use them. 
7562 */ 7563 7564 val = REG_RD(sc, BAR_ME_REGISTER); 7565 7566 sc->pfunc_rel = 7567 (uint8_t) ((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); 7568 sc->path_id = 7569 (uint8_t) ((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 7570 1; 7571 7572 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 7573 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id); 7574 } else { 7575 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id); 7576 } 7577 7578 PMD_DRV_LOG(DEBUG, sc, 7579 "Relative function %d, Absolute function %d, Path %d", 7580 sc->pfunc_rel, sc->pfunc_abs, sc->path_id); 7581 } 7582 7583 static uint32_t bnx2x_get_shmem_mf_cfg_base(struct bnx2x_softc *sc) 7584 { 7585 uint32_t shmem2_size; 7586 uint32_t offset; 7587 uint32_t mf_cfg_offset_value; 7588 7589 /* Non 57712 */ 7590 offset = (SHMEM_ADDR(sc, func_mb) + 7591 (MAX_FUNC_NUM * sizeof(struct drv_func_mb))); 7592 7593 /* 57712 plus */ 7594 if (sc->devinfo.shmem2_base != 0) { 7595 shmem2_size = SHMEM2_RD(sc, size); 7596 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) { 7597 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr); 7598 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) { 7599 offset = mf_cfg_offset_value; 7600 } 7601 } 7602 } 7603 7604 return offset; 7605 } 7606 7607 static uint32_t bnx2x_pcie_capability_read(struct bnx2x_softc *sc, int reg) 7608 { 7609 uint32_t ret; 7610 struct bnx2x_pci_cap *caps; 7611 7612 /* ensure PCIe capability is enabled */ 7613 caps = pci_find_cap(sc, PCIY_EXPRESS, BNX2X_PCI_CAP); 7614 if (NULL != caps) { 7615 PMD_DRV_LOG(DEBUG, sc, "Found PCIe capability: " 7616 "id=0x%04X type=0x%04X addr=0x%08X", 7617 caps->id, caps->type, caps->addr); 7618 pci_read(sc, (caps->addr + reg), &ret, 2); 7619 return ret; 7620 } 7621 7622 PMD_DRV_LOG(WARNING, sc, "PCIe capability NOT FOUND!!!"); 7623 7624 return 0; 7625 } 7626 7627 static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc) 7628 { 7629 return bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA) & 7630 PCIM_EXP_STA_TRANSACTION_PND; 7631 } 7632 7633 /* 7634 * Walk the PCI capabilities list for the device to find what features are 7635 * supported. These capabilities may be enabled/disabled by firmware so it's 7636 * best to walk the list rather than make assumptions.
7637 */ 7638 static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc) 7639 { 7640 PMD_INIT_FUNC_TRACE(sc); 7641 7642 struct bnx2x_pci_cap *caps; 7643 uint16_t link_status; 7644 int reg = 0; 7645 7646 /* check if PCI Power Management is enabled */ 7647 caps = pci_find_cap(sc, PCIY_PMG, BNX2X_PCI_CAP); 7648 if (NULL != caps) { 7649 PMD_DRV_LOG(DEBUG, sc, "Found PM capability: " 7650 "id=0x%04X type=0x%04X addr=0x%08X", 7651 caps->id, caps->type, caps->addr); 7652 7653 sc->devinfo.pcie_cap_flags |= BNX2X_PM_CAPABLE_FLAG; 7654 sc->devinfo.pcie_pm_cap_reg = caps->addr; 7655 } 7656 7657 link_status = bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA); 7658 7659 sc->devinfo.pcie_link_speed = (link_status & PCIM_LINK_STA_SPEED); 7660 sc->devinfo.pcie_link_width = 7661 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 7662 7663 PMD_DRV_LOG(DEBUG, sc, "PCIe link speed=%d width=%d", 7664 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); 7665 7666 sc->devinfo.pcie_cap_flags |= BNX2X_PCIE_CAPABLE_FLAG; 7667 7668 /* check if MSI capability is enabled */ 7669 caps = pci_find_cap(sc, PCIY_MSI, BNX2X_PCI_CAP); 7670 if (NULL != caps) { 7671 PMD_DRV_LOG(DEBUG, sc, "Found MSI capability at 0x%04x", reg); 7672 7673 sc->devinfo.pcie_cap_flags |= BNX2X_MSI_CAPABLE_FLAG; 7674 sc->devinfo.pcie_msi_cap_reg = caps->addr; 7675 } 7676 7677 /* check if MSI-X capability is enabled */ 7678 caps = pci_find_cap(sc, PCIY_MSIX, BNX2X_PCI_CAP); 7679 if (NULL != caps) { 7680 PMD_DRV_LOG(DEBUG, sc, "Found MSI-X capability at 0x%04x", reg); 7681 7682 sc->devinfo.pcie_cap_flags |= BNX2X_MSIX_CAPABLE_FLAG; 7683 sc->devinfo.pcie_msix_cap_reg = caps->addr; 7684 } 7685 } 7686 7687 static int bnx2x_get_shmem_mf_cfg_info_sd(struct bnx2x_softc *sc) 7688 { 7689 struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; 7690 uint32_t val; 7691 7692 /* get the outer vlan if we're in switch-dependent mode */ 7693 7694 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 7695 mf_info->ext_id = (uint16_t) val; 7696 7697 mf_info->multi_vnics_mode = 1; 7698 7699 if (!VALID_OVLAN(mf_info->ext_id)) { 7700 PMD_DRV_LOG(NOTICE, sc, "Invalid VLAN (%d)", mf_info->ext_id); 7701 return 1; 7702 } 7703 7704 /* get the capabilities */ 7705 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 7706 FUNC_MF_CFG_PROTOCOL_ISCSI) { 7707 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI; 7708 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) 7709 == FUNC_MF_CFG_PROTOCOL_FCOE) { 7710 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE; 7711 } else { 7712 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET; 7713 } 7714 7715 mf_info->vnics_per_port = 7716 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 
2 : 4; 7717 7718 return 0; 7719 } 7720 7721 static uint32_t bnx2x_get_shmem_ext_proto_support_flags(struct bnx2x_softc *sc) 7722 { 7723 uint32_t retval = 0; 7724 uint32_t val; 7725 7726 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 7727 7728 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { 7729 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { 7730 retval |= MF_PROTO_SUPPORT_ETHERNET; 7731 } 7732 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 7733 retval |= MF_PROTO_SUPPORT_ISCSI; 7734 } 7735 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 7736 retval |= MF_PROTO_SUPPORT_FCOE; 7737 } 7738 } 7739 7740 return retval; 7741 } 7742 7743 static int bnx2x_get_shmem_mf_cfg_info_si(struct bnx2x_softc *sc) 7744 { 7745 struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; 7746 uint32_t val; 7747 7748 /* 7749 * There is no outer vlan if we're in switch-independent mode. 7750 * If the mac is valid then assume multi-function. 7751 */ 7752 7753 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 7754 7755 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); 7756 7757 mf_info->mf_protos_supported = 7758 bnx2x_get_shmem_ext_proto_support_flags(sc); 7759 7760 mf_info->vnics_per_port = 7761 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 7762 7763 return 0; 7764 } 7765 7766 static int bnx2x_get_shmem_mf_cfg_info_niv(struct bnx2x_softc *sc) 7767 { 7768 struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; 7769 uint32_t e1hov_tag; 7770 uint32_t func_config; 7771 uint32_t niv_config; 7772 7773 mf_info->multi_vnics_mode = 1; 7774 7775 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 7776 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 7777 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); 7778 7779 mf_info->ext_id = 7780 (uint16_t) ((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> 7781 FUNC_MF_CFG_E1HOV_TAG_SHIFT); 7782 7783 mf_info->default_vlan = 7784 (uint16_t) ((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> 7785 FUNC_MF_CFG_AFEX_VLAN_SHIFT); 7786 7787 mf_info->niv_allowed_priorities = 7788 (uint8_t) ((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 7789 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); 7790 7791 mf_info->niv_default_cos = 7792 (uint8_t) ((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 7793 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); 7794 7795 mf_info->afex_vlan_mode = 7796 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 7797 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); 7798 7799 mf_info->niv_mba_enabled = 7800 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> 7801 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); 7802 7803 mf_info->mf_protos_supported = 7804 bnx2x_get_shmem_ext_proto_support_flags(sc); 7805 7806 mf_info->vnics_per_port = 7807 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 7808 7809 return 0; 7810 } 7811 7812 static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc) 7813 { 7814 struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; 7815 uint32_t mf_cfg1; 7816 uint32_t mf_cfg2; 7817 uint32_t ovlan1; 7818 uint32_t ovlan2; 7819 uint8_t i, j; 7820 7821 /* various MF mode sanity checks... 
*/ 7822 7823 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) { 7824 PMD_DRV_LOG(NOTICE, sc, 7825 "Enumerated function %d is marked as hidden", 7826 SC_PORT(sc)); 7827 return 1; 7828 } 7829 7830 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) { 7831 PMD_DRV_LOG(NOTICE, sc, "vnics_per_port=%d multi_vnics_mode=%d", 7832 mf_info->vnics_per_port, mf_info->multi_vnics_mode); 7833 return 1; 7834 } 7835 7836 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 7837 /* vnic id > 0 must have valid ovlan in switch-dependent mode */ 7838 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) { 7839 PMD_DRV_LOG(NOTICE, sc, "mf_mode=SD vnic_id=%d ovlan=%d", 7840 SC_VN(sc), OVLAN(sc)); 7841 return 1; 7842 } 7843 7844 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) { 7845 PMD_DRV_LOG(NOTICE, sc, 7846 "mf_mode=SD multi_vnics_mode=%d ovlan=%d", 7847 mf_info->multi_vnics_mode, OVLAN(sc)); 7848 return 1; 7849 } 7850 7851 /* 7852 * Verify all functions are either MF or SF mode. If MF, make sure 7853 * that all non-hidden functions have a valid ovlan. If SF, 7854 * make sure that all non-hidden functions have an invalid ovlan. 7855 */ 7856 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 7857 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 7858 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 7859 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 7860 (((mf_info->multi_vnics_mode) 7861 && !VALID_OVLAN(ovlan1)) 7862 || ((!mf_info->multi_vnics_mode) 7863 && VALID_OVLAN(ovlan1)))) { 7864 PMD_DRV_LOG(NOTICE, sc, 7865 "mf_mode=SD function %d MF config " 7866 "mismatch, multi_vnics_mode=%d ovlan=%d", 7867 i, mf_info->multi_vnics_mode, 7868 ovlan1); 7869 return 1; 7870 } 7871 } 7872 7873 /* Verify all funcs on the same port each have a different ovlan. */ 7874 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 7875 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 7876 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 7877 /* iterate from the next function on the port to the max func */ 7878 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { 7879 mf_cfg2 = 7880 MFCFG_RD(sc, func_mf_config[j].config); 7881 ovlan2 = 7882 MFCFG_RD(sc, func_mf_config[j].e1hov_tag); 7883 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) 7884 && VALID_OVLAN(ovlan1) 7885 && !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) 7886 && VALID_OVLAN(ovlan2) 7887 && (ovlan1 == ovlan2)) { 7888 PMD_DRV_LOG(NOTICE, sc, 7889 "mf_mode=SD functions %d and %d " 7890 "have the same ovlan (%d)", 7891 i, j, ovlan1); 7892 return 1; 7893 } 7894 } 7895 } 7896 } 7897 /* MULTI_FUNCTION_SD */ 7898 return 0; 7899 } 7900 7901 static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc) 7902 { 7903 struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; 7904 uint32_t val, mac_upper; 7905 uint8_t i, vnic; 7906 7907 /* initialize mf_info defaults */ 7908 mf_info->vnics_per_port = 1; 7909 mf_info->multi_vnics_mode = FALSE; 7910 mf_info->path_has_ovlan = FALSE; 7911 mf_info->mf_mode = SINGLE_FUNCTION; 7912 7913 if (!CHIP_IS_MF_CAP(sc)) { 7914 return 0; 7915 } 7916 7917 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { 7918 PMD_DRV_LOG(NOTICE, sc, "Invalid mf_cfg_base!"); 7919 return 1; 7920 } 7921 7922 /* get the MF mode (switch dependent / independent / single-function) */ 7923 7924 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 7925 7926 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) { 7927 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 7928 7929 mac_upper = 7930 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 7931 7932 /* check for legal upper mac bytes */ 7933 if (mac_upper !=
FUNC_MF_CFG_UPPERMAC_DEFAULT) { 7934 mf_info->mf_mode = MULTI_FUNCTION_SI; 7935 } else { 7936 PMD_DRV_LOG(NOTICE, sc, 7937 "Invalid config for Switch Independent mode"); 7938 } 7939 7940 break; 7941 7942 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 7943 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: 7944 7945 /* get outer vlan configuration */ 7946 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 7947 7948 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != 7949 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 7950 mf_info->mf_mode = MULTI_FUNCTION_SD; 7951 } else { 7952 PMD_DRV_LOG(NOTICE, sc, 7953 "Invalid config for Switch Dependent mode"); 7954 } 7955 7956 break; 7957 7958 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: 7959 7960 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ 7961 return 0; 7962 7963 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 7964 7965 /* 7966 * Mark MF mode as NIV if MCP version includes NPAR-SD support 7967 * and the MAC address is valid. 7968 */ 7969 mac_upper = 7970 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 7971 7972 if ((SHMEM2_HAS(sc, afex_driver_support)) && 7973 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { 7974 mf_info->mf_mode = MULTI_FUNCTION_AFEX; 7975 } else { 7976 PMD_DRV_LOG(NOTICE, sc, "Invalid config for AFEX mode"); 7977 } 7978 7979 break; 7980 7981 default: 7982 7983 PMD_DRV_LOG(NOTICE, sc, "Unknown MF mode (0x%08x)", 7984 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); 7985 7986 return 1; 7987 } 7988 7989 /* set path mf_mode (which could be different than function mf_mode) */ 7990 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 7991 mf_info->path_has_ovlan = TRUE; 7992 } else if (mf_info->mf_mode == SINGLE_FUNCTION) { 7993 /* 7994 * Decide on path multi vnics mode. If we're not in MF mode and in 7995 * 4-port mode, this is good enough to check vnic-0 of the other port 7996 * on the same path 7997 */ 7998 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 7999 uint8_t other_port = !(PORT_ID(sc) & 1); 8000 uint8_t abs_func_other_port = 8001 (SC_PATH(sc) + (2 * other_port)); 8002 8003 val = 8004 MFCFG_RD(sc, 8005 func_mf_config 8006 [abs_func_other_port].e1hov_tag); 8007 8008 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t) val); 8009 } 8010 } 8011 8012 if (mf_info->mf_mode == SINGLE_FUNCTION) { 8013 /* invalid MF config */ 8014 if (SC_VN(sc) >= 1) { 8015 PMD_DRV_LOG(NOTICE, sc, "VNIC ID >= 1 in SF mode"); 8016 return 1; 8017 } 8018 8019 return 0; 8020 } 8021 8022 /* get the MF configuration */ 8023 mf_info->mf_config[SC_VN(sc)] = 8024 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 8025 8026 switch (mf_info->mf_mode) { 8027 case MULTI_FUNCTION_SD: 8028 8029 bnx2x_get_shmem_mf_cfg_info_sd(sc); 8030 break; 8031 8032 case MULTI_FUNCTION_SI: 8033 8034 bnx2x_get_shmem_mf_cfg_info_si(sc); 8035 break; 8036 8037 case MULTI_FUNCTION_AFEX: 8038 8039 bnx2x_get_shmem_mf_cfg_info_niv(sc); 8040 break; 8041 8042 default: 8043 8044 PMD_DRV_LOG(NOTICE, sc, "Get MF config failed (mf_mode=0x%08x)", 8045 mf_info->mf_mode); 8046 return 1; 8047 } 8048 8049 /* get the congestion management parameters */ 8050 8051 vnic = 0; 8052 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 8053 /* get min/max bw */ 8054 val = MFCFG_RD(sc, func_mf_config[i].config); 8055 mf_info->min_bw[vnic] = 8056 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> 8057 FUNC_MF_CFG_MIN_BW_SHIFT); 8058 mf_info->max_bw[vnic] = 8059 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> 8060 FUNC_MF_CFG_MAX_BW_SHIFT); 8061 vnic++; 8062 } 8063 8064 return bnx2x_check_valid_mf_cfg(sc); 8065 } 8066 8067 static int bnx2x_get_shmem_info(struct 
bnx2x_softc *sc) 8068 { 8069 int port; 8070 uint32_t mac_hi, mac_lo, val; 8071 8072 PMD_INIT_FUNC_TRACE(sc); 8073 8074 port = SC_PORT(sc); 8075 mac_hi = mac_lo = 0; 8076 8077 sc->link_params.sc = sc; 8078 sc->link_params.port = port; 8079 8080 /* get the hardware config info */ 8081 sc->devinfo.hw_config = SHMEM_RD(sc, dev_info.shared_hw_config.config); 8082 sc->devinfo.hw_config2 = 8083 SHMEM_RD(sc, dev_info.shared_hw_config.config2); 8084 8085 sc->link_params.hw_led_mode = 8086 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> 8087 SHARED_HW_CFG_LED_MODE_SHIFT); 8088 8089 /* get the port feature config */ 8090 sc->port.config = 8091 SHMEM_RD(sc, dev_info.port_feature_config[port].config); 8092 8093 /* get the link params */ 8094 sc->link_params.speed_cap_mask[ELINK_INT_PHY] = 8095 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask) 8096 & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 8097 sc->link_params.speed_cap_mask[ELINK_EXT_PHY1] = 8098 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2) 8099 & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 8100 8101 /* get the lane config */ 8102 sc->link_params.lane_config = 8103 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); 8104 8105 /* get the link config */ 8106 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); 8107 sc->port.link_config[ELINK_INT_PHY] = val; 8108 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); 8109 sc->port.link_config[ELINK_EXT_PHY1] = 8110 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); 8111 8112 /* get the override preemphasis flag and enable it or turn it off */ 8113 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 8114 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { 8115 sc->link_params.feature_config_flags |= 8116 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 8117 } else { 8118 sc->link_params.feature_config_flags &= 8119 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 8120 } 8121 8122 val = sc->devinfo.bc_ver >> 8; 8123 if (val < BNX2X_BC_VER) { 8124 /* for now only warn later we might need to enforce this */ 8125 PMD_DRV_LOG(NOTICE, sc, "This driver needs bc_ver %X but found %X, please upgrade BC\n", 8126 BNX2X_BC_VER, val); 8127 } 8128 sc->link_params.feature_config_flags |= 8129 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? 8130 ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 8131 0; 8132 8133 sc->link_params.feature_config_flags |= 8134 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 8135 ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 8136 sc->link_params.feature_config_flags |= 8137 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ? 8138 ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0; 8139 sc->link_params.feature_config_flags |= 8140 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 
8141 ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; 8142 8143 /* get the initial value of the link params */ 8144 sc->link_params.multi_phy_config = 8145 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); 8146 8147 /* get external phy info */ 8148 sc->port.ext_phy_config = 8149 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); 8150 8151 /* get the multifunction configuration */ 8152 bnx2x_get_mf_cfg_info(sc); 8153 8154 /* get the mac address */ 8155 if (IS_MF(sc)) { 8156 mac_hi = 8157 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 8158 mac_lo = 8159 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); 8160 } else { 8161 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); 8162 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); 8163 } 8164 8165 if ((mac_lo == 0) && (mac_hi == 0)) { 8166 *sc->mac_addr_str = 0; 8167 PMD_DRV_LOG(NOTICE, sc, "No Ethernet address programmed!"); 8168 } else { 8169 sc->link_params.mac_addr[0] = (uint8_t) (mac_hi >> 8); 8170 sc->link_params.mac_addr[1] = (uint8_t) (mac_hi); 8171 sc->link_params.mac_addr[2] = (uint8_t) (mac_lo >> 24); 8172 sc->link_params.mac_addr[3] = (uint8_t) (mac_lo >> 16); 8173 sc->link_params.mac_addr[4] = (uint8_t) (mac_lo >> 8); 8174 sc->link_params.mac_addr[5] = (uint8_t) (mac_lo); 8175 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), 8176 "%02x:%02x:%02x:%02x:%02x:%02x", 8177 sc->link_params.mac_addr[0], 8178 sc->link_params.mac_addr[1], 8179 sc->link_params.mac_addr[2], 8180 sc->link_params.mac_addr[3], 8181 sc->link_params.mac_addr[4], 8182 sc->link_params.mac_addr[5]); 8183 PMD_DRV_LOG(DEBUG, sc, 8184 "Ethernet address: %s", sc->mac_addr_str); 8185 } 8186 8187 return 0; 8188 } 8189 8190 static void bnx2x_media_detect(struct bnx2x_softc *sc) 8191 { 8192 uint32_t phy_idx = bnx2x_get_cur_phy_idx(sc); 8193 switch (sc->link_params.phy[phy_idx].media_type) { 8194 case ELINK_ETH_PHY_SFPP_10G_FIBER: 8195 case ELINK_ETH_PHY_SFP_1G_FIBER: 8196 case ELINK_ETH_PHY_XFP_FIBER: 8197 case ELINK_ETH_PHY_KR: 8198 case ELINK_ETH_PHY_CX4: 8199 PMD_DRV_LOG(INFO, sc, "Found 10GBase-CX4 media."); 8200 sc->media = IFM_10G_CX4; 8201 break; 8202 case ELINK_ETH_PHY_DA_TWINAX: 8203 PMD_DRV_LOG(INFO, sc, "Found 10Gb Twinax media."); 8204 sc->media = IFM_10G_TWINAX; 8205 break; 8206 case ELINK_ETH_PHY_BASE_T: 8207 PMD_DRV_LOG(INFO, sc, "Found 10GBase-T media."); 8208 sc->media = IFM_10G_T; 8209 break; 8210 case ELINK_ETH_PHY_NOT_PRESENT: 8211 PMD_DRV_LOG(INFO, sc, "Media not present."); 8212 sc->media = 0; 8213 break; 8214 case ELINK_ETH_PHY_UNSPECIFIED: 8215 default: 8216 PMD_DRV_LOG(INFO, sc, "Unknown media!"); 8217 sc->media = 0; 8218 break; 8219 } 8220 } 8221 8222 #define GET_FIELD(value, fname) \ 8223 (((value) & (fname##_MASK)) >> (fname##_SHIFT)) 8224 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 8225 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 8226 8227 static int bnx2x_get_igu_cam_info(struct bnx2x_softc *sc) 8228 { 8229 int pfid = SC_FUNC(sc); 8230 int igu_sb_id; 8231 uint32_t val; 8232 uint8_t fid, igu_sb_cnt = 0; 8233 8234 sc->igu_base_sb = 0xff; 8235 8236 if (CHIP_INT_MODE_IS_BC(sc)) { 8237 int vn = SC_VN(sc); 8238 igu_sb_cnt = sc->igu_sb_cnt; 8239 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * 8240 FP_SB_MAX_E1x); 8241 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + 8242 (CHIP_IS_MODE_4_PORT(sc) ? 
pfid : vn)); 8243 return 0; 8244 } 8245 8246 /* IGU in normal mode - read CAM */ 8247 for (igu_sb_id = 0; 8248 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_id++) { 8249 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); 8250 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) { 8251 continue; 8252 } 8253 fid = IGU_FID(val); 8254 if (fid & IGU_FID_ENCODE_IS_PF) { 8255 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) { 8256 continue; 8257 } 8258 if (IGU_VEC(val) == 0) { 8259 /* default status block */ 8260 sc->igu_dsb_id = igu_sb_id; 8261 } else { 8262 if (sc->igu_base_sb == 0xff) { 8263 sc->igu_base_sb = igu_sb_id; 8264 } 8265 igu_sb_cnt++; 8266 } 8267 } 8268 } 8269 8270 /* 8271 * Due to new PF resource allocation by MFW T7.4 and above, it's optional 8272 * that number of CAM entries will not be equal to the value advertised in 8273 * PCI. Driver should use the minimal value of both as the actual status 8274 * block count 8275 */ 8276 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt); 8277 8278 if (igu_sb_cnt == 0) { 8279 PMD_DRV_LOG(ERR, sc, "CAM configuration error"); 8280 return -1; 8281 } 8282 8283 return 0; 8284 } 8285 8286 /* 8287 * Gather various information from the device config space, the device itself, 8288 * shmem, and the user input. 8289 */ 8290 static int bnx2x_get_device_info(struct bnx2x_softc *sc) 8291 { 8292 uint32_t val; 8293 int rc; 8294 8295 /* get the chip revision (chip metal comes from pci config space) */ 8296 sc->devinfo.chip_id = sc->link_params.chip_id = 8297 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | 8298 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | 8299 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | 8300 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); 8301 8302 /* force 57811 according to MISC register */ 8303 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 8304 if (CHIP_IS_57810(sc)) { 8305 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | 8306 (sc-> 8307 devinfo.chip_id & 0x0000ffff)); 8308 } else if (CHIP_IS_57810_MF(sc)) { 8309 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | 8310 (sc-> 8311 devinfo.chip_id & 0x0000ffff)); 8312 } 8313 sc->devinfo.chip_id |= 0x1; 8314 } 8315 8316 PMD_DRV_LOG(DEBUG, sc, 8317 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)", 8318 sc->devinfo.chip_id, 8319 ((sc->devinfo.chip_id >> 16) & 0xffff), 8320 ((sc->devinfo.chip_id >> 12) & 0xf), 8321 ((sc->devinfo.chip_id >> 4) & 0xff), 8322 ((sc->devinfo.chip_id >> 0) & 0xf)); 8323 8324 val = (REG_RD(sc, 0x2874) & 0x55); 8325 if ((sc->devinfo.chip_id & 0x1) || (CHIP_IS_E1H(sc) && (val == 0x55))) { 8326 sc->flags |= BNX2X_ONE_PORT_FLAG; 8327 PMD_DRV_LOG(DEBUG, sc, "single port device"); 8328 } 8329 8330 /* set the doorbell size */ 8331 sc->doorbell_size = (1 << BNX2X_DB_SHIFT); 8332 8333 /* determine whether the device is in 2 port or 4 port mode */ 8334 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1h */ 8335 if (CHIP_IS_E2E3(sc)) { 8336 /* 8337 * Read port4mode_en_ovwr[0]: 8338 * If 1, four port mode is in port4mode_en_ovwr[1]. 8339 * If 0, four port mode is in port4mode_en[0]. 8340 */ 8341 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); 8342 if (val & 1) { 8343 val = ((val >> 1) & 1); 8344 } else { 8345 val = REG_RD(sc, MISC_REG_PORT4MODE_EN); 8346 } 8347 8348 sc->devinfo.chip_port_mode = 8349 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; 8350 8351 PMD_DRV_LOG(DEBUG, sc, "Port mode = %s", (val) ? 
"4" : "2"); 8352 } 8353 8354 /* get the function and path info for the device */ 8355 bnx2x_get_function_num(sc); 8356 8357 /* get the shared memory base address */ 8358 sc->devinfo.shmem_base = 8359 sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 8360 sc->devinfo.shmem2_base = 8361 REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 : 8362 MISC_REG_GENERIC_CR_0)); 8363 8364 if (!sc->devinfo.shmem_base) { 8365 /* this should ONLY prevent upcoming shmem reads */ 8366 PMD_DRV_LOG(INFO, sc, "MCP not active"); 8367 sc->flags |= BNX2X_NO_MCP_FLAG; 8368 return 0; 8369 } 8370 8371 /* make sure the shared memory contents are valid */ 8372 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 8373 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != 8374 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { 8375 PMD_DRV_LOG(NOTICE, sc, "Invalid SHMEM validity signature: 0x%08x", 8376 val); 8377 return 0; 8378 } 8379 8380 /* get the bootcode version */ 8381 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev); 8382 snprintf(sc->devinfo.bc_ver_str, 8383 sizeof(sc->devinfo.bc_ver_str), 8384 "%d.%d.%d", 8385 ((sc->devinfo.bc_ver >> 24) & 0xff), 8386 ((sc->devinfo.bc_ver >> 16) & 0xff), 8387 ((sc->devinfo.bc_ver >> 8) & 0xff)); 8388 PMD_DRV_LOG(DEBUG, sc, "Bootcode version: %s", sc->devinfo.bc_ver_str); 8389 8390 /* get the bootcode shmem address */ 8391 sc->devinfo.mf_cfg_base = bnx2x_get_shmem_mf_cfg_base(sc); 8392 8393 /* clean indirect addresses as they're not used */ 8394 pci_write_long(sc, PCICFG_GRC_ADDRESS, 0); 8395 if (IS_PF(sc)) { 8396 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0); 8397 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0); 8398 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0); 8399 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0); 8400 if (CHIP_IS_E1x(sc)) { 8401 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0); 8402 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0); 8403 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); 8404 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); 8405 } 8406 } 8407 8408 /* get the nvram size */ 8409 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4); 8410 sc->devinfo.flash_size = 8411 (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); 8412 8413 bnx2x_set_power_state(sc, PCI_PM_D0); 8414 /* get various configuration parameters from shmem */ 8415 bnx2x_get_shmem_info(sc); 8416 8417 /* initialize IGU parameters */ 8418 if (CHIP_IS_E1x(sc)) { 8419 sc->devinfo.int_block = INT_BLOCK_HC; 8420 sc->igu_dsb_id = DEF_SB_IGU_ID; 8421 sc->igu_base_sb = 0; 8422 } else { 8423 sc->devinfo.int_block = INT_BLOCK_IGU; 8424 8425 /* do not allow device reset during IGU info preocessing */ 8426 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 8427 8428 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); 8429 8430 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 8431 int tout = 5000; 8432 8433 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); 8434 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val); 8435 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f); 8436 8437 while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) { 8438 tout--; 8439 DELAY(1000); 8440 } 8441 8442 if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) { 8443 PMD_DRV_LOG(NOTICE, sc, 8444 "FORCING IGU Normal Mode failed!!!"); 8445 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 8446 return -1; 8447 } 8448 } 8449 8450 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 8451 PMD_DRV_LOG(DEBUG, sc, "IGU Backward Compatible Mode"); 8452 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP; 8453 } else { 8454 PMD_DRV_LOG(DEBUG, sc, "IGU Normal Mode"); 8455 } 8456 8457 rc = 
bnx2x_get_igu_cam_info(sc); 8458 8459 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 8460 8461 if (rc) { 8462 return rc; 8463 } 8464 } 8465 8466 /* 8467 * Get base FW non-default (fast path) status block ID. This value is 8468 * used to initialize the fw_sb_id saved on the fp/queue structure to 8469 * determine the id used by the FW. 8470 */ 8471 if (CHIP_IS_E1x(sc)) { 8472 sc->base_fw_ndsb = 8473 ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc)); 8474 } else { 8475 /* 8476 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of 8477 * the same queue are indicated on the same IGU SB). So we prefer 8478 * FW and IGU SBs to be the same value. 8479 */ 8480 sc->base_fw_ndsb = sc->igu_base_sb; 8481 } 8482 8483 elink_phy_probe(&sc->link_params); 8484 8485 return 0; 8486 } 8487 8488 static void 8489 bnx2x_link_settings_supported(struct bnx2x_softc *sc, uint32_t switch_cfg) 8490 { 8491 uint32_t cfg_size = 0; 8492 uint32_t idx; 8493 uint8_t port = SC_PORT(sc); 8494 8495 /* aggregation of supported attributes of all external phys */ 8496 sc->port.supported[0] = 0; 8497 sc->port.supported[1] = 0; 8498 8499 switch (sc->link_params.num_phys) { 8500 case 1: 8501 sc->port.supported[0] = 8502 sc->link_params.phy[ELINK_INT_PHY].supported; 8503 cfg_size = 1; 8504 break; 8505 case 2: 8506 sc->port.supported[0] = 8507 sc->link_params.phy[ELINK_EXT_PHY1].supported; 8508 cfg_size = 1; 8509 break; 8510 case 3: 8511 if (sc->link_params.multi_phy_config & 8512 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 8513 sc->port.supported[1] = 8514 sc->link_params.phy[ELINK_EXT_PHY1].supported; 8515 sc->port.supported[0] = 8516 sc->link_params.phy[ELINK_EXT_PHY2].supported; 8517 } else { 8518 sc->port.supported[0] = 8519 sc->link_params.phy[ELINK_EXT_PHY1].supported; 8520 sc->port.supported[1] = 8521 sc->link_params.phy[ELINK_EXT_PHY2].supported; 8522 } 8523 cfg_size = 2; 8524 break; 8525 } 8526 8527 if (!(sc->port.supported[0] || sc->port.supported[1])) { 8528 PMD_DRV_LOG(ERR, sc, 8529 "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)", 8530 SHMEM_RD(sc, 8531 dev_info.port_hw_config 8532 [port].external_phy_config), 8533 SHMEM_RD(sc, 8534 dev_info.port_hw_config 8535 [port].external_phy_config2)); 8536 return; 8537 } 8538 8539 if (CHIP_IS_E3(sc)) 8540 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); 8541 else { 8542 switch (switch_cfg) { 8543 case ELINK_SWITCH_CFG_1G: 8544 sc->port.phy_addr = 8545 REG_RD(sc, 8546 NIG_REG_SERDES0_CTRL_PHY_ADDR + port * 0x10); 8547 break; 8548 case ELINK_SWITCH_CFG_10G: 8549 sc->port.phy_addr = 8550 REG_RD(sc, 8551 NIG_REG_XGXS0_CTRL_PHY_ADDR + port * 0x18); 8552 break; 8553 default: 8554 PMD_DRV_LOG(ERR, sc, 8555 "Invalid switch config in " 8556 "link_config=0x%08x", 8557 sc->port.link_config[0]); 8558 return; 8559 } 8560 } 8561 8562 PMD_DRV_LOG(INFO, sc, "PHY addr 0x%08x", sc->port.phy_addr); 8563 8564 /* mask what we support according to speed_cap_mask per configuration */ 8565 for (idx = 0; idx < cfg_size; idx++) { 8566 if (!(sc->link_params.speed_cap_mask[idx] & 8567 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { 8568 sc->port.supported[idx] &= 8569 ~ELINK_SUPPORTED_10baseT_Half; 8570 } 8571 8572 if (!(sc->link_params.speed_cap_mask[idx] & 8573 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { 8574 sc->port.supported[idx] &= 8575 ~ELINK_SUPPORTED_10baseT_Full; 8576 } 8577 8578 if (!(sc->link_params.speed_cap_mask[idx] & 8579 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { 8580 sc->port.supported[idx] &= 8581 ~ELINK_SUPPORTED_100baseT_Half; 8582 } 8583 8584 if
(!(sc->link_params.speed_cap_mask[idx] & 8585 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { 8586 sc->port.supported[idx] &= 8587 ~ELINK_SUPPORTED_100baseT_Full; 8588 } 8589 8590 if (!(sc->link_params.speed_cap_mask[idx] & 8591 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { 8592 sc->port.supported[idx] &= 8593 ~ELINK_SUPPORTED_1000baseT_Full; 8594 } 8595 8596 if (!(sc->link_params.speed_cap_mask[idx] & 8597 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { 8598 sc->port.supported[idx] &= 8599 ~ELINK_SUPPORTED_2500baseX_Full; 8600 } 8601 8602 if (!(sc->link_params.speed_cap_mask[idx] & 8603 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 8604 sc->port.supported[idx] &= 8605 ~ELINK_SUPPORTED_10000baseT_Full; 8606 } 8607 8608 if (!(sc->link_params.speed_cap_mask[idx] & 8609 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { 8610 sc->port.supported[idx] &= 8611 ~ELINK_SUPPORTED_20000baseKR2_Full; 8612 } 8613 } 8614 8615 PMD_DRV_LOG(INFO, sc, "PHY supported 0=0x%08x 1=0x%08x", 8616 sc->port.supported[0], sc->port.supported[1]); 8617 } 8618 8619 static void bnx2x_link_settings_requested(struct bnx2x_softc *sc) 8620 { 8621 uint32_t link_config; 8622 uint32_t idx; 8623 uint32_t cfg_size = 0; 8624 8625 sc->port.advertising[0] = 0; 8626 sc->port.advertising[1] = 0; 8627 8628 switch (sc->link_params.num_phys) { 8629 case 1: 8630 case 2: 8631 cfg_size = 1; 8632 break; 8633 case 3: 8634 cfg_size = 2; 8635 break; 8636 } 8637 8638 for (idx = 0; idx < cfg_size; idx++) { 8639 sc->link_params.req_duplex[idx] = DUPLEX_FULL; 8640 link_config = sc->port.link_config[idx]; 8641 8642 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 8643 case PORT_FEATURE_LINK_SPEED_AUTO: 8644 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { 8645 sc->link_params.req_line_speed[idx] = 8646 ELINK_SPEED_AUTO_NEG; 8647 sc->port.advertising[idx] |= 8648 sc->port.supported[idx]; 8649 if (sc->link_params.phy[ELINK_EXT_PHY1].type == 8650 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) 8651 sc->port.advertising[idx] |= 8652 (ELINK_SUPPORTED_100baseT_Half | 8653 ELINK_SUPPORTED_100baseT_Full); 8654 } else { 8655 /* force 10G, no AN */ 8656 sc->link_params.req_line_speed[idx] = 8657 ELINK_SPEED_10000; 8658 sc->port.advertising[idx] |= 8659 (ADVERTISED_10000baseT_Full | 8660 ADVERTISED_FIBRE); 8661 continue; 8662 } 8663 break; 8664 8665 case PORT_FEATURE_LINK_SPEED_10M_FULL: 8666 if (sc-> 8667 port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) 8668 { 8669 sc->link_params.req_line_speed[idx] = 8670 ELINK_SPEED_10; 8671 sc->port.advertising[idx] |= 8672 (ADVERTISED_10baseT_Full | ADVERTISED_TP); 8673 } else { 8674 PMD_DRV_LOG(ERR, sc, 8675 "Invalid NVRAM config link_config=0x%08x " 8676 "speed_cap_mask=0x%08x", 8677 link_config, 8678 sc-> 8679 link_params.speed_cap_mask[idx]); 8680 return; 8681 } 8682 break; 8683 8684 case PORT_FEATURE_LINK_SPEED_10M_HALF: 8685 if (sc-> 8686 port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) 8687 { 8688 sc->link_params.req_line_speed[idx] = 8689 ELINK_SPEED_10; 8690 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 8691 sc->port.advertising[idx] |= 8692 (ADVERTISED_10baseT_Half | ADVERTISED_TP); 8693 } else { 8694 PMD_DRV_LOG(ERR, sc, 8695 "Invalid NVRAM config link_config=0x%08x " 8696 "speed_cap_mask=0x%08x", 8697 link_config, 8698 sc-> 8699 link_params.speed_cap_mask[idx]); 8700 return; 8701 } 8702 break; 8703 8704 case PORT_FEATURE_LINK_SPEED_100M_FULL: 8705 if (sc-> 8706 port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) 8707 { 8708 sc->link_params.req_line_speed[idx] = 8709 ELINK_SPEED_100; 8710 sc->port.advertising[idx] |= 8711 
(ADVERTISED_100baseT_Full | ADVERTISED_TP); 8712 } else { 8713 PMD_DRV_LOG(ERR, sc, 8714 "Invalid NVRAM config link_config=0x%08x " 8715 "speed_cap_mask=0x%08x", 8716 link_config, 8717 sc-> 8718 link_params.speed_cap_mask[idx]); 8719 return; 8720 } 8721 break; 8722 8723 case PORT_FEATURE_LINK_SPEED_100M_HALF: 8724 if (sc-> 8725 port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) 8726 { 8727 sc->link_params.req_line_speed[idx] = 8728 ELINK_SPEED_100; 8729 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 8730 sc->port.advertising[idx] |= 8731 (ADVERTISED_100baseT_Half | ADVERTISED_TP); 8732 } else { 8733 PMD_DRV_LOG(ERR, sc, 8734 "Invalid NVRAM config link_config=0x%08x " 8735 "speed_cap_mask=0x%08x", 8736 link_config, 8737 sc-> 8738 link_params.speed_cap_mask[idx]); 8739 return; 8740 } 8741 break; 8742 8743 case PORT_FEATURE_LINK_SPEED_1G: 8744 if (sc->port.supported[idx] & 8745 ELINK_SUPPORTED_1000baseT_Full) { 8746 sc->link_params.req_line_speed[idx] = 8747 ELINK_SPEED_1000; 8748 sc->port.advertising[idx] |= 8749 (ADVERTISED_1000baseT_Full | ADVERTISED_TP); 8750 } else { 8751 PMD_DRV_LOG(ERR, sc, 8752 "Invalid NVRAM config link_config=0x%08x " 8753 "speed_cap_mask=0x%08x", 8754 link_config, 8755 sc-> 8756 link_params.speed_cap_mask[idx]); 8757 return; 8758 } 8759 break; 8760 8761 case PORT_FEATURE_LINK_SPEED_2_5G: 8762 if (sc->port.supported[idx] & 8763 ELINK_SUPPORTED_2500baseX_Full) { 8764 sc->link_params.req_line_speed[idx] = 8765 ELINK_SPEED_2500; 8766 sc->port.advertising[idx] |= 8767 (ADVERTISED_2500baseX_Full | ADVERTISED_TP); 8768 } else { 8769 PMD_DRV_LOG(ERR, sc, 8770 "Invalid NVRAM config link_config=0x%08x " 8771 "speed_cap_mask=0x%08x", 8772 link_config, 8773 sc-> 8774 link_params.speed_cap_mask[idx]); 8775 return; 8776 } 8777 break; 8778 8779 case PORT_FEATURE_LINK_SPEED_10G_CX4: 8780 if (sc->port.supported[idx] & 8781 ELINK_SUPPORTED_10000baseT_Full) { 8782 sc->link_params.req_line_speed[idx] = 8783 ELINK_SPEED_10000; 8784 sc->port.advertising[idx] |= 8785 (ADVERTISED_10000baseT_Full | 8786 ADVERTISED_FIBRE); 8787 } else { 8788 PMD_DRV_LOG(ERR, sc, 8789 "Invalid NVRAM config link_config=0x%08x " 8790 "speed_cap_mask=0x%08x", 8791 link_config, 8792 sc-> 8793 link_params.speed_cap_mask[idx]); 8794 return; 8795 } 8796 break; 8797 8798 case PORT_FEATURE_LINK_SPEED_20G: 8799 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; 8800 break; 8801 8802 default: 8803 PMD_DRV_LOG(ERR, sc, 8804 "Invalid NVRAM config link_config=0x%08x " 8805 "speed_cap_mask=0x%08x", link_config, 8806 sc->link_params.speed_cap_mask[idx]); 8807 sc->link_params.req_line_speed[idx] = 8808 ELINK_SPEED_AUTO_NEG; 8809 sc->port.advertising[idx] = sc->port.supported[idx]; 8810 break; 8811 } 8812 8813 sc->link_params.req_flow_ctrl[idx] = 8814 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); 8815 8816 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { 8817 if (! 
8818 (sc-> 8819 port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { 8820 sc->link_params.req_flow_ctrl[idx] = 8821 ELINK_FLOW_CTRL_NONE; 8822 } else { 8823 bnx2x_set_requested_fc(sc); 8824 } 8825 } 8826 } 8827 } 8828 8829 static void bnx2x_get_phy_info(struct bnx2x_softc *sc) 8830 { 8831 uint8_t port = SC_PORT(sc); 8832 uint32_t eee_mode; 8833 8834 PMD_INIT_FUNC_TRACE(sc); 8835 8836 /* shmem data already read in bnx2x_get_shmem_info() */ 8837 8838 bnx2x_link_settings_supported(sc, sc->link_params.switch_cfg); 8839 bnx2x_link_settings_requested(sc); 8840 8841 /* configure link feature according to nvram value */ 8842 eee_mode = 8843 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) 8844 & PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 8845 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 8846 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { 8847 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI | 8848 ELINK_EEE_MODE_ENABLE_LPI | 8849 ELINK_EEE_MODE_OUTPUT_TIME); 8850 } else { 8851 sc->link_params.eee_mode = 0; 8852 } 8853 8854 /* get the media type */ 8855 bnx2x_media_detect(sc); 8856 } 8857 8858 static void bnx2x_set_modes_bitmap(struct bnx2x_softc *sc) 8859 { 8860 uint32_t flags = MODE_ASIC | MODE_PORT2; 8861 8862 if (CHIP_IS_E2(sc)) { 8863 flags |= MODE_E2; 8864 } else if (CHIP_IS_E3(sc)) { 8865 flags |= MODE_E3; 8866 if (CHIP_REV(sc) == CHIP_REV_Ax) { 8867 flags |= MODE_E3_A0; 8868 } else { /*if (CHIP_REV(sc) == CHIP_REV_Bx) */ 8869 8870 flags |= MODE_E3_B0 | MODE_COS3; 8871 } 8872 } 8873 8874 if (IS_MF(sc)) { 8875 flags |= MODE_MF; 8876 switch (sc->devinfo.mf_info.mf_mode) { 8877 case MULTI_FUNCTION_SD: 8878 flags |= MODE_MF_SD; 8879 break; 8880 case MULTI_FUNCTION_SI: 8881 flags |= MODE_MF_SI; 8882 break; 8883 case MULTI_FUNCTION_AFEX: 8884 flags |= MODE_MF_AFEX; 8885 break; 8886 } 8887 } else { 8888 flags |= MODE_SF; 8889 } 8890 8891 #if defined(__LITTLE_ENDIAN) 8892 flags |= MODE_LITTLE_ENDIAN; 8893 #else /* __BIG_ENDIAN */ 8894 flags |= MODE_BIG_ENDIAN; 8895 #endif 8896 8897 INIT_MODE_FLAGS(sc) = flags; 8898 } 8899 8900 int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc) 8901 { 8902 struct bnx2x_fastpath *fp; 8903 char buf[32]; 8904 uint32_t i; 8905 8906 if (IS_PF(sc)) { 8907 /************************/ 8908 /* DEFAULT STATUS BLOCK */ 8909 /************************/ 8910 8911 if (bnx2x_dma_alloc(sc, sizeof(struct host_sp_status_block), 8912 &sc->def_sb_dma, "def_sb", 8913 RTE_CACHE_LINE_SIZE) != 0) { 8914 return -1; 8915 } 8916 8917 sc->def_sb = 8918 (struct host_sp_status_block *)sc->def_sb_dma.vaddr; 8919 /***************/ 8920 /* EVENT QUEUE */ 8921 /***************/ 8922 8923 if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE, 8924 &sc->eq_dma, "ev_queue", 8925 RTE_CACHE_LINE_SIZE) != 0) { 8926 sc->def_sb = NULL; 8927 return -1; 8928 } 8929 8930 sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr; 8931 8932 /*************/ 8933 /* SLOW PATH */ 8934 /*************/ 8935 8936 if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_slowpath), 8937 &sc->sp_dma, "sp", 8938 RTE_CACHE_LINE_SIZE) != 0) { 8939 sc->eq = NULL; 8940 sc->def_sb = NULL; 8941 return -1; 8942 } 8943 8944 sc->sp = (struct bnx2x_slowpath *)sc->sp_dma.vaddr; 8945 8946 /*******************/ 8947 /* SLOW PATH QUEUE */ 8948 /*******************/ 8949 8950 if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE, 8951 &sc->spq_dma, "sp_queue", 8952 RTE_CACHE_LINE_SIZE) != 0) { 8953 sc->sp = NULL; 8954 sc->eq = NULL; 8955 sc->def_sb = NULL; 8956 return -1; 8957 } 8958 8959 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; 8960 8961 /***************************/ 
8962 /* FW DECOMPRESSION BUFFER */ 8963 /***************************/ 8964 8965 if (bnx2x_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, 8966 "fw_buf", RTE_CACHE_LINE_SIZE) != 0) { 8967 sc->spq = NULL; 8968 sc->sp = NULL; 8969 sc->eq = NULL; 8970 sc->def_sb = NULL; 8971 return -1; 8972 } 8973 8974 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; 8975 } 8976 8977 /*************/ 8978 /* FASTPATHS */ 8979 /*************/ 8980 8981 /* allocate DMA memory for each fastpath structure */ 8982 for (i = 0; i < sc->num_queues; i++) { 8983 fp = &sc->fp[i]; 8984 fp->sc = sc; 8985 fp->index = i; 8986 8987 /*******************/ 8988 /* FP STATUS BLOCK */ 8989 /*******************/ 8990 8991 snprintf(buf, sizeof(buf), "fp_%d_sb", i); 8992 if (bnx2x_dma_alloc(sc, sizeof(union bnx2x_host_hc_status_block), 8993 &fp->sb_dma, buf, RTE_CACHE_LINE_SIZE) != 0) { 8994 PMD_DRV_LOG(NOTICE, sc, "Failed to alloc %s", buf); 8995 return -1; 8996 } else { 8997 if (CHIP_IS_E2E3(sc)) { 8998 fp->status_block.e2_sb = 8999 (struct host_hc_status_block_e2 *) 9000 fp->sb_dma.vaddr; 9001 } else { 9002 fp->status_block.e1x_sb = 9003 (struct host_hc_status_block_e1x *) 9004 fp->sb_dma.vaddr; 9005 } 9006 } 9007 } 9008 9009 return 0; 9010 } 9011 9012 void bnx2x_free_hsi_mem(struct bnx2x_softc *sc) 9013 { 9014 struct bnx2x_fastpath *fp; 9015 int i; 9016 9017 for (i = 0; i < sc->num_queues; i++) { 9018 fp = &sc->fp[i]; 9019 9020 /*******************/ 9021 /* FP STATUS BLOCK */ 9022 /*******************/ 9023 9024 memset(&fp->status_block, 0, sizeof(fp->status_block)); 9025 bnx2x_dma_free(&fp->sb_dma); 9026 } 9027 9028 if (IS_PF(sc)) { 9029 /***************************/ 9030 /* FW DECOMPRESSION BUFFER */ 9031 /***************************/ 9032 9033 bnx2x_dma_free(&sc->gz_buf_dma); 9034 sc->gz_buf = NULL; 9035 9036 /*******************/ 9037 /* SLOW PATH QUEUE */ 9038 /*******************/ 9039 9040 bnx2x_dma_free(&sc->spq_dma); 9041 sc->spq = NULL; 9042 9043 /*************/ 9044 /* SLOW PATH */ 9045 /*************/ 9046 9047 bnx2x_dma_free(&sc->sp_dma); 9048 sc->sp = NULL; 9049 9050 /***************/ 9051 /* EVENT QUEUE */ 9052 /***************/ 9053 9054 bnx2x_dma_free(&sc->eq_dma); 9055 sc->eq = NULL; 9056 9057 /************************/ 9058 /* DEFAULT STATUS BLOCK */ 9059 /************************/ 9060 9061 bnx2x_dma_free(&sc->def_sb_dma); 9062 sc->def_sb = NULL; 9063 } 9064 } 9065 9066 /* 9067 * Previous driver DMAE transaction may have occurred when pre-boot stage 9068 * ended and boot began. This would invalidate the addresses of the 9069 * transaction, resulting in was-error bit set in the PCI causing all 9070 * hw-to-host PCIe transactions to timeout. 
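 * (On E1/E1H there is nothing to clean up here, which is why the code below is guarded by the CHIP_IS_E1x() check.)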
If this happened we want to clear 9071 * the interrupt which detected this from the pglueb and the was-done bit 9072 */ 9073 static void bnx2x_prev_interrupted_dmae(struct bnx2x_softc *sc) 9074 { 9075 uint32_t val; 9076 9077 if (!CHIP_IS_E1x(sc)) { 9078 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); 9079 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { 9080 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 9081 1 << SC_FUNC(sc)); 9082 } 9083 } 9084 } 9085 9086 static int bnx2x_prev_mcp_done(struct bnx2x_softc *sc) 9087 { 9088 uint32_t rc = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 9089 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 9090 if (!rc) { 9091 PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); 9092 return -1; 9093 } 9094 9095 return 0; 9096 } 9097 9098 static struct bnx2x_prev_list_node *bnx2x_prev_path_get_entry(struct bnx2x_softc *sc) 9099 { 9100 struct bnx2x_prev_list_node *tmp; 9101 9102 LIST_FOREACH(tmp, &bnx2x_prev_list, node) { 9103 if ((sc->pcie_bus == tmp->bus) && 9104 (sc->pcie_device == tmp->slot) && 9105 (SC_PATH(sc) == tmp->path)) { 9106 return tmp; 9107 } 9108 } 9109 9110 return NULL; 9111 } 9112 9113 static uint8_t bnx2x_prev_is_path_marked(struct bnx2x_softc *sc) 9114 { 9115 struct bnx2x_prev_list_node *tmp; 9116 int rc = FALSE; 9117 9118 rte_spinlock_lock(&bnx2x_prev_mtx); 9119 9120 tmp = bnx2x_prev_path_get_entry(sc); 9121 if (tmp) { 9122 if (tmp->aer) { 9123 PMD_DRV_LOG(DEBUG, sc, 9124 "Path %d/%d/%d was marked by AER", 9125 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 9126 } else { 9127 rc = TRUE; 9128 PMD_DRV_LOG(DEBUG, sc, 9129 "Path %d/%d/%d was already cleaned from previous drivers", 9130 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 9131 } 9132 } 9133 9134 rte_spinlock_unlock(&bnx2x_prev_mtx); 9135 9136 return rc; 9137 } 9138 9139 static int bnx2x_prev_mark_path(struct bnx2x_softc *sc, uint8_t after_undi) 9140 { 9141 struct bnx2x_prev_list_node *tmp; 9142 9143 rte_spinlock_lock(&bnx2x_prev_mtx); 9144 9145 /* Check whether the entry for this path already exists */ 9146 tmp = bnx2x_prev_path_get_entry(sc); 9147 if (tmp) { 9148 if (!tmp->aer) { 9149 PMD_DRV_LOG(DEBUG, sc, 9150 "Re-marking AER in path %d/%d/%d", 9151 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 9152 } else { 9153 PMD_DRV_LOG(DEBUG, sc, 9154 "Removing AER indication from path %d/%d/%d", 9155 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 9156 tmp->aer = 0; 9157 } 9158 9159 rte_spinlock_unlock(&bnx2x_prev_mtx); 9160 return 0; 9161 } 9162 9163 rte_spinlock_unlock(&bnx2x_prev_mtx); 9164 9165 /* Create an entry for this path and add it */ 9166 tmp = rte_malloc("", sizeof(struct bnx2x_prev_list_node), 9167 RTE_CACHE_LINE_SIZE); 9168 if (!tmp) { 9169 PMD_DRV_LOG(NOTICE, sc, "Failed to allocate 'bnx2x_prev_list_node'"); 9170 return -1; 9171 } 9172 9173 tmp->bus = sc->pcie_bus; 9174 tmp->slot = sc->pcie_device; 9175 tmp->path = SC_PATH(sc); 9176 tmp->aer = 0; 9177 tmp->undi = after_undi ? 
(1 << SC_PORT(sc)) : 0; 9178 9179 rte_spinlock_lock(&bnx2x_prev_mtx); 9180 9181 LIST_INSERT_HEAD(&bnx2x_prev_list, tmp, node); 9182 9183 rte_spinlock_unlock(&bnx2x_prev_mtx); 9184 9185 return 0; 9186 } 9187 9188 static int bnx2x_do_flr(struct bnx2x_softc *sc) 9189 { 9190 int i; 9191 9192 /* only E2 and onwards support FLR */ 9193 if (CHIP_IS_E1x(sc)) { 9194 PMD_DRV_LOG(WARNING, sc, "FLR not supported in E1H"); 9195 return -1; 9196 } 9197 9198 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 9199 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 9200 PMD_DRV_LOG(WARNING, sc, 9201 "FLR not supported by BC_VER: 0x%08x", 9202 sc->devinfo.bc_ver); 9203 return -1; 9204 } 9205 9206 /* Wait for Transaction Pending bit clean */ 9207 for (i = 0; i < 4; i++) { 9208 if (i) { 9209 DELAY(((1 << (i - 1)) * 100) * 1000); 9210 } 9211 9212 if (!bnx2x_is_pcie_pending(sc)) { 9213 goto clear; 9214 } 9215 } 9216 9217 PMD_DRV_LOG(NOTICE, sc, "PCIE transaction is not cleared, " 9218 "proceeding with reset anyway"); 9219 9220 clear: 9221 bnx2x_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); 9222 9223 return 0; 9224 } 9225 9226 struct bnx2x_mac_vals { 9227 uint32_t xmac_addr; 9228 uint32_t xmac_val; 9229 uint32_t emac_addr; 9230 uint32_t emac_val; 9231 uint32_t umac_addr; 9232 uint32_t umac_val; 9233 uint32_t bmac_addr; 9234 uint32_t bmac_val[2]; 9235 }; 9236 9237 static void 9238 bnx2x_prev_unload_close_mac(struct bnx2x_softc *sc, struct bnx2x_mac_vals *vals) 9239 { 9240 uint32_t val, base_addr, offset, mask, reset_reg; 9241 uint8_t mac_stopped = FALSE; 9242 uint8_t port = SC_PORT(sc); 9243 uint32_t wb_data[2]; 9244 9245 /* reset addresses as they also mark which values were changed */ 9246 vals->bmac_addr = 0; 9247 vals->umac_addr = 0; 9248 vals->xmac_addr = 0; 9249 vals->emac_addr = 0; 9250 9251 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); 9252 9253 if (!CHIP_IS_E3(sc)) { 9254 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 9255 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 9256 if ((mask & reset_reg) && val) { 9257 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM 9258 : NIG_REG_INGRESS_BMAC0_MEM; 9259 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL 9260 : BIGMAC_REGISTER_BMAC_CONTROL; 9261 9262 /* 9263 * use rd/wr since we cannot use dmae. This is safe 9264 * since MCP won't access the bus due to the request 9265 * to unload, and no function on the path can be 9266 * loaded at this time. 9267 */ 9268 wb_data[0] = REG_RD(sc, base_addr + offset); 9269 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); 9270 vals->bmac_addr = base_addr + offset; 9271 vals->bmac_val[0] = wb_data[0]; 9272 vals->bmac_val[1] = wb_data[1]; 9273 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; 9274 REG_WR(sc, vals->bmac_addr, wb_data[0]); 9275 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); 9276 } 9277 9278 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc) * 4; 9279 vals->emac_val = REG_RD(sc, vals->emac_addr); 9280 REG_WR(sc, vals->emac_addr, 0); 9281 mac_stopped = TRUE; 9282 } else { 9283 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 9284 base_addr = SC_PORT(sc) ? 
GRCBASE_XMAC1 : GRCBASE_XMAC0; 9285 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); 9286 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, 9287 val & ~(1 << 1)); 9288 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, 9289 val | (1 << 1)); 9290 vals->xmac_addr = base_addr + XMAC_REG_CTRL; 9291 vals->xmac_val = REG_RD(sc, vals->xmac_addr); 9292 REG_WR(sc, vals->xmac_addr, 0); 9293 mac_stopped = TRUE; 9294 } 9295 9296 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 9297 if (mask & reset_reg) { 9298 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 9299 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; 9300 vals->umac_val = REG_RD(sc, vals->umac_addr); 9301 REG_WR(sc, vals->umac_addr, 0); 9302 mac_stopped = TRUE; 9303 } 9304 } 9305 9306 if (mac_stopped) { 9307 DELAY(20000); 9308 } 9309 } 9310 9311 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 9312 #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) 9313 #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 9314 #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 9315 9316 static void 9317 bnx2x_prev_unload_undi_inc(struct bnx2x_softc *sc, uint8_t port, uint8_t inc) 9318 { 9319 uint16_t rcq, bd; 9320 uint32_t tmp_reg = REG_RD(sc, BNX2X_PREV_UNDI_PROD_ADDR(port)); 9321 9322 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; 9323 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; 9324 9325 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); 9326 REG_WR(sc, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg); 9327 } 9328 9329 static int bnx2x_prev_unload_common(struct bnx2x_softc *sc) 9330 { 9331 uint32_t reset_reg, tmp_reg = 0, rc; 9332 uint8_t prev_undi = FALSE; 9333 struct bnx2x_mac_vals mac_vals; 9334 uint32_t timer_count = 1000; 9335 uint32_t prev_brb; 9336 9337 /* 9338 * It is possible a previous function received 'common' answer, 9339 * but hasn't loaded yet, therefore creating a scenario of 9340 * multiple functions receiving 'common' on the same path. 9341 */ 9342 memset(&mac_vals, 0, sizeof(mac_vals)); 9343 9344 if (bnx2x_prev_is_path_marked(sc)) { 9345 return bnx2x_prev_mcp_done(sc); 9346 } 9347 9348 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); 9349 9350 /* Reset should be performed after BRB is emptied */ 9351 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 9352 /* Close the MAC Rx to prevent BRB from filling up */ 9353 bnx2x_prev_unload_close_mac(sc, &mac_vals); 9354 9355 /* close LLH filters towards the BRB */ 9356 elink_set_rx_filter(&sc->link_params, 0); 9357 9358 /* 9359 * Check if the UNDI driver was previously loaded. 
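 * If it was, the BRB drain loop below also bumps the UNDI Rx CQ/BD producers on each iteration (bnx2x_prev_unload_undi_inc) so the old driver state keeps consuming while the BRB empties.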
9360 * UNDI driver initializes CID offset for normal bell to 0x7 9361 */ 9362 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 9363 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); 9364 if (tmp_reg == 0x7) { 9365 PMD_DRV_LOG(DEBUG, sc, "UNDI previously loaded"); 9366 prev_undi = TRUE; 9367 /* clear the UNDI indication */ 9368 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); 9369 /* clear possible idle check errors */ 9370 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); 9371 } 9372 } 9373 9374 /* wait until BRB is empty */ 9375 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 9376 while (timer_count) { 9377 prev_brb = tmp_reg; 9378 9379 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 9380 if (!tmp_reg) { 9381 break; 9382 } 9383 9384 PMD_DRV_LOG(DEBUG, sc, "BRB still has 0x%08x", tmp_reg); 9385 9386 /* reset timer as long as BRB actually gets emptied */ 9387 if (prev_brb > tmp_reg) { 9388 timer_count = 1000; 9389 } else { 9390 timer_count--; 9391 } 9392 9393 /* If UNDI resides in memory, manually increment it */ 9394 if (prev_undi) { 9395 bnx2x_prev_unload_undi_inc(sc, SC_PORT(sc), 1); 9396 } 9397 9398 DELAY(10); 9399 } 9400 9401 if (!timer_count) { 9402 PMD_DRV_LOG(NOTICE, sc, "Failed to empty BRB"); 9403 } 9404 } 9405 9406 /* No packets are in the pipeline, path is ready for reset */ 9407 bnx2x_reset_common(sc); 9408 9409 if (mac_vals.xmac_addr) { 9410 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); 9411 } 9412 if (mac_vals.umac_addr) { 9413 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); 9414 } 9415 if (mac_vals.emac_addr) { 9416 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); 9417 } 9418 if (mac_vals.bmac_addr) { 9419 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); 9420 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); 9421 } 9422 9423 rc = bnx2x_prev_mark_path(sc, prev_undi); 9424 if (rc) { 9425 bnx2x_prev_mcp_done(sc); 9426 return rc; 9427 } 9428 9429 return bnx2x_prev_mcp_done(sc); 9430 } 9431 9432 static int bnx2x_prev_unload_uncommon(struct bnx2x_softc *sc) 9433 { 9434 int rc; 9435 9436 /* Test if previous unload process was already finished for this path */ 9437 if (bnx2x_prev_is_path_marked(sc)) { 9438 return bnx2x_prev_mcp_done(sc); 9439 } 9440 9441 /* 9442 * If function has FLR capabilities, and existing FW version matches 9443 * the one required, then FLR will be sufficient to clean any residue 9444 * left by previous driver 9445 */ 9446 rc = bnx2x_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); 9447 if (!rc) { 9448 /* fw version is good */ 9449 rc = bnx2x_do_flr(sc); 9450 } 9451 9452 if (!rc) { 9453 /* FLR was performed */ 9454 return 0; 9455 } 9456 9457 PMD_DRV_LOG(INFO, sc, "Could not FLR"); 9458 9459 /* Close the MCP request, return failure */ 9460 rc = bnx2x_prev_mcp_done(sc); 9461 if (!rc) { 9462 rc = BNX2X_PREV_WAIT_NEEDED; 9463 } 9464 9465 return rc; 9466 } 9467 9468 static int bnx2x_prev_unload(struct bnx2x_softc *sc) 9469 { 9470 int time_counter = 10; 9471 uint32_t fw, hw_lock_reg, hw_lock_val; 9472 uint32_t rc = 0; 9473 9474 PMD_INIT_FUNC_TRACE(sc); 9475 9476 /* 9477 * Clear HW from errors which may have resulted from an interrupted 9478 * DMAE transaction. 9479 */ 9480 bnx2x_prev_interrupted_dmae(sc); 9481 9482 /* Release previously held locks */ 9483 hw_lock_reg = (SC_FUNC(sc) <= 5) ? 
9484 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : 9485 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); 9486 9487 hw_lock_val = (REG_RD(sc, hw_lock_reg)); 9488 if (hw_lock_val) { 9489 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 9490 PMD_DRV_LOG(DEBUG, sc, "Releasing previously held NVRAM lock\n"); 9491 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, 9492 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); 9493 } 9494 PMD_DRV_LOG(DEBUG, sc, "Releasing previously held HW lock\n"); 9495 REG_WR(sc, hw_lock_reg, 0xffffffff); 9496 } 9497 9498 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { 9499 PMD_DRV_LOG(DEBUG, sc, "Releasing previously held ALR\n"); 9500 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); 9501 } 9502 9503 do { 9504 /* Lock MCP using an unload request */ 9505 fw = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 9506 if (!fw) { 9507 PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); 9508 rc = -1; 9509 break; 9510 } 9511 9512 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { 9513 rc = bnx2x_prev_unload_common(sc); 9514 break; 9515 } 9516 9517 /* non-common reply from MCP might require looping */ 9518 rc = bnx2x_prev_unload_uncommon(sc); 9519 if (rc != BNX2X_PREV_WAIT_NEEDED) { 9520 break; 9521 } 9522 9523 DELAY(20000); 9524 } while (--time_counter); 9525 9526 if (!time_counter || rc) { 9527 PMD_DRV_LOG(NOTICE, sc, "Failed to unload previous driver!"); 9528 rc = -1; 9529 } 9530 9531 return rc; 9532 } 9533 9534 static void 9535 bnx2x_dcbx_set_state(struct bnx2x_softc *sc, uint8_t dcb_on, uint32_t dcbx_enabled) 9536 { 9537 if (!CHIP_IS_E1x(sc)) { 9538 sc->dcb_state = dcb_on; 9539 sc->dcbx_enabled = dcbx_enabled; 9540 } else { 9541 sc->dcb_state = FALSE; 9542 sc->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID; 9543 } 9544 PMD_DRV_LOG(DEBUG, sc, 9545 "DCB state [%s:%s]", 9546 dcb_on ? "ON" : "OFF", 9547 (dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ? "user-mode" : 9548 (dcbx_enabled == 9549 BNX2X_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" 9550 : (dcbx_enabled == 9551 BNX2X_DCBX_ENABLED_ON_NEG_ON) ? 
9552 "on-chip with negotiation" : "invalid"); 9553 } 9554 9555 static int bnx2x_set_qm_cid_count(struct bnx2x_softc *sc) 9556 { 9557 int cid_count = BNX2X_L2_MAX_CID(sc); 9558 9559 if (CNIC_SUPPORT(sc)) { 9560 cid_count += CNIC_CID_MAX; 9561 } 9562 9563 return roundup(cid_count, QM_CID_ROUND); 9564 } 9565 9566 static void bnx2x_init_multi_cos(struct bnx2x_softc *sc) 9567 { 9568 int pri, cos; 9569 9570 uint32_t pri_map = 0; 9571 9572 for (pri = 0; pri < BNX2X_MAX_PRIORITY; pri++) { 9573 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); 9574 if (cos < sc->max_cos) { 9575 sc->prio_to_cos[pri] = cos; 9576 } else { 9577 PMD_DRV_LOG(WARNING, sc, 9578 "Invalid COS %d for priority %d " 9579 "(max COS is %d), setting to 0", cos, pri, 9580 (sc->max_cos - 1)); 9581 sc->prio_to_cos[pri] = 0; 9582 } 9583 } 9584 } 9585 9586 static int bnx2x_pci_get_caps(struct bnx2x_softc *sc) 9587 { 9588 struct { 9589 uint8_t id; 9590 uint8_t next; 9591 } pci_cap; 9592 uint16_t status; 9593 struct bnx2x_pci_cap *cap; 9594 9595 cap = sc->pci_caps = rte_zmalloc("caps", sizeof(struct bnx2x_pci_cap), 9596 RTE_CACHE_LINE_SIZE); 9597 if (!cap) { 9598 PMD_DRV_LOG(NOTICE, sc, "Failed to allocate memory"); 9599 return -ENOMEM; 9600 } 9601 9602 #ifndef __FreeBSD__ 9603 pci_read(sc, PCI_STATUS, &status, 2); 9604 if (!(status & PCI_STATUS_CAP_LIST)) { 9605 #else 9606 pci_read(sc, PCIR_STATUS, &status, 2); 9607 if (!(status & PCIM_STATUS_CAPPRESENT)) { 9608 #endif 9609 PMD_DRV_LOG(NOTICE, sc, "PCIe capability reading failed"); 9610 return -1; 9611 } 9612 9613 #ifndef __FreeBSD__ 9614 pci_read(sc, PCI_CAPABILITY_LIST, &pci_cap.next, 1); 9615 #else 9616 pci_read(sc, PCIR_CAP_PTR, &pci_cap.next, 1); 9617 #endif 9618 while (pci_cap.next) { 9619 cap->addr = pci_cap.next & ~3; 9620 pci_read(sc, pci_cap.next & ~3, &pci_cap, 2); 9621 if (pci_cap.id == 0xff) 9622 break; 9623 cap->id = pci_cap.id; 9624 cap->type = BNX2X_PCI_CAP; 9625 cap->next = rte_zmalloc("pci_cap", 9626 sizeof(struct bnx2x_pci_cap), 9627 RTE_CACHE_LINE_SIZE); 9628 if (!cap->next) { 9629 PMD_DRV_LOG(NOTICE, sc, "Failed to allocate memory"); 9630 return -ENOMEM; 9631 } 9632 cap = cap->next; 9633 } 9634 9635 return 0; 9636 } 9637 9638 static void bnx2x_init_rte(struct bnx2x_softc *sc) 9639 { 9640 if (IS_VF(sc)) { 9641 sc->max_tx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF, 9642 sc->igu_sb_cnt); 9643 sc->max_rx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF, 9644 sc->igu_sb_cnt); 9645 } else { 9646 sc->max_rx_queues = BNX2X_MAX_RSS_COUNT(sc); 9647 sc->max_tx_queues = sc->max_rx_queues; 9648 } 9649 } 9650 9651 #define FW_HEADER_LEN 104 9652 #define FW_NAME_57711 "/lib/firmware/bnx2x/bnx2x-e1h-7.2.51.0.fw" 9653 #define FW_NAME_57810 "/lib/firmware/bnx2x/bnx2x-e2-7.2.51.0.fw" 9654 9655 void bnx2x_load_firmware(struct bnx2x_softc *sc) 9656 { 9657 const char *fwname; 9658 int f; 9659 struct stat st; 9660 9661 fwname = sc->devinfo.device_id == CHIP_NUM_57711 9662 ? 
FW_NAME_57711 : FW_NAME_57810; 9663 f = open(fwname, O_RDONLY); 9664 if (f < 0) { 9665 PMD_DRV_LOG(NOTICE, sc, "Can't open firmware file"); 9666 return; 9667 } 9668 9669 if (fstat(f, &st) < 0) { 9670 PMD_DRV_LOG(NOTICE, sc, "Can't stat firmware file"); 9671 close(f); 9672 return; 9673 } 9674 9675 sc->firmware = rte_zmalloc("bnx2x_fw", st.st_size, RTE_CACHE_LINE_SIZE); 9676 if (!sc->firmware) { 9677 PMD_DRV_LOG(NOTICE, sc, "Can't allocate memory for firmware"); 9678 close(f); 9679 return; 9680 } 9681 9682 if (read(f, sc->firmware, st.st_size) != st.st_size) { 9683 PMD_DRV_LOG(NOTICE, sc, "Can't read firmware data"); 9684 close(f); 9685 return; 9686 } 9687 close(f); 9688 9689 sc->fw_len = st.st_size; 9690 if (sc->fw_len < FW_HEADER_LEN) { 9691 PMD_DRV_LOG(NOTICE, sc, 9692 "Invalid fw size: %" PRIu64, sc->fw_len); 9693 return; 9694 } 9695 PMD_DRV_LOG(DEBUG, sc, "fw_len = %" PRIu64, sc->fw_len); 9696 } 9697 9698 static void 9699 bnx2x_data_to_init_ops(uint8_t * data, struct raw_op *dst, uint32_t len) 9700 { 9701 uint32_t *src = (uint32_t *) data; 9702 uint32_t i, j, tmp; 9703 9704 for (i = 0, j = 0; i < len / 8; ++i, j += 2) { 9705 tmp = rte_be_to_cpu_32(src[j]); 9706 dst[i].op = (tmp >> 24) & 0xFF; 9707 dst[i].offset = tmp & 0xFFFFFF; 9708 dst[i].raw_data = rte_be_to_cpu_32(src[j + 1]); 9709 } 9710 } 9711 9712 static void 9713 bnx2x_data_to_init_offsets(uint8_t * data, uint16_t * dst, uint32_t len) 9714 { 9715 uint16_t *src = (uint16_t *) data; 9716 uint32_t i; 9717 9718 for (i = 0; i < len / 2; ++i) 9719 dst[i] = rte_be_to_cpu_16(src[i]); 9720 } 9721 9722 static void bnx2x_data_to_init_data(uint8_t * data, uint32_t * dst, uint32_t len) 9723 { 9724 uint32_t *src = (uint32_t *) data; 9725 uint32_t i; 9726 9727 for (i = 0; i < len / 4; ++i) 9728 dst[i] = rte_be_to_cpu_32(src[i]); 9729 } 9730 9731 static void bnx2x_data_to_iro_array(uint8_t * data, struct iro *dst, uint32_t len) 9732 { 9733 uint32_t *src = (uint32_t *) data; 9734 uint32_t i, j, tmp; 9735 9736 for (i = 0, j = 0; i < len / sizeof(struct iro); ++i, ++j) { 9737 dst[i].base = rte_be_to_cpu_32(src[j++]); 9738 tmp = rte_be_to_cpu_32(src[j]); 9739 dst[i].m1 = (tmp >> 16) & 0xFFFF; 9740 dst[i].m2 = tmp & 0xFFFF; 9741 ++j; 9742 tmp = rte_be_to_cpu_32(src[j]); 9743 dst[i].m3 = (tmp >> 16) & 0xFFFF; 9744 dst[i].size = tmp & 0xFFFF; 9745 } 9746 } 9747 9748 /* 9749 * Device attach function. 9750 * 9751 * Allocates device resources, performs secondary chip identification, and 9752 * initializes driver instance variables. This function is called from driver 9753 * load after a successful probe. 9754 * 9755 * Returns: 9756 * 0 = Success, !0 = Failure 9757 */ 9758 int bnx2x_attach(struct bnx2x_softc *sc) 9759 { 9760 int rc; 9761 9762 PMD_DRV_LOG(DEBUG, sc, "Starting attach..."); 9763 9764 rc = bnx2x_pci_get_caps(sc); 9765 if (rc) { 9766 PMD_DRV_LOG(NOTICE, sc, "PCIe caps reading failed"); 9767 return rc; 9768 } 9769 9770 sc->state = BNX2X_STATE_CLOSED; 9771 9772 pci_write_long(sc, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); 9773 9774 sc->igu_base_addr = IS_VF(sc) ?
PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; 9775 9776 /* get PCI capabilities */ 9777 bnx2x_probe_pci_caps(sc); 9778 9779 if (sc->devinfo.pcie_msix_cap_reg != 0) { 9780 uint32_t val; 9781 pci_read(sc, 9782 (sc->devinfo.pcie_msix_cap_reg + PCIR_MSIX_CTRL), &val, 9783 2); 9784 sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE) + 1; 9785 } else { 9786 sc->igu_sb_cnt = 1; 9787 } 9788 9789 /* Init RTE stuff */ 9790 bnx2x_init_rte(sc); 9791 9792 if (IS_PF(sc)) { 9793 /* Enable internal target-read (in case we are probed after PF 9794 * FLR). Must be done prior to any BAR read access. Only for 9795 * 57712 and up 9796 */ 9797 if (!CHIP_IS_E1x(sc)) { 9798 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 9799 1); 9800 DELAY(200000); 9801 } 9802 9803 /* get device info and set params */ 9804 if (bnx2x_get_device_info(sc) != 0) { 9805 PMD_DRV_LOG(NOTICE, sc, "Failed to get device info"); 9806 return -ENXIO; 9807 } 9808 9809 /* get phy settings from shmem and 'and' against admin settings */ 9810 bnx2x_get_phy_info(sc); 9811 } else { 9812 /* MAC of VF is left unfilled, PF should set it for VF */ 9813 memset(sc->link_params.mac_addr, 0, RTE_ETHER_ADDR_LEN); 9814 } 9815 9816 sc->wol = 0; 9817 9818 /* set the default MTU (changed via ifconfig) */ 9819 sc->mtu = RTE_ETHER_MTU; 9820 9821 bnx2x_set_modes_bitmap(sc); 9822 9823 /* need to reset chip if UNDI was active */ 9824 if (IS_PF(sc) && !BNX2X_NOMCP(sc)) { 9825 /* init fw_seq */ 9826 sc->fw_seq = 9827 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 9828 DRV_MSG_SEQ_NUMBER_MASK); 9829 PMD_DRV_LOG(DEBUG, sc, "prev unload fw_seq 0x%04x", 9830 sc->fw_seq); 9831 bnx2x_prev_unload(sc); 9832 } 9833 9834 bnx2x_dcbx_set_state(sc, FALSE, BNX2X_DCBX_ENABLED_OFF); 9835 9836 /* calculate qm_cid_count */ 9837 sc->qm_cid_count = bnx2x_set_qm_cid_count(sc); 9838 9839 sc->max_cos = 1; 9840 bnx2x_init_multi_cos(sc); 9841 9842 return 0; 9843 } 9844 9845 static void 9846 bnx2x_igu_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t segment, 9847 uint16_t index, uint8_t op, uint8_t update) 9848 { 9849 uint32_t igu_addr = sc->igu_base_addr; 9850 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8; 9851 bnx2x_igu_ack_sb_gen(sc, segment, index, op, update, igu_addr); 9852 } 9853 9854 static void 9855 bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t storm, 9856 uint16_t index, uint8_t op, uint8_t update) 9857 { 9858 if (unlikely(sc->devinfo.int_block == INT_BLOCK_HC)) 9859 bnx2x_hc_ack_sb(sc, igu_sb_id, storm, index, op, update); 9860 else { 9861 uint8_t segment; 9862 if (CHIP_INT_MODE_IS_BC(sc)) { 9863 segment = storm; 9864 } else if (igu_sb_id != sc->igu_dsb_id) { 9865 segment = IGU_SEG_ACCESS_DEF; 9866 } else if (storm == ATTENTION_ID) { 9867 segment = IGU_SEG_ACCESS_ATTN; 9868 } else { 9869 segment = IGU_SEG_ACCESS_DEF; 9870 } 9871 bnx2x_igu_ack_sb(sc, igu_sb_id, segment, index, op, update); 9872 } 9873 } 9874 9875 static void 9876 bnx2x_igu_clear_sb_gen(struct bnx2x_softc *sc, uint8_t func, uint8_t idu_sb_id, 9877 uint8_t is_pf) 9878 { 9879 uint32_t data, ctl, cnt = 100; 9880 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 9881 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 9882 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + 9883 (idu_sb_id / 32) * 4; 9884 uint32_t sb_bit = 1 << (idu_sb_id % 32); 9885 uint32_t func_encode = func | 9886 (is_pf ?
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 9887 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 9888 9889 /* Not supported in BC mode */ 9890 if (CHIP_INT_MODE_IS_BC(sc)) { 9891 return; 9892 } 9893 9894 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << 9895 IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 9896 IGU_REGULAR_CLEANUP_SET | IGU_REGULAR_BCLEANUP); 9897 9898 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | 9899 (func_encode << IGU_CTRL_REG_FID_SHIFT) | 9900 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); 9901 9902 REG_WR(sc, igu_addr_data, data); 9903 9904 mb(); 9905 9906 PMD_DRV_LOG(DEBUG, sc, "write 0x%08x to IGU(via GRC) addr 0x%x", 9907 ctl, igu_addr_ctl); 9908 REG_WR(sc, igu_addr_ctl, ctl); 9909 9910 mb(); 9911 9912 /* wait for clean up to finish */ 9913 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { 9914 DELAY(20000); 9915 } 9916 9917 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { 9918 PMD_DRV_LOG(DEBUG, sc, 9919 "Unable to finish IGU cleanup: " 9920 "idu_sb_id %d offset %d bit %d (cnt %d)", 9921 idu_sb_id, idu_sb_id / 32, idu_sb_id % 32, cnt); 9922 } 9923 } 9924 9925 static void bnx2x_igu_clear_sb(struct bnx2x_softc *sc, uint8_t idu_sb_id) 9926 { 9927 bnx2x_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); 9928 } 9929 9930 /*******************/ 9931 /* ECORE CALLBACKS */ 9932 /*******************/ 9933 9934 static void bnx2x_reset_common(struct bnx2x_softc *sc) 9935 { 9936 uint32_t val = 0x1400; 9937 9938 PMD_INIT_FUNC_TRACE(sc); 9939 9940 /* reset_common */ 9941 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 9942 0xd3ffff7f); 9943 9944 if (CHIP_IS_E3(sc)) { 9945 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 9946 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 9947 } 9948 9949 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); 9950 } 9951 9952 static void bnx2x_common_init_phy(struct bnx2x_softc *sc) 9953 { 9954 uint32_t shmem_base[2]; 9955 uint32_t shmem2_base[2]; 9956 9957 /* Avoid common init in case MFW supports LFA */ 9958 if (SHMEM2_RD(sc, size) > 9959 (uint32_t) offsetof(struct shmem2_region, 9960 lfa_host_addr[SC_PORT(sc)])) { 9961 return; 9962 } 9963 9964 shmem_base[0] = sc->devinfo.shmem_base; 9965 shmem2_base[0] = sc->devinfo.shmem2_base; 9966 9967 if (!CHIP_IS_E1x(sc)) { 9968 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); 9969 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); 9970 } 9971 9972 bnx2x_acquire_phy_lock(sc); 9973 elink_common_init_phy(sc, shmem_base, shmem2_base, 9974 sc->devinfo.chip_id, 0); 9975 bnx2x_release_phy_lock(sc); 9976 } 9977 9978 static void bnx2x_pf_disable(struct bnx2x_softc *sc) 9979 { 9980 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 9981 9982 val &= ~IGU_PF_CONF_FUNC_EN; 9983 9984 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 9985 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 9986 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); 9987 } 9988 9989 static void bnx2x_init_pxp(struct bnx2x_softc *sc) 9990 { 9991 uint16_t devctl; 9992 int r_order, w_order; 9993 9994 devctl = bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL); 9995 9996 w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); 9997 r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); 9998 9999 ecore_init_pxp_arb(sc, r_order, w_order); 10000 } 10001 10002 static uint32_t bnx2x_get_pretend_reg(struct bnx2x_softc *sc) 10003 { 10004 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; 10005 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); 10006 return base + (SC_ABS_FUNC(sc)) * stride; 10007 } 10008 10009 /* 
10010 * Called only on E1H or E2. 10011 * When pretending to be PF, the pretend value is the function number 0..7. 10012 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID 10013 * combination. 10014 */ 10015 static int bnx2x_pretend_func(struct bnx2x_softc *sc, uint16_t pretend_func_val) 10016 { 10017 uint32_t pretend_reg; 10018 10019 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) 10020 return -1; 10021 10022 /* get my own pretend register */ 10023 pretend_reg = bnx2x_get_pretend_reg(sc); 10024 REG_WR(sc, pretend_reg, pretend_func_val); 10025 REG_RD(sc, pretend_reg); 10026 return 0; 10027 } 10028 10029 static void bnx2x_setup_fan_failure_detection(struct bnx2x_softc *sc) 10030 { 10031 int is_required; 10032 uint32_t val; 10033 int port; 10034 10035 is_required = 0; 10036 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & 10037 SHARED_HW_CFG_FAN_FAILURE_MASK); 10038 10039 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { 10040 is_required = 1; 10041 } 10042 /* 10043 * The fan failure mechanism is usually related to the PHY type since 10044 * the power consumption of the board is affected by the PHY. Currently, 10045 * fan is required for most designs with SFX7101, BNX2X8727 and BNX2X8481. 10046 */ 10047 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { 10048 for (port = PORT_0; port < PORT_MAX; port++) { 10049 is_required |= elink_fan_failure_det_req(sc, 10050 sc-> 10051 devinfo.shmem_base, 10052 sc-> 10053 devinfo.shmem2_base, 10054 port); 10055 } 10056 } 10057 10058 if (is_required == 0) { 10059 return; 10060 } 10061 10062 /* Fan failure is indicated by SPIO 5 */ 10063 bnx2x_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 10064 10065 /* set to active low mode */ 10066 val = REG_RD(sc, MISC_REG_SPIO_INT); 10067 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 10068 REG_WR(sc, MISC_REG_SPIO_INT, val); 10069 10070 /* enable interrupt to signal the IGU */ 10071 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 10072 val |= MISC_SPIO_SPIO5; 10073 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); 10074 } 10075 10076 static void bnx2x_enable_blocks_attention(struct bnx2x_softc *sc) 10077 { 10078 uint32_t val; 10079 10080 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 10081 if (!CHIP_IS_E1x(sc)) { 10082 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); 10083 } else { 10084 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); 10085 } 10086 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 10087 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 10088 /* 10089 * mask read length error interrupts in brb for parser 10090 * (parsing unit and 'checksum and crc' unit) 10091 * these errors are legal (PU reads fixed length and CAC can cause 10092 * read length error on truncated packets) 10093 */ 10094 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); 10095 REG_WR(sc, QM_REG_QM_INT_MASK, 0); 10096 REG_WR(sc, TM_REG_TM_INT_MASK, 0); 10097 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); 10098 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); 10099 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); 10100 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ 10101 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ 10102 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); 10103 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); 10104 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); 10105 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ 10106 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ 10107 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 10108 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); 10109 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); 10110 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); 10111 /* REG_WR(sc, 
CSEM_REG_CSEM_INT_MASK_0, 0); */ 10112 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ 10113 10114 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 10115 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 10116 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); 10117 if (!CHIP_IS_E1x(sc)) { 10118 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 10119 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); 10120 } 10121 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); 10122 10123 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); 10124 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); 10125 REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); 10126 /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ 10127 10128 if (!CHIP_IS_E1x(sc)) { 10129 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 10130 REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 10131 } 10132 10133 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); 10134 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); 10135 /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ 10136 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 10137 } 10138 10139 /** 10140 * bnx2x_init_hw_common - initialize the HW at the COMMON phase. 10141 * 10142 * @sc: driver handle 10143 */ 10144 static int bnx2x_init_hw_common(struct bnx2x_softc *sc) 10145 { 10146 uint8_t abs_func_id; 10147 uint32_t val; 10148 10149 PMD_DRV_LOG(DEBUG, sc, 10150 "starting common init for func %d", SC_ABS_FUNC(sc)); 10151 10152 /* 10153 * take the RESET lock to protect undi_unload flow from accessing 10154 * registers while we are resetting the chip 10155 */ 10156 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 10157 10158 bnx2x_reset_common(sc); 10159 10160 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); 10161 10162 val = 0xfffc; 10163 if (CHIP_IS_E3(sc)) { 10164 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 10165 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 10166 } 10167 10168 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); 10169 10170 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 10171 10172 ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); 10173 10174 if (!CHIP_IS_E1x(sc)) { 10175 /* 10176 * 4-port mode or 2-port mode we need to turn off master-enable for 10177 * everyone. After that we turn it back on for self. 
So, we disregard 10178 * multi-function, and always disable all functions on the given path, 10179 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 10180 */ 10181 for (abs_func_id = SC_PATH(sc); 10182 abs_func_id < (E2_FUNC_MAX * 2); abs_func_id += 2) { 10183 if (abs_func_id == SC_ABS_FUNC(sc)) { 10184 REG_WR(sc, 10185 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 10186 1); 10187 continue; 10188 } 10189 10190 bnx2x_pretend_func(sc, abs_func_id); 10191 10192 /* clear pf enable */ 10193 bnx2x_pf_disable(sc); 10194 10195 bnx2x_pretend_func(sc, SC_ABS_FUNC(sc)); 10196 } 10197 } 10198 10199 ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); 10200 10201 ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); 10202 bnx2x_init_pxp(sc); 10203 10204 #ifdef __BIG_ENDIAN 10205 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); 10206 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); 10207 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); 10208 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); 10209 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); 10210 /* make sure this value is 0 */ 10211 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); 10212 10213 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); 10214 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); 10215 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); 10216 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); 10217 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 10218 #endif 10219 10220 ecore_ilt_init_page_size(sc, INITOP_SET); 10221 10222 if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { 10223 REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 10224 } 10225 10226 /* let the HW do it's magic... */ 10227 DELAY(100000); 10228 10229 /* finish PXP init */ 10230 10231 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); 10232 if (val != 1) { 10233 PMD_DRV_LOG(NOTICE, sc, "PXP2 CFG failed"); 10234 return -1; 10235 } 10236 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); 10237 if (val != 1) { 10238 PMD_DRV_LOG(NOTICE, sc, "PXP2 RD_INIT failed"); 10239 return -1; 10240 } 10241 10242 /* 10243 * Timer bug workaround for E2 only. We need to set the entire ILT to have 10244 * entries with value "0" and valid bit on. This needs to be done by the 10245 * first PF that is loaded in a path (i.e. common phase) 10246 */ 10247 if (!CHIP_IS_E1x(sc)) { 10248 /* 10249 * In E2 there is a bug in the timers block that can cause function 6 / 7 10250 * (i.e. vnic3) to start even if it is marked as "scan-off". 10251 * This occurs when a different function (func2,3) is being marked 10252 * as "scan-off". Real-life scenario for example: if a driver is being 10253 * load-unloaded while func6,7 are down. This will cause the timer to access 10254 * the ilt, translate to a logical address and send a request to read/write. 10255 * Since the ilt for the function that is down is not valid, this will cause 10256 * a translation error which is unrecoverable. 10257 * The Workaround is intended to make sure that when this happens nothing 10258 * fatal will occur. The workaround: 10259 * 1. First PF driver which loads on a path will: 10260 * a. After taking the chip out of reset, by using pretend, 10261 * it will write "0" to the following registers of 10262 * the other vnics. 10263 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 10264 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); 10265 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); 10266 * And for itself it will write '1' to 10267 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable 10268 * dmae-operations (writing to pram for example.) 10269 * note: can be done for only function 6,7 but cleaner this 10270 * way. 10271 * b. Write zero+valid to the entire ILT. 
	 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
	 *    VNIC3 (of that port). The range allocated will be the
	 *    entire ILT. This is needed to prevent ILT range error.
	 * 2. Any PF driver load flow:
	 *    a. ILT update with the physical addresses of the allocated
	 *       logical pages.
	 *    b. Wait 20msec. - note that this timeout is needed to make
	 *       sure there are no requests in one of the PXP internal
	 *       queues with "old" ILT addresses.
	 *    c. PF enable in the PGLC.
	 *    d. Clear the was_error of the PF in the PGLC. (could have
	 *       occurred while driver was down)
	 *    e. PF enable in the CFC (WEAK + STRONG)
	 *    f. Timers scan enable
	 * 3. PF driver unload flow:
	 *    a. Clear the Timers scan_en.
	 *    b. Polling for scan_on=0 for that PF.
	 *    c. Clear the PF enable bit in the PXP.
	 *    d. Clear the PF enable in the CFC (WEAK + STRONG)
	 *    e. Write zero+valid to all ILT entries (The valid bit must
	 *       stay set)
	 *    f. If this is VNIC 3 of a port then also init
	 *       first_timers_ilt_entry to zero and last_timers_ilt_entry
	 *       to the last entry in the ILT.
	 *
	 * Notes:
	 * Currently the PF error in the PGLC is non-recoverable.
	 * In the future there will be a recovery routine for this error.
	 * Currently attention is masked.
	 * Having an MCP lock on the load/unload process does not guarantee that
	 * there is no Timer disable during Func6/7 enable. This is because the
	 * Timers scan is currently being cleared by the MCP on FLR.
	 * Step 2.d can be done only for PF6/7 and the driver can also check if
	 * there is an error before clearing it. But the flow above is simpler
	 * and more general.
	 * All ILT entries are written by zero+valid and not just PF6/7
	 * ILT entries since in the future the ILT entries allocation for
	 * PF-s might be dynamic.
	 */
		struct ilt_client_info ilt_cli;
		struct ecore_ilt ilt;

		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		memset(&ilt, 0, sizeof(struct ecore_ilt));

		/* initialize dummy TM client */
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		/*
		 * Step 1: set zeroes to all ilt page entries with valid bit on
		 * Step 2: set the timers first/last ilt entry to point
		 * to the entire range to prevent ILT range error for 3rd/4th
		 * vnic (this code assumes existence of the vnic)
		 *
		 * both steps performed by call to ecore_ilt_client_init_op()
		 * with dummy TM client
		 *
		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and its counterpart are split registers
		 */

		bnx2x_pretend_func(sc, (SC_PATH(sc) + 6));
		ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
		bnx2x_pretend_func(sc, SC_ABS_FUNC(sc));

		REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
	}

	REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);

	if (!CHIP_IS_E1x(sc)) {
		int factor = 0;

		ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
		ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);

		/* let the HW do its magic...
*/ 10354 do { 10355 DELAY(200000); 10356 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); 10357 } while (factor-- && (val != 1)); 10358 10359 if (val != 1) { 10360 PMD_DRV_LOG(NOTICE, sc, "ATC_INIT failed"); 10361 return -1; 10362 } 10363 } 10364 10365 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); 10366 10367 /* clean the DMAE memory */ 10368 sc->dmae_ready = 1; 10369 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8); 10370 10371 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); 10372 10373 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); 10374 10375 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); 10376 10377 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); 10378 10379 bnx2x_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); 10380 bnx2x_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); 10381 bnx2x_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); 10382 bnx2x_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); 10383 10384 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); 10385 10386 /* QM queues pointers table */ 10387 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); 10388 10389 /* soft reset pulse */ 10390 REG_WR(sc, QM_REG_SOFT_RESET, 1); 10391 REG_WR(sc, QM_REG_SOFT_RESET, 0); 10392 10393 if (CNIC_SUPPORT(sc)) 10394 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); 10395 10396 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); 10397 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); 10398 10399 if (!CHIP_REV_IS_SLOW(sc)) { 10400 /* enable hw interrupt from doorbell Q */ 10401 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 10402 } 10403 10404 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 10405 10406 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 10407 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); 10408 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); 10409 10410 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { 10411 if (IS_MF_AFEX(sc)) { 10412 /* 10413 * configure that AFEX and VLAN headers must be 10414 * received in AFEX mode 10415 */ 10416 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); 10417 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); 10418 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 10419 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 10420 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); 10421 } else { 10422 /* 10423 * Bit-map indicating which L2 hdrs may appear 10424 * after the basic Ethernet header 10425 */ 10426 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 10427 sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); 10428 } 10429 } 10430 10431 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); 10432 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); 10433 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); 10434 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); 10435 10436 if (!CHIP_IS_E1x(sc)) { 10437 /* reset VFC memories */ 10438 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 10439 VFC_MEMORIES_RST_REG_CAM_RST | 10440 VFC_MEMORIES_RST_REG_RAM_RST); 10441 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 10442 VFC_MEMORIES_RST_REG_CAM_RST | 10443 VFC_MEMORIES_RST_REG_RAM_RST); 10444 10445 DELAY(20000); 10446 } 10447 10448 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); 10449 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); 10450 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); 10451 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); 10452 10453 /* sync semi rtc */ 10454 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x80000000); 10455 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x80000000); 10456 10457 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); 10458 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); 10459 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); 10460 10461 if (!CHIP_IS_E1x(sc)) { 10462 if (IS_MF_AFEX(sc)) { 10463 /* 10464 * configure that AFEX and VLAN headers must be 10465 * sent in AFEX mode 10466 */ 10467 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); 10468 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); 10469 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 10470 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 10471 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); 10472 } else { 10473 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 10474 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); 10475 } 10476 } 10477 10478 REG_WR(sc, SRC_REG_SOFT_RST, 1); 10479 10480 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); 10481 10482 if (CNIC_SUPPORT(sc)) { 10483 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); 10484 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 10485 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); 10486 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); 10487 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); 10488 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 10489 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); 10490 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 10491 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); 10492 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); 10493 } 10494 REG_WR(sc, SRC_REG_SOFT_RST, 0); 10495 10496 if (sizeof(union cdu_context) != 1024) { 10497 /* we currently assume that a context is 1024 bytes */ 10498 PMD_DRV_LOG(NOTICE, sc, 10499 "please adjust the size of cdu_context(%ld)", 10500 (long)sizeof(union cdu_context)); 10501 } 10502 10503 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); 10504 val = (4 << 24) + (0 << 12) + 1024; 10505 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); 10506 10507 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); 10508 10509 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); 10510 /* enable context validation interrupt from CFC */ 10511 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 10512 10513 /* set the thresholds to prevent CFC/CDU race */ 10514 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); 10515 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); 10516 10517 if (!CHIP_IS_E1x(sc) && BNX2X_NOMCP(sc)) { 10518 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); 10519 } 10520 10521 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); 10522 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); 10523 10524 /* Reset PCIE errors for debug */ 10525 REG_WR(sc, 0x2814, 0xffffffff); 10526 REG_WR(sc, 0x3820, 0xffffffff); 10527 10528 if 
(!CHIP_IS_E1x(sc)) { 10529 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 10530 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 10531 PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); 10532 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, 10533 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | 10534 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | 10535 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); 10536 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, 10537 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | 10538 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | 10539 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); 10540 } 10541 10542 ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON); 10543 10544 /* in E3 this done in per-port section */ 10545 if (!CHIP_IS_E3(sc)) 10546 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc)); 10547 10548 if (CHIP_IS_E1H(sc)) { 10549 /* not applicable for E2 (and above ...) */ 10550 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc)); 10551 } 10552 10553 if (CHIP_REV_IS_SLOW(sc)) { 10554 DELAY(200000); 10555 } 10556 10557 /* finish CFC init */ 10558 val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); 10559 if (val != 1) { 10560 PMD_DRV_LOG(NOTICE, sc, "CFC LL_INIT failed"); 10561 return -1; 10562 } 10563 val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); 10564 if (val != 1) { 10565 PMD_DRV_LOG(NOTICE, sc, "CFC AC_INIT failed"); 10566 return -1; 10567 } 10568 val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10); 10569 if (val != 1) { 10570 PMD_DRV_LOG(NOTICE, sc, "CFC CAM_INIT failed"); 10571 return -1; 10572 } 10573 REG_WR(sc, CFC_REG_DEBUG0, 0); 10574 10575 bnx2x_setup_fan_failure_detection(sc); 10576 10577 /* clear PXP2 attentions */ 10578 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 10579 10580 bnx2x_enable_blocks_attention(sc); 10581 10582 if (!CHIP_REV_IS_SLOW(sc)) { 10583 ecore_enable_blocks_parity(sc); 10584 } 10585 10586 if (!BNX2X_NOMCP(sc)) { 10587 if (CHIP_IS_E1x(sc)) { 10588 bnx2x_common_init_phy(sc); 10589 } 10590 } 10591 10592 return 0; 10593 } 10594 10595 /** 10596 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. 10597 * 10598 * @sc: driver handle 10599 */ 10600 static int bnx2x_init_hw_common_chip(struct bnx2x_softc *sc) 10601 { 10602 int rc = bnx2x_init_hw_common(sc); 10603 10604 if (rc) { 10605 return rc; 10606 } 10607 10608 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 10609 if (!BNX2X_NOMCP(sc)) { 10610 bnx2x_common_init_phy(sc); 10611 } 10612 10613 return 0; 10614 } 10615 10616 static int bnx2x_init_hw_port(struct bnx2x_softc *sc) 10617 { 10618 int port = SC_PORT(sc); 10619 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 10620 uint32_t low, high; 10621 uint32_t val; 10622 10623 PMD_DRV_LOG(DEBUG, sc, "starting port init for port %d", port); 10624 10625 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0); 10626 10627 ecore_init_block(sc, BLOCK_MISC, init_phase); 10628 ecore_init_block(sc, BLOCK_PXP, init_phase); 10629 ecore_init_block(sc, BLOCK_PXP2, init_phase); 10630 10631 /* 10632 * Timers bug workaround: disables the pf_master bit in pglue at 10633 * common phase, we need to enable it here before any dmae access are 10634 * attempted. 
Therefore we manually added the enable-master to the 10635 * port phase (it also happens in the function phase) 10636 */ 10637 if (!CHIP_IS_E1x(sc)) { 10638 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 10639 } 10640 10641 ecore_init_block(sc, BLOCK_ATC, init_phase); 10642 ecore_init_block(sc, BLOCK_DMAE, init_phase); 10643 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 10644 ecore_init_block(sc, BLOCK_QM, init_phase); 10645 10646 ecore_init_block(sc, BLOCK_TCM, init_phase); 10647 ecore_init_block(sc, BLOCK_UCM, init_phase); 10648 ecore_init_block(sc, BLOCK_CCM, init_phase); 10649 ecore_init_block(sc, BLOCK_XCM, init_phase); 10650 10651 /* QM cid (connection) count */ 10652 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); 10653 10654 if (CNIC_SUPPORT(sc)) { 10655 ecore_init_block(sc, BLOCK_TM, init_phase); 10656 REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port * 4, 20); 10657 REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port * 4, 31); 10658 } 10659 10660 ecore_init_block(sc, BLOCK_DORQ, init_phase); 10661 10662 ecore_init_block(sc, BLOCK_BRB1, init_phase); 10663 10664 if (CHIP_IS_E1H(sc)) { 10665 if (IS_MF(sc)) { 10666 low = (BNX2X_ONE_PORT(sc) ? 160 : 246); 10667 } else if (sc->mtu > 4096) { 10668 if (BNX2X_ONE_PORT(sc)) { 10669 low = 160; 10670 } else { 10671 val = sc->mtu; 10672 /* (24*1024 + val*4)/256 */ 10673 low = (96 + (val / 64) + ((val % 64) ? 1 : 0)); 10674 } 10675 } else { 10676 low = (BNX2X_ONE_PORT(sc) ? 80 : 160); 10677 } 10678 high = (low + 56); /* 14*1024/256 */ 10679 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port * 4, low); 10680 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port * 4, high); 10681 } 10682 10683 if (CHIP_IS_MODE_4_PORT(sc)) { 10684 REG_WR(sc, SC_PORT(sc) ? 10685 BRB1_REG_MAC_GUARANTIED_1 : 10686 BRB1_REG_MAC_GUARANTIED_0, 40); 10687 } 10688 10689 ecore_init_block(sc, BLOCK_PRS, init_phase); 10690 if (CHIP_IS_E3B0(sc)) { 10691 if (IS_MF_AFEX(sc)) { 10692 /* configure headers for AFEX mode */ 10693 if (SC_PORT(sc)) { 10694 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC_PORT_1, 10695 0xE); 10696 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0_PORT_1, 10697 0x6); 10698 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS_PORT_1, 0xA); 10699 } else { 10700 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC_PORT_0, 10701 0xE); 10702 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 10703 0x6); 10704 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 10705 } 10706 } else { 10707 /* Ovlan exists only if we are in multi-function + 10708 * switch-dependent mode, in switch-independent there 10709 * is no ovlan headers 10710 */ 10711 REG_WR(sc, SC_PORT(sc) ? 10712 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 10713 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 10714 (sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6)); 10715 } 10716 } 10717 10718 ecore_init_block(sc, BLOCK_TSDM, init_phase); 10719 ecore_init_block(sc, BLOCK_CSDM, init_phase); 10720 ecore_init_block(sc, BLOCK_USDM, init_phase); 10721 ecore_init_block(sc, BLOCK_XSDM, init_phase); 10722 10723 ecore_init_block(sc, BLOCK_TSEM, init_phase); 10724 ecore_init_block(sc, BLOCK_USEM, init_phase); 10725 ecore_init_block(sc, BLOCK_CSEM, init_phase); 10726 ecore_init_block(sc, BLOCK_XSEM, init_phase); 10727 10728 ecore_init_block(sc, BLOCK_UPB, init_phase); 10729 ecore_init_block(sc, BLOCK_XPB, init_phase); 10730 10731 ecore_init_block(sc, BLOCK_PBF, init_phase); 10732 10733 if (CHIP_IS_E1x(sc)) { 10734 /* configure PBF to work without PAUSE mtu 9000 */ 10735 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 0); 10736 10737 /* update threshold */ 10738 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, (9040 / 16)); 10739 /* update init credit */ 10740 REG_WR(sc, PBF_REG_P0_INIT_CRD + port * 4, 10741 (9040 / 16) + 553 - 22); 10742 10743 /* probe changes */ 10744 REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 1); 10745 DELAY(50); 10746 REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 0); 10747 } 10748 10749 if (CNIC_SUPPORT(sc)) { 10750 ecore_init_block(sc, BLOCK_SRC, init_phase); 10751 } 10752 10753 ecore_init_block(sc, BLOCK_CDU, init_phase); 10754 ecore_init_block(sc, BLOCK_CFC, init_phase); 10755 ecore_init_block(sc, BLOCK_HC, init_phase); 10756 ecore_init_block(sc, BLOCK_IGU, init_phase); 10757 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 10758 /* init aeu_mask_attn_func_0/1: 10759 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 10760 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 10761 * bits 4-7 are used for "per vn group attention" */ 10762 val = IS_MF(sc) ? 0xF7 : 0x7; 10763 val |= 0x10; 10764 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4, val); 10765 10766 ecore_init_block(sc, BLOCK_NIG, init_phase); 10767 10768 if (!CHIP_IS_E1x(sc)) { 10769 /* Bit-map indicating which L2 hdrs may appear after the 10770 * basic Ethernet header 10771 */ 10772 if (IS_MF_AFEX(sc)) { 10773 REG_WR(sc, SC_PORT(sc) ? 10774 NIG_REG_P1_HDRS_AFTER_BASIC : 10775 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 10776 } else { 10777 REG_WR(sc, SC_PORT(sc) ? 10778 NIG_REG_P1_HDRS_AFTER_BASIC : 10779 NIG_REG_P0_HDRS_AFTER_BASIC, 10780 IS_MF_SD(sc) ? 7 : 6); 10781 } 10782 10783 if (CHIP_IS_E3(sc)) { 10784 REG_WR(sc, SC_PORT(sc) ? 10785 NIG_REG_LLH1_MF_MODE : 10786 NIG_REG_LLH_MF_MODE, IS_MF(sc)); 10787 } 10788 } 10789 if (!CHIP_IS_E3(sc)) { 10790 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1); 10791 } 10792 10793 /* 0x2 disable mf_ov, 0x1 enable */ 10794 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port * 4, 10795 (IS_MF_SD(sc) ? 0x1 : 0x2)); 10796 10797 if (!CHIP_IS_E1x(sc)) { 10798 val = 0; 10799 switch (sc->devinfo.mf_info.mf_mode) { 10800 case MULTI_FUNCTION_SD: 10801 val = 1; 10802 break; 10803 case MULTI_FUNCTION_SI: 10804 case MULTI_FUNCTION_AFEX: 10805 val = 2; 10806 break; 10807 } 10808 10809 REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE : 10810 NIG_REG_LLH0_CLS_TYPE), val); 10811 } 10812 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port * 4, 0); 10813 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port * 4, 0); 10814 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port * 4, 1); 10815 10816 /* If SPIO5 is set to generate interrupts, enable it for this port */ 10817 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 10818 if (val & MISC_SPIO_SPIO5) { 10819 uint32_t reg_addr = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 10820 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 10821 val = REG_RD(sc, reg_addr); 10822 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 10823 REG_WR(sc, reg_addr, val); 10824 } 10825 10826 return 0; 10827 } 10828 10829 static uint32_t 10830 bnx2x_flr_clnup_reg_poll(struct bnx2x_softc *sc, uint32_t reg, 10831 uint32_t expected, uint32_t poll_count) 10832 { 10833 uint32_t cur_cnt = poll_count; 10834 uint32_t val; 10835 10836 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { 10837 DELAY(FLR_WAIT_INTERVAL); 10838 } 10839 10840 return val; 10841 } 10842 10843 static int 10844 bnx2x_flr_clnup_poll_hw_counter(struct bnx2x_softc *sc, uint32_t reg, 10845 __rte_unused const char *msg, uint32_t poll_cnt) 10846 { 10847 uint32_t val = bnx2x_flr_clnup_reg_poll(sc, reg, 0, poll_cnt); 10848 10849 if (val != 0) { 10850 PMD_DRV_LOG(NOTICE, sc, "%s usage count=%d", msg, val); 10851 return -1; 10852 } 10853 10854 return 0; 10855 } 10856 10857 /* Common routines with VF FLR cleanup */ 10858 static uint32_t bnx2x_flr_clnup_poll_count(struct bnx2x_softc *sc) 10859 { 10860 /* adjust polling timeout */ 10861 if (CHIP_REV_IS_EMUL(sc)) { 10862 return FLR_POLL_CNT * 2000; 10863 } 10864 10865 if (CHIP_REV_IS_FPGA(sc)) { 10866 return FLR_POLL_CNT * 120; 10867 } 10868 10869 return FLR_POLL_CNT; 10870 } 10871 10872 static int bnx2x_poll_hw_usage_counters(struct bnx2x_softc *sc, uint32_t poll_cnt) 10873 { 10874 /* wait for CFC PF usage-counter to zero (includes all the VFs) */ 10875 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10876 CFC_REG_NUM_LCIDS_INSIDE_PF, 10877 "CFC PF usage counter timed out", 10878 poll_cnt)) { 10879 return -1; 10880 } 10881 10882 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ 10883 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10884 DORQ_REG_PF_USAGE_CNT, 10885 "DQ PF usage counter timed out", 10886 poll_cnt)) { 10887 return -1; 10888 } 10889 10890 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ 10891 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10892 QM_REG_PF_USG_CNT_0 + 4 * SC_FUNC(sc), 10893 "QM PF usage counter timed out", 10894 poll_cnt)) { 10895 return -1; 10896 } 10897 10898 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ 10899 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10900 TM_REG_LIN0_VNIC_UC + 4 * SC_PORT(sc), 10901 "Timers VNIC usage counter timed out", 10902 poll_cnt)) { 10903 return -1; 10904 } 10905 10906 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10907 TM_REG_LIN0_NUM_SCANS + 10908 4 * SC_PORT(sc), 10909 "Timers NUM_SCANS usage counter timed out", 10910 poll_cnt)) { 10911 return -1; 10912 } 10913 10914 /* Wait DMAE PF usage counter to zero */ 10915 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10916 dmae_reg_go_c[INIT_DMAE_C(sc)], 10917 "DMAE dommand register timed out", 10918 poll_cnt)) { 10919 return -1; 10920 } 10921 10922 return 0; 10923 } 10924 10925 #define OP_GEN_PARAM(param) \ 10926 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) 10927 #define OP_GEN_TYPE(type) \ 10928 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) 10929 #define OP_GEN_AGG_VECT(index) \ 10930 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) 10931 10932 static int 10933 bnx2x_send_final_clnup(struct bnx2x_softc *sc, uint8_t clnup_func, 10934 uint32_t poll_cnt) 10935 { 10936 uint32_t op_gen_command = 0; 10937 uint32_t comp_addr = (BAR_CSTRORM_INTMEM + 10938 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); 10939 int ret = 0; 10940 10941 if (REG_RD(sc, comp_addr)) { 10942 PMD_DRV_LOG(NOTICE, sc, 10943 
"Cleanup complete was not 0 before sending"); 10944 return -1; 10945 } 10946 10947 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); 10948 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); 10949 op_gen_command |= OP_GEN_AGG_VECT(clnup_func); 10950 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; 10951 10952 REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); 10953 10954 if (bnx2x_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { 10955 PMD_DRV_LOG(NOTICE, sc, "FW final cleanup did not succeed"); 10956 PMD_DRV_LOG(DEBUG, sc, "At timeout completion address contained %x", 10957 (REG_RD(sc, comp_addr))); 10958 rte_panic("FLR cleanup failed"); 10959 return -1; 10960 } 10961 10962 /* Zero completion for nxt FLR */ 10963 REG_WR(sc, comp_addr, 0); 10964 10965 return ret; 10966 } 10967 10968 static void 10969 bnx2x_pbf_pN_buf_flushed(struct bnx2x_softc *sc, struct pbf_pN_buf_regs *regs, 10970 uint32_t poll_count) 10971 { 10972 uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; 10973 uint32_t cur_cnt = poll_count; 10974 10975 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); 10976 crd = crd_start = REG_RD(sc, regs->crd); 10977 init_crd = REG_RD(sc, regs->init_crd); 10978 10979 while ((crd != init_crd) && 10980 ((uint32_t) ((int32_t) crd_freed - (int32_t) crd_freed_start) < 10981 (init_crd - crd_start))) { 10982 if (cur_cnt--) { 10983 DELAY(FLR_WAIT_INTERVAL); 10984 crd = REG_RD(sc, regs->crd); 10985 crd_freed = REG_RD(sc, regs->crd_freed); 10986 } else { 10987 break; 10988 } 10989 } 10990 } 10991 10992 static void 10993 bnx2x_pbf_pN_cmd_flushed(struct bnx2x_softc *sc, struct pbf_pN_cmd_regs *regs, 10994 uint32_t poll_count) 10995 { 10996 uint32_t occup, to_free, freed, freed_start; 10997 uint32_t cur_cnt = poll_count; 10998 10999 occup = to_free = REG_RD(sc, regs->lines_occup); 11000 freed = freed_start = REG_RD(sc, regs->lines_freed); 11001 11002 while (occup && 11003 ((uint32_t) ((int32_t) freed - (int32_t) freed_start) < 11004 to_free)) { 11005 if (cur_cnt--) { 11006 DELAY(FLR_WAIT_INTERVAL); 11007 occup = REG_RD(sc, regs->lines_occup); 11008 freed = REG_RD(sc, regs->lines_freed); 11009 } else { 11010 break; 11011 } 11012 } 11013 } 11014 11015 static void bnx2x_tx_hw_flushed(struct bnx2x_softc *sc, uint32_t poll_count) 11016 { 11017 struct pbf_pN_cmd_regs cmd_regs[] = { 11018 {0, (CHIP_IS_E3B0(sc)) ? 11019 PBF_REG_TQ_OCCUPANCY_Q0 : PBF_REG_P0_TQ_OCCUPANCY, 11020 (CHIP_IS_E3B0(sc)) ? 11021 PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT}, 11022 {1, (CHIP_IS_E3B0(sc)) ? 11023 PBF_REG_TQ_OCCUPANCY_Q1 : PBF_REG_P1_TQ_OCCUPANCY, 11024 (CHIP_IS_E3B0(sc)) ? 11025 PBF_REG_TQ_LINES_FREED_CNT_Q1 : PBF_REG_P1_TQ_LINES_FREED_CNT}, 11026 {4, (CHIP_IS_E3B0(sc)) ? 11027 PBF_REG_TQ_OCCUPANCY_LB_Q : PBF_REG_P4_TQ_OCCUPANCY, 11028 (CHIP_IS_E3B0(sc)) ? 11029 PBF_REG_TQ_LINES_FREED_CNT_LB_Q : 11030 PBF_REG_P4_TQ_LINES_FREED_CNT} 11031 }; 11032 11033 struct pbf_pN_buf_regs buf_regs[] = { 11034 {0, (CHIP_IS_E3B0(sc)) ? 11035 PBF_REG_INIT_CRD_Q0 : PBF_REG_P0_INIT_CRD, 11036 (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q0 : PBF_REG_P0_CREDIT, 11037 (CHIP_IS_E3B0(sc)) ? 11038 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : 11039 PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, 11040 {1, (CHIP_IS_E3B0(sc)) ? 11041 PBF_REG_INIT_CRD_Q1 : PBF_REG_P1_INIT_CRD, 11042 (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q1 : PBF_REG_P1_CREDIT, 11043 (CHIP_IS_E3B0(sc)) ? 11044 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : 11045 PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, 11046 {4, (CHIP_IS_E3B0(sc)) ? 
		 PBF_REG_INIT_CRD_LB_Q : PBF_REG_P4_INIT_CRD,
		 (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_LB_Q : PBF_REG_P4_CREDIT,
		 (CHIP_IS_E3B0(sc)) ?
		 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
		 PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	uint32_t i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
		bnx2x_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
	}

	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
		bnx2x_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
	}
}

static void bnx2x_hw_enable_status(struct bnx2x_softc *sc)
{
	__rte_unused uint32_t val;

	val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
	PMD_DRV_LOG(DEBUG, sc, "CFC_REG_WEAK_ENABLE_PF is 0x%x", val);

	val = REG_RD(sc, PBF_REG_DISABLE_PF);
	PMD_DRV_LOG(DEBUG, sc, "PBF_REG_DISABLE_PF is 0x%x", val);

	val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
	PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSI_EN is 0x%x", val);

	val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
	PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSIX_EN is 0x%x", val);

	val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x", val);

	val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	PMD_DRV_LOG(DEBUG, sc,
		    "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x", val);

	val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	PMD_DRV_LOG(DEBUG, sc,
		    "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x", val);

	val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	PMD_DRV_LOG(DEBUG, sc, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x",
		    val);
}

/**
 * bnx2x_pf_flr_clnup
 * a. re-enable target read on the PF
 * b. poll cfc per function usage counter
 * c. poll the qm per function usage counter
 * d. poll the tm per function usage counter
 * e. poll the tm per function scan-done indication
 * f. clear the dmae channel associated with the PF
 * g. zero the igu 'trailing edge' and 'leading edge' regs (attentions)
 * h. call the common flr cleanup code with -1 (pf indication)
 */
static int bnx2x_pf_flr_clnup(struct bnx2x_softc *sc)
{
	uint32_t poll_cnt = bnx2x_flr_clnup_poll_count(sc);

	/* Re-enable PF target read access */
	REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	if (bnx2x_poll_hw_usage_counters(sc, poll_cnt)) {
		return -1;
	}

	/* Zero the igu 'trailing edge' and 'leading edge' */

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(sc, (uint8_t) SC_FUNC(sc), poll_cnt)) {
		return -1;
	}

	/* ATC cleanup */

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(sc, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	DELAY(100000);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(sc)) {
		PMD_DRV_LOG(NOTICE, sc, "PCIE Transactions still pending");
	}

	/* Debug */
	bnx2x_hw_enable_status(sc);

	/*
	 * Master enable - Due to WB DMAE writes performed before this
	 * register is re-initialized as part of the regular function init
	 */
	REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}

static int bnx2x_init_hw_func(struct bnx2x_softc *sc)
{
	int port = SC_PORT(sc);
	int func = SC_FUNC(sc);
	int init_phase = PHASE_PF0 + func;
	struct ecore_ilt *ilt = sc->ilt;
	uint16_t cdu_ilt_start;
	uint32_t addr, val;
	uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
	int main_mem_width, rc;
	uint32_t i;

	PMD_DRV_LOG(DEBUG, sc, "starting func init for func %d", func);

	/* FLR cleanup */
	if (!CHIP_IS_E1x(sc)) {
		rc = bnx2x_pf_flr_clnup(sc);
		if (rc) {
			PMD_DRV_LOG(NOTICE, sc, "FLR cleanup failed!");
			return rc;
		}
	}

	/* set MSI reconfigure capability */
	if (sc->devinfo.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(sc, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(sc, addr, val);
	}

	ecore_init_block(sc, BLOCK_PXP, init_phase);
	ecore_init_block(sc, BLOCK_PXP2, init_phase);

	ilt = sc->ilt;
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(sc); i++) {
		ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
		ilt->lines[cdu_ilt_start + i].page_mapping =
		    (rte_iova_t)sc->context[i].vcxt_dma.paddr;
		ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
	}
	ecore_ilt_init_op(sc, INITOP_SET);

	REG_WR(sc, PRS_REG_NIC_MODE, 1);

	if (!CHIP_IS_E1x(sc)) {
		uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if ((sc->interrupt_mode != INTR_MODE_MSIX) &&
		    (sc->interrupt_mode != INTR_MODE_SINGLE_MSIX)) {
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		}

		/*
		 * Timers bug workaround: function init part.
11214 * Need to wait 20msec after initializing ILT, 11215 * needed to make sure there are no requests in 11216 * one of the PXP internal queues with "old" ILT addresses 11217 */ 11218 DELAY(20000); 11219 11220 /* 11221 * Master enable - Due to WB DMAE writes performed before this 11222 * register is re-initialized as part of the regular function 11223 * init 11224 */ 11225 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 11226 /* Enable the function in IGU */ 11227 REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); 11228 } 11229 11230 sc->dmae_ready = 1; 11231 11232 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 11233 11234 if (!CHIP_IS_E1x(sc)) 11235 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); 11236 11237 ecore_init_block(sc, BLOCK_ATC, init_phase); 11238 ecore_init_block(sc, BLOCK_DMAE, init_phase); 11239 ecore_init_block(sc, BLOCK_NIG, init_phase); 11240 ecore_init_block(sc, BLOCK_SRC, init_phase); 11241 ecore_init_block(sc, BLOCK_MISC, init_phase); 11242 ecore_init_block(sc, BLOCK_TCM, init_phase); 11243 ecore_init_block(sc, BLOCK_UCM, init_phase); 11244 ecore_init_block(sc, BLOCK_CCM, init_phase); 11245 ecore_init_block(sc, BLOCK_XCM, init_phase); 11246 ecore_init_block(sc, BLOCK_TSEM, init_phase); 11247 ecore_init_block(sc, BLOCK_USEM, init_phase); 11248 ecore_init_block(sc, BLOCK_CSEM, init_phase); 11249 ecore_init_block(sc, BLOCK_XSEM, init_phase); 11250 11251 if (!CHIP_IS_E1x(sc)) 11252 REG_WR(sc, QM_REG_PF_EN, 1); 11253 11254 if (!CHIP_IS_E1x(sc)) { 11255 REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 11256 REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 11257 REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 11258 REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 11259 } 11260 ecore_init_block(sc, BLOCK_QM, init_phase); 11261 11262 ecore_init_block(sc, BLOCK_TM, init_phase); 11263 ecore_init_block(sc, BLOCK_DORQ, init_phase); 11264 11265 ecore_init_block(sc, BLOCK_BRB1, init_phase); 11266 ecore_init_block(sc, BLOCK_PRS, init_phase); 11267 ecore_init_block(sc, BLOCK_TSDM, init_phase); 11268 ecore_init_block(sc, BLOCK_CSDM, init_phase); 11269 ecore_init_block(sc, BLOCK_USDM, init_phase); 11270 ecore_init_block(sc, BLOCK_XSDM, init_phase); 11271 ecore_init_block(sc, BLOCK_UPB, init_phase); 11272 ecore_init_block(sc, BLOCK_XPB, init_phase); 11273 ecore_init_block(sc, BLOCK_PBF, init_phase); 11274 if (!CHIP_IS_E1x(sc)) 11275 REG_WR(sc, PBF_REG_DISABLE_PF, 0); 11276 11277 ecore_init_block(sc, BLOCK_CDU, init_phase); 11278 11279 ecore_init_block(sc, BLOCK_CFC, init_phase); 11280 11281 if (!CHIP_IS_E1x(sc)) 11282 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); 11283 11284 if (IS_MF(sc)) { 11285 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1); 11286 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, OVLAN(sc)); 11287 } 11288 11289 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 11290 11291 /* HC init per function */ 11292 if (sc->devinfo.int_block == INT_BLOCK_HC) { 11293 if (CHIP_IS_E1H(sc)) { 11294 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0); 11295 11296 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0); 11297 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0); 11298 } 11299 ecore_init_block(sc, BLOCK_HC, init_phase); 11300 11301 } else { 11302 uint32_t num_segs, sb_idx, prod_offset; 11303 11304 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0); 11305 11306 if (!CHIP_IS_E1x(sc)) { 11307 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 11308 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 11309 } 11310 11311 
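		/* Using the IGU (not the HC): init the IGU block; on E2 and
		 * newer also zero the producer memory and clean up this
		 * function's status blocks.
		 */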
ecore_init_block(sc, BLOCK_IGU, init_phase); 11312 11313 if (!CHIP_IS_E1x(sc)) { 11314 int dsb_idx = 0; 11315 /** 11316 * Producer memory: 11317 * E2 mode: address 0-135 match to the mapping memory; 11318 * 136 - PF0 default prod; 137 - PF1 default prod; 11319 * 138 - PF2 default prod; 139 - PF3 default prod; 11320 * 140 - PF0 attn prod; 141 - PF1 attn prod; 11321 * 142 - PF2 attn prod; 143 - PF3 attn prod; 11322 * 144-147 reserved. 11323 * 11324 * E1.5 mode - In backward compatible mode; 11325 * for non default SB; each even line in the memory 11326 * holds the U producer and each odd line hold 11327 * the C producer. The first 128 producers are for 11328 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 11329 * producers are for the DSB for each PF. 11330 * Each PF has five segments: (the order inside each 11331 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 11332 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 11333 * 144-147 attn prods; 11334 */ 11335 /* non-default-status-blocks */ 11336 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 11337 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 11338 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { 11339 prod_offset = (sc->igu_base_sb + sb_idx) * 11340 num_segs; 11341 11342 for (i = 0; i < num_segs; i++) { 11343 addr = IGU_REG_PROD_CONS_MEMORY + 11344 (prod_offset + i) * 4; 11345 REG_WR(sc, addr, 0); 11346 } 11347 /* send consumer update with value 0 */ 11348 bnx2x_ack_sb(sc, sc->igu_base_sb + sb_idx, 11349 USTORM_ID, 0, IGU_INT_NOP, 1); 11350 bnx2x_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); 11351 } 11352 11353 /* default-status-blocks */ 11354 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 11355 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; 11356 11357 if (CHIP_IS_MODE_4_PORT(sc)) 11358 dsb_idx = SC_FUNC(sc); 11359 else 11360 dsb_idx = SC_VN(sc); 11361 11362 prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? 11363 IGU_BC_BASE_DSB_PROD + dsb_idx : 11364 IGU_NORM_BASE_DSB_PROD + dsb_idx); 11365 11366 /* 11367 * igu prods come in chunks of E1HVN_MAX (4) - 11368 * does not matters what is the current chip mode 11369 */ 11370 for (i = 0; i < (num_segs * E1HVN_MAX); i += E1HVN_MAX) { 11371 addr = IGU_REG_PROD_CONS_MEMORY + 11372 (prod_offset + i) * 4; 11373 REG_WR(sc, addr, 0); 11374 } 11375 /* send consumer update with 0 */ 11376 if (CHIP_INT_MODE_IS_BC(sc)) { 11377 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11378 USTORM_ID, 0, IGU_INT_NOP, 1); 11379 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11380 CSTORM_ID, 0, IGU_INT_NOP, 1); 11381 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11382 XSTORM_ID, 0, IGU_INT_NOP, 1); 11383 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11384 TSTORM_ID, 0, IGU_INT_NOP, 1); 11385 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11386 ATTENTION_ID, 0, IGU_INT_NOP, 1); 11387 } else { 11388 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11389 USTORM_ID, 0, IGU_INT_NOP, 1); 11390 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11391 ATTENTION_ID, 0, IGU_INT_NOP, 1); 11392 } 11393 bnx2x_igu_clear_sb(sc, sc->igu_dsb_id); 11394 11395 /* !!! 
these should become driver const once 11396 rf-tool supports split-68 const */ 11397 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 11398 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 11399 REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); 11400 REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); 11401 REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); 11402 REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); 11403 } 11404 } 11405 11406 /* Reset PCIE errors for debug */ 11407 REG_WR(sc, 0x2114, 0xffffffff); 11408 REG_WR(sc, 0x2120, 0xffffffff); 11409 11410 if (CHIP_IS_E1x(sc)) { 11411 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords */ 11412 main_mem_base = HC_REG_MAIN_MEMORY + 11413 SC_PORT(sc) * (main_mem_size * 4); 11414 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; 11415 main_mem_width = 8; 11416 11417 val = REG_RD(sc, main_mem_prty_clr); 11418 if (val) { 11419 PMD_DRV_LOG(DEBUG, sc, 11420 "Parity errors in HC block during function init (0x%x)!", 11421 val); 11422 } 11423 11424 /* Clear "false" parity errors in MSI-X table */ 11425 for (i = main_mem_base; 11426 i < main_mem_base + main_mem_size * 4; 11427 i += main_mem_width) { 11428 bnx2x_read_dmae(sc, i, main_mem_width / 4); 11429 bnx2x_write_dmae(sc, BNX2X_SP_MAPPING(sc, wb_data), 11430 i, main_mem_width / 4); 11431 } 11432 /* Clear HC parity attention */ 11433 REG_RD(sc, main_mem_prty_clr); 11434 } 11435 11436 /* Enable STORMs SP logging */ 11437 REG_WR8(sc, BAR_USTRORM_INTMEM + 11438 USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 11439 REG_WR8(sc, BAR_TSTRORM_INTMEM + 11440 TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 11441 REG_WR8(sc, BAR_CSTRORM_INTMEM + 11442 CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 11443 REG_WR8(sc, BAR_XSTRORM_INTMEM + 11444 XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 11445 11446 elink_phy_probe(&sc->link_params); 11447 11448 return 0; 11449 } 11450 11451 static void bnx2x_link_reset(struct bnx2x_softc *sc) 11452 { 11453 if (!BNX2X_NOMCP(sc)) { 11454 bnx2x_acquire_phy_lock(sc); 11455 elink_lfa_reset(&sc->link_params, &sc->link_vars); 11456 bnx2x_release_phy_lock(sc); 11457 } else { 11458 if (!CHIP_REV_IS_SLOW(sc)) { 11459 PMD_DRV_LOG(WARNING, sc, 11460 "Bootcode is missing - cannot reset link"); 11461 } 11462 } 11463 } 11464 11465 static void bnx2x_reset_port(struct bnx2x_softc *sc) 11466 { 11467 int port = SC_PORT(sc); 11468 uint32_t val; 11469 11470 /* reset physical Link */ 11471 bnx2x_link_reset(sc); 11472 11473 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0); 11474 11475 /* Do not rcv packets to BRB */ 11476 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port * 4, 0x0); 11477 /* Do not direct rcv packets that are not for MCP to the BRB */ 11478 REG_WR(sc, (port ? 
NIG_REG_LLH1_BRB1_NOT_MCP : 11479 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 11480 11481 /* Configure AEU */ 11482 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4, 0); 11483 11484 DELAY(100000); 11485 11486 /* Check for BRB port occupancy */ 11487 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port * 4); 11488 if (val) { 11489 PMD_DRV_LOG(DEBUG, sc, 11490 "BRB1 is not empty, %d blocks are occupied", val); 11491 } 11492 } 11493 11494 static void bnx2x_ilt_wr(struct bnx2x_softc *sc, uint32_t index, rte_iova_t addr) 11495 { 11496 int reg; 11497 uint32_t wb_write[2]; 11498 11499 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index * 8; 11500 11501 wb_write[0] = ONCHIP_ADDR1(addr); 11502 wb_write[1] = ONCHIP_ADDR2(addr); 11503 REG_WR_DMAE(sc, reg, wb_write, 2); 11504 } 11505 11506 static void bnx2x_clear_func_ilt(struct bnx2x_softc *sc, uint32_t func) 11507 { 11508 uint32_t i, base = FUNC_ILT_BASE(func); 11509 for (i = base; i < base + ILT_PER_FUNC; i++) { 11510 bnx2x_ilt_wr(sc, i, 0); 11511 } 11512 } 11513 11514 static void bnx2x_reset_func(struct bnx2x_softc *sc) 11515 { 11516 struct bnx2x_fastpath *fp; 11517 int port = SC_PORT(sc); 11518 int func = SC_FUNC(sc); 11519 int i; 11520 11521 /* Disable the function in the FW */ 11522 REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 11523 REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 11524 REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 11525 REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 11526 11527 /* FP SBs */ 11528 FOR_EACH_ETH_QUEUE(sc, i) { 11529 fp = &sc->fp[i]; 11530 REG_WR8(sc, BAR_CSTRORM_INTMEM + 11531 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 11532 SB_DISABLED); 11533 } 11534 11535 /* SP SB */ 11536 REG_WR8(sc, BAR_CSTRORM_INTMEM + 11537 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), SB_DISABLED); 11538 11539 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { 11540 REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 11541 0); 11542 } 11543 11544 /* Configure IGU */ 11545 if (sc->devinfo.int_block == INT_BLOCK_HC) { 11546 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0); 11547 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0); 11548 } else { 11549 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 11550 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 11551 } 11552 11553 if (CNIC_LOADED(sc)) { 11554 /* Disable Timer scan */ 11555 REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port * 4, 0); 11556 /* 11557 * Wait for at least 10ms and up to 2 second for the timers 11558 * scan to complete 11559 */ 11560 for (i = 0; i < 200; i++) { 11561 DELAY(10000); 11562 if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port * 4)) 11563 break; 11564 } 11565 } 11566 11567 /* Clear ILT */ 11568 bnx2x_clear_func_ilt(sc, func); 11569 11570 /* 11571 * Timers workaround bug for E2: if this is vnic-3, 11572 * we need to set the entire ilt range for this timers. 
11573 */ 11574 if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) { 11575 struct ilt_client_info ilt_cli; 11576 /* use dummy TM client */ 11577 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 11578 ilt_cli.start = 0; 11579 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 11580 ilt_cli.client_num = ILT_CLIENT_TM; 11581 11582 ecore_ilt_boundry_init_op(sc, &ilt_cli, 0); 11583 } 11584 11585 /* this assumes that reset_port() called before reset_func() */ 11586 if (!CHIP_IS_E1x(sc)) { 11587 bnx2x_pf_disable(sc); 11588 } 11589 11590 sc->dmae_ready = 0; 11591 } 11592 11593 static void bnx2x_release_firmware(struct bnx2x_softc *sc) 11594 { 11595 rte_free(sc->init_ops); 11596 rte_free(sc->init_ops_offsets); 11597 rte_free(sc->init_data); 11598 rte_free(sc->iro_array); 11599 } 11600 11601 static int bnx2x_init_firmware(struct bnx2x_softc *sc) 11602 { 11603 uint32_t len, i; 11604 uint8_t *p = sc->firmware; 11605 uint32_t off[24]; 11606 11607 for (i = 0; i < 24; ++i) 11608 off[i] = rte_be_to_cpu_32(*((uint32_t *) sc->firmware + i)); 11609 11610 len = off[0]; 11611 sc->init_ops = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); 11612 if (!sc->init_ops) 11613 goto alloc_failed; 11614 bnx2x_data_to_init_ops(p + off[1], sc->init_ops, len); 11615 11616 len = off[2]; 11617 sc->init_ops_offsets = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); 11618 if (!sc->init_ops_offsets) 11619 goto alloc_failed; 11620 bnx2x_data_to_init_offsets(p + off[3], sc->init_ops_offsets, len); 11621 11622 len = off[4]; 11623 sc->init_data = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); 11624 if (!sc->init_data) 11625 goto alloc_failed; 11626 bnx2x_data_to_init_data(p + off[5], sc->init_data, len); 11627 11628 sc->tsem_int_table_data = p + off[7]; 11629 sc->tsem_pram_data = p + off[9]; 11630 sc->usem_int_table_data = p + off[11]; 11631 sc->usem_pram_data = p + off[13]; 11632 sc->csem_int_table_data = p + off[15]; 11633 sc->csem_pram_data = p + off[17]; 11634 sc->xsem_int_table_data = p + off[19]; 11635 sc->xsem_pram_data = p + off[21]; 11636 11637 len = off[22]; 11638 sc->iro_array = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); 11639 if (!sc->iro_array) 11640 goto alloc_failed; 11641 bnx2x_data_to_iro_array(p + off[23], sc->iro_array, len); 11642 11643 return 0; 11644 11645 alloc_failed: 11646 bnx2x_release_firmware(sc); 11647 return -1; 11648 } 11649 11650 static int cut_gzip_prefix(const uint8_t * zbuf, int len) 11651 { 11652 #define MIN_PREFIX_SIZE (10) 11653 11654 int n = MIN_PREFIX_SIZE; 11655 uint16_t xlen; 11656 11657 if (!(zbuf[0] == 0x1f && zbuf[1] == 0x8b && zbuf[2] == Z_DEFLATED) || 11658 len <= MIN_PREFIX_SIZE) { 11659 return -1; 11660 } 11661 11662 /* optional extra fields are present */ 11663 if (zbuf[3] & 0x4) { 11664 xlen = zbuf[13]; 11665 xlen <<= 8; 11666 xlen += zbuf[12]; 11667 11668 n += xlen; 11669 } 11670 /* file name is present */ 11671 if (zbuf[3] & 0x8) { 11672 while ((zbuf[n++] != 0) && (n < len)) ; 11673 } 11674 11675 return n; 11676 } 11677 11678 static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t * zbuf, int len) 11679 { 11680 int ret; 11681 int data_begin = cut_gzip_prefix(zbuf, len); 11682 11683 PMD_DRV_LOG(DEBUG, sc, "ecore_gunzip %d", len); 11684 11685 if (data_begin <= 0) { 11686 PMD_DRV_LOG(NOTICE, sc, "bad gzip prefix"); 11687 return -1; 11688 } 11689 11690 memset(&zlib_stream, 0, sizeof(zlib_stream)); 11691 zlib_stream.next_in = zbuf + data_begin; 11692 zlib_stream.avail_in = len - data_begin; 11693 zlib_stream.next_out = sc->gz_buf; 11694 zlib_stream.avail_out = FW_BUF_SIZE; 11695 11696 ret = 
static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t *zbuf, int len)
{
	int ret;
	int data_begin = cut_gzip_prefix(zbuf, len);

	PMD_DRV_LOG(DEBUG, sc, "ecore_gunzip %d", len);

	if (data_begin <= 0) {
		PMD_DRV_LOG(NOTICE, sc, "bad gzip prefix");
		return -1;
	}

	memset(&zlib_stream, 0, sizeof(zlib_stream));
	zlib_stream.next_in = zbuf + data_begin;
	zlib_stream.avail_in = len - data_begin;
	zlib_stream.next_out = sc->gz_buf;
	zlib_stream.avail_out = FW_BUF_SIZE;

	ret = inflateInit2(&zlib_stream, -MAX_WBITS);
	if (ret != Z_OK) {
		PMD_DRV_LOG(NOTICE, sc, "zlib inflateInit2 error");
		return ret;
	}

	ret = inflate(&zlib_stream, Z_FINISH);
	if ((ret != Z_STREAM_END) && (ret != Z_OK)) {
		PMD_DRV_LOG(NOTICE, sc, "zlib inflate error: %d %s", ret,
			    zlib_stream.msg);
	}

	sc->gz_outlen = zlib_stream.total_out;
	if (sc->gz_outlen & 0x3) {
		PMD_DRV_LOG(NOTICE, sc, "firmware is not aligned. gz_outlen == %d",
			    sc->gz_outlen);
	}
	sc->gz_outlen >>= 2;

	inflateEnd(&zlib_stream);

	if (ret == Z_STREAM_END)
		return 0;

	return ret;
}

static void
ecore_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr,
			  uint32_t addr, uint32_t len)
{
	bnx2x_write_dmae_phys_len(sc, phys_addr, addr, len);
}

void
ecore_storm_memset_struct(struct bnx2x_softc *sc, uint32_t addr, size_t size,
			  uint32_t *data)
{
	uint8_t i;
	for (i = 0; i < size / 4; i++) {
		REG_WR(sc, addr + (i * 4), data[i]);
	}
}
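
/*
 * ecore_storm_memset_struct() mirrors a host-side structure into storm
 * internal memory one 32-bit word at a time. A minimal sketch of a typical
 * call follows; the struct, field names and dest_addr are illustrative only,
 * not a specific firmware layout:
 *
 *	struct example_cfg {
 *		uint32_t word0;
 *		uint32_t word1;
 *	} cfg = { .word0 = 1, .word1 = 2 };
 *
 *	ecore_storm_memset_struct(sc, dest_addr, sizeof(cfg),
 *				  (uint32_t *)&cfg);
 *
 * Note that the size is expected to be a multiple of 4 bytes; any tail
 * smaller than a word is not written.
 */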
%x", flags); 11816 strlcat(flag_str, unknown, sizeof(flag_str)); 11817 } 11818 return flag_str; 11819 } 11820 11821 /* Prints useful adapter info. */ 11822 void bnx2x_print_adapter_info(struct bnx2x_softc *sc) 11823 { 11824 int i = 0; 11825 11826 PMD_DRV_LOG(INFO, sc, "========================================"); 11827 /* DPDK and Driver versions */ 11828 PMD_DRV_LOG(INFO, sc, "%12s : %s", "DPDK", 11829 rte_version()); 11830 PMD_DRV_LOG(INFO, sc, "%12s : %s", "Driver", 11831 bnx2x_pmd_version()); 11832 /* Firmware versions. */ 11833 PMD_DRV_LOG(INFO, sc, "%12s : %d.%d.%d", 11834 "Firmware", 11835 BNX2X_5710_FW_MAJOR_VERSION, 11836 BNX2X_5710_FW_MINOR_VERSION, 11837 BNX2X_5710_FW_REVISION_VERSION); 11838 PMD_DRV_LOG(INFO, sc, "%12s : %s", 11839 "Bootcode", sc->devinfo.bc_ver_str); 11840 /* Hardware chip info. */ 11841 PMD_DRV_LOG(INFO, sc, "%12s : %#08x", "ASIC", sc->devinfo.chip_id); 11842 PMD_DRV_LOG(INFO, sc, "%12s : %c%d", "Rev", (CHIP_REV(sc) >> 12) + 'A', 11843 (CHIP_METAL(sc) >> 4)); 11844 /* Bus PCIe info. */ 11845 PMD_DRV_LOG(INFO, sc, "%12s : 0x%x", "Vendor Id", 11846 sc->devinfo.vendor_id); 11847 PMD_DRV_LOG(INFO, sc, "%12s : 0x%x", "Device Id", 11848 sc->devinfo.device_id); 11849 PMD_DRV_LOG(INFO, sc, "%12s : width x%d, ", "Bus PCIe", 11850 sc->devinfo.pcie_link_width); 11851 switch (sc->devinfo.pcie_link_speed) { 11852 case 1: 11853 PMD_DRV_LOG(INFO, sc, "%23s", "2.5 Gbps"); 11854 break; 11855 case 2: 11856 PMD_DRV_LOG(INFO, sc, "%21s", "5 Gbps"); 11857 break; 11858 case 4: 11859 PMD_DRV_LOG(INFO, sc, "%21s", "8 Gbps"); 11860 break; 11861 default: 11862 PMD_DRV_LOG(INFO, sc, "%33s", "Unknown link speed"); 11863 } 11864 /* Device features. */ 11865 PMD_DRV_LOG(INFO, sc, "%12s : ", "Flags"); 11866 /* Miscellaneous flags. */ 11867 if (sc->devinfo.pcie_cap_flags & BNX2X_MSI_CAPABLE_FLAG) { 11868 PMD_DRV_LOG(INFO, sc, "%18s", "MSI"); 11869 i++; 11870 } 11871 if (sc->devinfo.pcie_cap_flags & BNX2X_MSIX_CAPABLE_FLAG) { 11872 if (i > 0) 11873 PMD_DRV_LOG(INFO, sc, "|"); 11874 PMD_DRV_LOG(INFO, sc, "%20s", "MSI-X"); 11875 i++; 11876 } 11877 PMD_DRV_LOG(INFO, sc, "%12s : %s", "OVLAN", (OVLAN(sc) ? "YES" : "NO")); 11878 PMD_DRV_LOG(INFO, sc, "%12s : %s", "MF", (IS_MF(sc) ? "YES" : "NO")); 11879 PMD_DRV_LOG(INFO, sc, "========================================"); 11880 } 11881 11882 /* Prints useful device info. */ 11883 void bnx2x_print_device_info(struct bnx2x_softc *sc) 11884 { 11885 __rte_unused uint32_t ext_phy_type; 11886 uint32_t offset, reg_val; 11887 11888 PMD_INIT_FUNC_TRACE(sc); 11889 offset = offsetof(struct shmem_region, 11890 dev_info.port_hw_config[0].external_phy_config); 11891 reg_val = REG_RD(sc, sc->devinfo.shmem_base + offset); 11892 if (sc->link_vars.phy_flags & PHY_XGXS_FLAG) 11893 ext_phy_type = ELINK_XGXS_EXT_PHY_TYPE(reg_val); 11894 else 11895 ext_phy_type = ELINK_SERDES_EXT_PHY_TYPE(reg_val); 11896 11897 /* Device features. */ 11898 PMD_DRV_LOG(INFO, sc, "%12s : %u", "Bnx2x Func", sc->pcie_func); 11899 PMD_DRV_LOG(INFO, sc, 11900 "%12s : %s", "Bnx2x Flags", get_bnx2x_flags(sc->flags)); 11901 PMD_DRV_LOG(INFO, sc, "%12s : %s", "DMAE Is", 11902 (sc->dmae_ready ? 
"Ready" : "Not Ready")); 11903 PMD_DRV_LOG(INFO, sc, "%12s : %u", "MTU", sc->mtu); 11904 PMD_DRV_LOG(INFO, sc, 11905 "%12s : %s", "PHY Type", get_ext_phy_type(ext_phy_type)); 11906 PMD_DRV_LOG(INFO, sc, "%12s : %x:%x:%x:%x:%x:%x", "MAC Addr", 11907 sc->link_params.mac_addr[0], 11908 sc->link_params.mac_addr[1], 11909 sc->link_params.mac_addr[2], 11910 sc->link_params.mac_addr[3], 11911 sc->link_params.mac_addr[4], 11912 sc->link_params.mac_addr[5]); 11913 PMD_DRV_LOG(INFO, sc, "%12s : %s", "RX Mode", get_rx_mode(sc->rx_mode)); 11914 PMD_DRV_LOG(INFO, sc, "%12s : %s", "State", get_state(sc->state)); 11915 if (sc->recovery_state) 11916 PMD_DRV_LOG(INFO, sc, "%12s : %s", "Recovery", 11917 get_recovery_state(sc->recovery_state)); 11918 /* Queue info. */ 11919 if (IS_PF(sc)) { 11920 switch (sc->sp->rss_rdata.rss_mode) { 11921 case ETH_RSS_MODE_DISABLED: 11922 PMD_DRV_LOG(INFO, sc, "%12s : %s", "Queues", "RSS mode - None"); 11923 break; 11924 case ETH_RSS_MODE_REGULAR: 11925 PMD_DRV_LOG(INFO, sc, "%12s : %s,", "Queues", "RSS mode - Regular"); 11926 PMD_DRV_LOG(INFO, sc, "%16d", sc->num_queues); 11927 break; 11928 default: 11929 PMD_DRV_LOG(INFO, sc, "%12s : %s", "Queues", "RSS mode - Unknown"); 11930 break; 11931 } 11932 } 11933 PMD_DRV_LOG(INFO, sc, "%12s : CQ = %lx, EQ = %lx", "SPQ Left", 11934 sc->cq_spq_left, sc->eq_spq_left); 11935 11936 PMD_DRV_LOG(INFO, sc, 11937 "%12s : %x", "Switch", sc->link_params.switch_cfg); 11938 PMD_DRV_LOG(INFO, sc, "pcie_bus=%d, pcie_device=%d", 11939 sc->pcie_bus, sc->pcie_device); 11940 PMD_DRV_LOG(INFO, sc, "bar0.addr=%p, bar1.addr=%p", 11941 sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr); 11942 PMD_DRV_LOG(INFO, sc, "port=%d, path=%d, vnic=%d, func=%d", 11943 PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc)); 11944 } 11945