1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2007-2013 Broadcom Corporation. 3 * 4 * Eric Davis <edavis@broadcom.com> 5 * David Christensen <davidch@broadcom.com> 6 * Gary Zambrano <zambrano@broadcom.com> 7 * 8 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. 9 * Copyright (c) 2015-2018 Cavium Inc. 10 * All rights reserved. 11 * www.cavium.com 12 */ 13 14 #define BNX2X_DRIVER_VERSION "1.78.18" 15 16 #include "bnx2x.h" 17 #include "bnx2x_vfpf.h" 18 #include "ecore_sp.h" 19 #include "ecore_init.h" 20 #include "ecore_init_ops.h" 21 22 #include "rte_version.h" 23 24 #include <sys/types.h> 25 #include <sys/stat.h> 26 #include <fcntl.h> 27 #include <zlib.h> 28 #include <rte_bitops.h> 29 #include <rte_string_fns.h> 30 31 #define BNX2X_PMD_VER_PREFIX "BNX2X PMD" 32 #define BNX2X_PMD_VERSION_MAJOR 1 33 #define BNX2X_PMD_VERSION_MINOR 1 34 #define BNX2X_PMD_VERSION_REVISION 0 35 #define BNX2X_PMD_VERSION_PATCH 1 36 37 static inline const char * 38 bnx2x_pmd_version(void) 39 { 40 static char version[32]; 41 42 snprintf(version, sizeof(version), "%s %s_%d.%d.%d.%d", 43 BNX2X_PMD_VER_PREFIX, 44 BNX2X_DRIVER_VERSION, 45 BNX2X_PMD_VERSION_MAJOR, 46 BNX2X_PMD_VERSION_MINOR, 47 BNX2X_PMD_VERSION_REVISION, 48 BNX2X_PMD_VERSION_PATCH); 49 50 return version; 51 } 52 53 static z_stream zlib_stream; 54 55 #define EVL_VLID_MASK 0x0FFF 56 57 #define BNX2X_DEF_SB_ATT_IDX 0x0001 58 #define BNX2X_DEF_SB_IDX 0x0002 59 60 /* 61 * FLR Support - bnx2x_pf_flr_clnup() is called during nic_load in the per 62 * function HW initialization. 63 */ 64 #define FLR_WAIT_USEC 10000 /* 10 msecs */ 65 #define FLR_WAIT_INTERVAL 50 /* usecs */ 66 #define FLR_POLL_CNT (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */ 67 68 struct pbf_pN_buf_regs { 69 int pN; 70 uint32_t init_crd; 71 uint32_t crd; 72 uint32_t crd_freed; 73 }; 74 75 struct pbf_pN_cmd_regs { 76 int pN; 77 uint32_t lines_occup; 78 uint32_t lines_freed; 79 }; 80 81 /* resources needed for unloading a previously loaded device */ 82 83 #define BNX2X_PREV_WAIT_NEEDED 1 84 rte_spinlock_t bnx2x_prev_mtx; 85 struct bnx2x_prev_list_node { 86 LIST_ENTRY(bnx2x_prev_list_node) node; 87 uint8_t bus; 88 uint8_t slot; 89 uint8_t path; 90 uint8_t aer; 91 uint8_t undi; 92 }; 93 94 static LIST_HEAD(, bnx2x_prev_list_node) bnx2x_prev_list 95 = LIST_HEAD_INITIALIZER(bnx2x_prev_list); 96 97 static int load_count[2][3] = { { 0 } }; 98 /* per-path: 0-common, 1-port0, 2-port1 */ 99 100 static void bnx2x_cmng_fns_init(struct bnx2x_softc *sc, uint8_t read_cfg, 101 uint8_t cmng_type); 102 static int bnx2x_get_cmng_fns_mode(struct bnx2x_softc *sc); 103 static void storm_memset_cmng(struct bnx2x_softc *sc, struct cmng_init *cmng, 104 uint8_t port); 105 static void bnx2x_set_reset_global(struct bnx2x_softc *sc); 106 static void bnx2x_set_reset_in_progress(struct bnx2x_softc *sc); 107 static uint8_t bnx2x_reset_is_done(struct bnx2x_softc *sc, int engine); 108 static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc); 109 static uint8_t bnx2x_chk_parity_attn(struct bnx2x_softc *sc, uint8_t * global, 110 uint8_t print); 111 static void bnx2x_int_disable(struct bnx2x_softc *sc); 112 static int bnx2x_release_leader_lock(struct bnx2x_softc *sc); 113 static void bnx2x_pf_disable(struct bnx2x_softc *sc); 114 static void bnx2x_update_rx_prod(struct bnx2x_softc *sc, 115 struct bnx2x_fastpath *fp, 116 uint16_t rx_bd_prod, uint16_t rx_cq_prod); 117 static void bnx2x_link_report_locked(struct bnx2x_softc *sc); 118 static void bnx2x_link_report(struct bnx2x_softc *sc); 119 void 
bnx2x_link_status_update(struct bnx2x_softc *sc); 120 static int bnx2x_alloc_mem(struct bnx2x_softc *sc); 121 static void bnx2x_free_mem(struct bnx2x_softc *sc); 122 static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc); 123 static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc); 124 static __rte_noinline 125 int bnx2x_nic_load(struct bnx2x_softc *sc); 126 127 static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc); 128 static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp); 129 static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, 130 uint8_t storm, uint16_t index, uint8_t op, 131 uint8_t update); 132 133 int bnx2x_cmpxchg(volatile int *addr, int old, int new) 134 { 135 return __sync_val_compare_and_swap(addr, old, new); 136 } 137 138 int 139 bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma, 140 const char *msg, uint32_t align) 141 { 142 char mz_name[RTE_MEMZONE_NAMESIZE]; 143 const struct rte_memzone *z; 144 145 dma->sc = sc; 146 if (IS_PF(sc)) 147 snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg, 148 rte_get_timer_cycles()); 149 else 150 snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg, 151 rte_get_timer_cycles()); 152 153 /* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */ 154 z = rte_memzone_reserve_aligned(mz_name, (uint64_t)size, 155 SOCKET_ID_ANY, 156 RTE_MEMZONE_IOVA_CONTIG, align); 157 if (z == NULL) { 158 PMD_DRV_LOG(ERR, sc, "DMA alloc failed for %s", msg); 159 return -ENOMEM; 160 } 161 dma->paddr = (uint64_t) z->iova; 162 dma->vaddr = z->addr; 163 dma->mzone = (const void *)z; 164 165 PMD_DRV_LOG(DEBUG, sc, 166 "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr); 167 168 return 0; 169 } 170 171 void bnx2x_dma_free(struct bnx2x_dma *dma) 172 { 173 if (dma->mzone == NULL) 174 return; 175 176 rte_memzone_free((const struct rte_memzone *)dma->mzone); 177 dma->sc = NULL; 178 dma->paddr = 0; 179 dma->vaddr = NULL; 180 dma->nseg = 0; 181 dma->mzone = NULL; 182 } 183 184 static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource) 185 { 186 uint32_t lock_status; 187 uint32_t resource_bit = (1 << resource); 188 int func = SC_FUNC(sc); 189 uint32_t hw_lock_control_reg; 190 int cnt; 191 192 #ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC 193 if (resource) 194 PMD_INIT_FUNC_TRACE(sc); 195 #else 196 PMD_INIT_FUNC_TRACE(sc); 197 #endif 198 199 /* validate the resource is within range */ 200 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 201 PMD_DRV_LOG(NOTICE, sc, 202 "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE", 203 resource); 204 return -1; 205 } 206 207 if (func <= 5) { 208 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); 209 } else { 210 hw_lock_control_reg = 211 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); 212 } 213 214 /* validate the resource is not already taken */ 215 lock_status = REG_RD(sc, hw_lock_control_reg); 216 if (lock_status & resource_bit) { 217 PMD_DRV_LOG(NOTICE, sc, 218 "resource in use (status 0x%x bit 0x%x)", 219 lock_status, resource_bit); 220 return -1; 221 } 222 223 /* try every 5ms for 5 seconds */ 224 for (cnt = 0; cnt < 1000; cnt++) { 225 REG_WR(sc, (hw_lock_control_reg + 4), resource_bit); 226 lock_status = REG_RD(sc, hw_lock_control_reg); 227 if (lock_status & resource_bit) { 228 return 0; 229 } 230 DELAY(5000); 231 } 232 233 PMD_DRV_LOG(NOTICE, sc, "Resource 0x%x resource_bit 0x%x lock timeout!", 234 resource, resource_bit); 235 return -1; 236 } 237 238 static int bnx2x_release_hw_lock(struct 
bnx2x_softc *sc, uint32_t resource) 239 { 240 uint32_t lock_status; 241 uint32_t resource_bit = (1 << resource); 242 int func = SC_FUNC(sc); 243 uint32_t hw_lock_control_reg; 244 245 #ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC 246 if (resource) 247 PMD_INIT_FUNC_TRACE(sc); 248 #else 249 PMD_INIT_FUNC_TRACE(sc); 250 #endif 251 252 /* validate the resource is within range */ 253 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 254 PMD_DRV_LOG(NOTICE, sc, 255 "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)" 256 " resource_bit 0x%x", resource, resource_bit); 257 return -1; 258 } 259 260 if (func <= 5) { 261 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); 262 } else { 263 hw_lock_control_reg = 264 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); 265 } 266 267 /* validate the resource is currently taken */ 268 lock_status = REG_RD(sc, hw_lock_control_reg); 269 if (!(lock_status & resource_bit)) { 270 PMD_DRV_LOG(NOTICE, sc, 271 "resource not in use (status 0x%x bit 0x%x)", 272 lock_status, resource_bit); 273 return -1; 274 } 275 276 REG_WR(sc, hw_lock_control_reg, resource_bit); 277 return 0; 278 } 279 280 static void bnx2x_acquire_phy_lock(struct bnx2x_softc *sc) 281 { 282 BNX2X_PHY_LOCK(sc); 283 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO); 284 } 285 286 static void bnx2x_release_phy_lock(struct bnx2x_softc *sc) 287 { 288 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO); 289 BNX2X_PHY_UNLOCK(sc); 290 } 291 292 /* copy command into DMAE command memory and set DMAE command Go */ 293 void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx) 294 { 295 uint32_t cmd_offset; 296 uint32_t i; 297 298 cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx)); 299 for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) { 300 REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *) dmae) + i)); 301 } 302 303 REG_WR(sc, dmae_reg_go_c[idx], 1); 304 } 305 306 uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type) 307 { 308 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) | 309 DMAE_COMMAND_C_TYPE_ENABLE); 310 } 311 312 uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode) 313 { 314 return opcode & ~DMAE_COMMAND_SRC_RESET; 315 } 316 317 uint32_t 318 bnx2x_dmae_opcode(struct bnx2x_softc * sc, uint8_t src_type, uint8_t dst_type, 319 uint8_t with_comp, uint8_t comp_type) 320 { 321 uint32_t opcode = 0; 322 323 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) | 324 (dst_type << DMAE_COMMAND_DST_SHIFT)); 325 326 opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET); 327 328 opcode |= (SC_PORT(sc) ? 
DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); 329 330 opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) | 331 (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT)); 332 333 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); 334 335 #ifdef __BIG_ENDIAN 336 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; 337 #else 338 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; 339 #endif 340 341 if (with_comp) { 342 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type); 343 } 344 345 return opcode; 346 } 347 348 static void 349 bnx2x_prep_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae, 350 uint8_t src_type, uint8_t dst_type) 351 { 352 memset(dmae, 0, sizeof(struct dmae_command)); 353 354 /* set the opcode */ 355 dmae->opcode = bnx2x_dmae_opcode(sc, src_type, dst_type, 356 TRUE, DMAE_COMP_PCI); 357 358 /* fill in the completion parameters */ 359 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, wb_comp)); 360 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, wb_comp)); 361 dmae->comp_val = DMAE_COMP_VAL; 362 } 363 364 /* issue a DMAE command over the init channel and wait for completion */ 365 static int 366 bnx2x_issue_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae) 367 { 368 uint32_t *wb_comp = BNX2X_SP(sc, wb_comp); 369 int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000; 370 371 /* reset completion */ 372 *wb_comp = 0; 373 374 /* post the command on the channel used for initializations */ 375 bnx2x_post_dmae(sc, dmae, INIT_DMAE_C(sc)); 376 377 /* wait for completion */ 378 DELAY(500); 379 380 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { 381 if (!timeout || 382 (sc->recovery_state != BNX2X_RECOVERY_DONE && 383 sc->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { 384 PMD_DRV_LOG(INFO, sc, "DMAE timeout!"); 385 return DMAE_TIMEOUT; 386 } 387 388 timeout--; 389 DELAY(50); 390 } 391 392 if (*wb_comp & DMAE_PCI_ERR_FLAG) { 393 PMD_DRV_LOG(INFO, sc, "DMAE PCI error!"); 394 return DMAE_PCI_ERROR; 395 } 396 397 return 0; 398 } 399 400 void bnx2x_read_dmae(struct bnx2x_softc *sc, uint32_t src_addr, uint32_t len32) 401 { 402 struct dmae_command dmae; 403 uint32_t *data; 404 uint32_t i; 405 int rc; 406 407 if (!sc->dmae_ready) { 408 data = BNX2X_SP(sc, wb_data[0]); 409 410 for (i = 0; i < len32; i++) { 411 data[i] = REG_RD(sc, (src_addr + (i * 4))); 412 } 413 414 return; 415 } 416 417 /* set opcode and fixed command fields */ 418 bnx2x_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); 419 420 /* fill in addresses and len */ 421 dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */ 422 dmae.src_addr_hi = 0; 423 dmae.dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, wb_data)); 424 dmae.dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, wb_data)); 425 dmae.len = len32; 426 427 /* issue the command and wait for completion */ 428 if ((rc = bnx2x_issue_dmae_with_comp(sc, &dmae)) != 0) { 429 rte_panic("DMAE failed (%d)", rc); 430 }; 431 } 432 433 void 434 bnx2x_write_dmae(struct bnx2x_softc *sc, rte_iova_t dma_addr, uint32_t dst_addr, 435 uint32_t len32) 436 { 437 struct dmae_command dmae; 438 int rc; 439 440 if (!sc->dmae_ready) { 441 ecore_init_str_wr(sc, dst_addr, BNX2X_SP(sc, wb_data[0]), len32); 442 return; 443 } 444 445 /* set opcode and fixed command fields */ 446 bnx2x_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); 447 448 /* fill in addresses and len */ 449 dmae.src_addr_lo = U64_LO(dma_addr); 450 dmae.src_addr_hi = U64_HI(dma_addr); 451 dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */ 452 dmae.dst_addr_hi = 0; 453 dmae.len = len32; 454 455 /* 
issue the command and wait for completion */ 456 if ((rc = bnx2x_issue_dmae_with_comp(sc, &dmae)) != 0) { 457 rte_panic("DMAE failed (%d)", rc); 458 } 459 } 460 461 static void 462 bnx2x_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr, 463 uint32_t addr, uint32_t len) 464 { 465 uint32_t dmae_wr_max = DMAE_LEN32_WR_MAX(sc); 466 uint32_t offset = 0; 467 468 while (len > dmae_wr_max) { 469 bnx2x_write_dmae(sc, (phys_addr + offset), /* src DMA address */ 470 (addr + offset), /* dst GRC address */ 471 dmae_wr_max); 472 offset += (dmae_wr_max * 4); 473 len -= dmae_wr_max; 474 } 475 476 bnx2x_write_dmae(sc, (phys_addr + offset), /* src DMA address */ 477 (addr + offset), /* dst GRC address */ 478 len); 479 } 480 481 void 482 bnx2x_set_ctx_validation(struct bnx2x_softc *sc, struct eth_context *cxt, 483 uint32_t cid) 484 { 485 /* ustorm cxt validation */ 486 cxt->ustorm_ag_context.cdu_usage = 487 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), 488 CDU_REGION_NUMBER_UCM_AG, 489 ETH_CONNECTION_TYPE); 490 /* xcontext validation */ 491 cxt->xstorm_ag_context.cdu_reserved = 492 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), 493 CDU_REGION_NUMBER_XCM_AG, 494 ETH_CONNECTION_TYPE); 495 } 496 497 static void 498 bnx2x_storm_memset_hc_timeout(struct bnx2x_softc *sc, uint8_t fw_sb_id, 499 uint8_t sb_index, uint8_t ticks) 500 { 501 uint32_t addr = 502 (BAR_CSTRORM_INTMEM + 503 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index)); 504 505 REG_WR8(sc, addr, ticks); 506 } 507 508 static void 509 bnx2x_storm_memset_hc_disable(struct bnx2x_softc *sc, uint16_t fw_sb_id, 510 uint8_t sb_index, uint8_t disable) 511 { 512 uint32_t enable_flag = 513 (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); 514 uint32_t addr = 515 (BAR_CSTRORM_INTMEM + 516 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index)); 517 uint8_t flags; 518 519 /* clear and set */ 520 flags = REG_RD8(sc, addr); 521 flags &= ~HC_INDEX_DATA_HC_ENABLED; 522 flags |= enable_flag; 523 REG_WR8(sc, addr, flags); 524 } 525 526 void 527 bnx2x_update_coalesce_sb_index(struct bnx2x_softc *sc, uint8_t fw_sb_id, 528 uint8_t sb_index, uint8_t disable, uint16_t usec) 529 { 530 uint8_t ticks = (usec / 4); 531 532 bnx2x_storm_memset_hc_timeout(sc, fw_sb_id, sb_index, ticks); 533 534 disable = (disable) ? 1 : ((usec) ? 0 : 1); 535 bnx2x_storm_memset_hc_disable(sc, fw_sb_id, sb_index, disable); 536 } 537 538 uint32_t elink_cb_reg_read(struct bnx2x_softc *sc, uint32_t reg_addr) 539 { 540 return REG_RD(sc, reg_addr); 541 } 542 543 void elink_cb_reg_write(struct bnx2x_softc *sc, uint32_t reg_addr, uint32_t val) 544 { 545 REG_WR(sc, reg_addr, val); 546 } 547 548 void 549 elink_cb_event_log(__rte_unused struct bnx2x_softc *sc, 550 __rte_unused const elink_log_id_t elink_log_id, ...) 
551 { 552 PMD_DRV_LOG(DEBUG, sc, "ELINK EVENT LOG (%d)", elink_log_id); 553 } 554 555 static int bnx2x_set_spio(struct bnx2x_softc *sc, int spio, uint32_t mode) 556 { 557 uint32_t spio_reg; 558 559 /* Only 2 SPIOs are configurable */ 560 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { 561 PMD_DRV_LOG(NOTICE, sc, "Invalid SPIO 0x%x", spio); 562 return -1; 563 } 564 565 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 566 567 /* read SPIO and mask except the float bits */ 568 spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); 569 570 switch (mode) { 571 case MISC_SPIO_OUTPUT_LOW: 572 /* clear FLOAT and set CLR */ 573 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 574 spio_reg |= (spio << MISC_SPIO_CLR_POS); 575 break; 576 577 case MISC_SPIO_OUTPUT_HIGH: 578 /* clear FLOAT and set SET */ 579 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 580 spio_reg |= (spio << MISC_SPIO_SET_POS); 581 break; 582 583 case MISC_SPIO_INPUT_HI_Z: 584 /* set FLOAT */ 585 spio_reg |= (spio << MISC_SPIO_FLOAT_POS); 586 break; 587 588 default: 589 break; 590 } 591 592 REG_WR(sc, MISC_REG_SPIO, spio_reg); 593 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 594 595 return 0; 596 } 597 598 static int bnx2x_gpio_read(struct bnx2x_softc *sc, int gpio_num, uint8_t port) 599 { 600 /* The GPIO should be swapped if swap register is set and active */ 601 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 602 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 603 int gpio_shift = gpio_num; 604 if (gpio_port) 605 gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT; 606 607 uint32_t gpio_mask = (1 << gpio_shift); 608 uint32_t gpio_reg; 609 610 if (gpio_num > MISC_REGISTERS_GPIO_3) { 611 PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num); 612 return -1; 613 } 614 615 /* read GPIO value */ 616 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 617 618 /* get the requested pin value */ 619 return ((gpio_reg & gpio_mask) == gpio_mask) ? 
1 : 0; 620 } 621 622 static int 623 bnx2x_gpio_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode, uint8_t port) 624 { 625 /* The GPIO should be swapped if swap register is set and active */ 626 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 627 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 628 int gpio_shift = gpio_num; 629 if (gpio_port) 630 gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT; 631 632 uint32_t gpio_mask = (1 << gpio_shift); 633 uint32_t gpio_reg; 634 635 if (gpio_num > MISC_REGISTERS_GPIO_3) { 636 PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num); 637 return -1; 638 } 639 640 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 641 642 /* read GPIO and mask except the float bits */ 643 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 644 645 switch (mode) { 646 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 647 /* clear FLOAT and set CLR */ 648 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 649 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 650 break; 651 652 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 653 /* clear FLOAT and set SET */ 654 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 655 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 656 break; 657 658 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 659 /* set FLOAT */ 660 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 661 break; 662 663 default: 664 break; 665 } 666 667 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 668 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 669 670 return 0; 671 } 672 673 static int 674 bnx2x_gpio_mult_write(struct bnx2x_softc *sc, uint8_t pins, uint32_t mode) 675 { 676 uint32_t gpio_reg; 677 678 /* any port swapping should be handled by caller */ 679 680 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 681 682 /* read GPIO and mask except the float bits */ 683 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 684 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 685 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 686 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 687 688 switch (mode) { 689 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 690 /* set CLR */ 691 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 692 break; 693 694 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 695 /* set SET */ 696 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 697 break; 698 699 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 700 /* set FLOAT */ 701 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 702 break; 703 704 default: 705 PMD_DRV_LOG(NOTICE, sc, 706 "Invalid GPIO mode assignment %d", mode); 707 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 708 return -1; 709 } 710 711 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 712 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 713 714 return 0; 715 } 716 717 static int 718 bnx2x_gpio_int_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode, 719 uint8_t port) 720 { 721 /* The GPIO should be swapped if swap register is set and active */ 722 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 723 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 724 int gpio_shift = gpio_num; 725 if (gpio_port) 726 gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT; 727 728 uint32_t gpio_mask = (1 << gpio_shift); 729 uint32_t gpio_reg; 730 731 if (gpio_num > MISC_REGISTERS_GPIO_3) { 732 PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num); 733 return -1; 734 } 735 736 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 737 738 /* read GPIO int */ 739 gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); 740 741 switch (mode) { 742 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: 743 /* clear SET 
and set CLR */ 744 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 745 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 746 break; 747 748 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: 749 /* clear CLR and set SET */ 750 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 751 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 752 break; 753 754 default: 755 break; 756 } 757 758 REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg); 759 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 760 761 return 0; 762 } 763 764 uint32_t 765 elink_cb_gpio_read(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t port) 766 { 767 return bnx2x_gpio_read(sc, gpio_num, port); 768 } 769 770 uint8_t elink_cb_gpio_write(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ 771 uint8_t port) 772 { 773 return bnx2x_gpio_write(sc, gpio_num, mode, port); 774 } 775 776 uint8_t 777 elink_cb_gpio_mult_write(struct bnx2x_softc * sc, uint8_t pins, 778 uint8_t mode /* 0=low 1=high */ ) 779 { 780 return bnx2x_gpio_mult_write(sc, pins, mode); 781 } 782 783 uint8_t elink_cb_gpio_int_write(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ 784 uint8_t port) 785 { 786 return bnx2x_gpio_int_write(sc, gpio_num, mode, port); 787 } 788 789 void elink_cb_notify_link_changed(struct bnx2x_softc *sc) 790 { 791 REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 + 792 (SC_FUNC(sc) * sizeof(uint32_t))), 1); 793 } 794 795 /* send the MCP a request, block until there is a reply */ 796 uint32_t 797 elink_cb_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param) 798 { 799 int mb_idx = SC_FW_MB_IDX(sc); 800 uint32_t seq; 801 uint32_t rc = 0; 802 uint32_t cnt = 1; 803 uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10; 804 805 seq = ++sc->fw_seq; 806 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param); 807 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq)); 808 809 PMD_DRV_LOG(DEBUG, sc, 810 "wrote command 0x%08x to FW MB param 0x%08x", 811 (command | seq), param); 812 813 /* Let the FW do it's magic. GIve it up to 5 seconds... */ 814 do { 815 DELAY(delay * 1000); 816 rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header); 817 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); 818 819 /* is this a reply to our command? */ 820 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { 821 rc &= FW_MSG_CODE_MASK; 822 } else { 823 /* Ruh-roh! 
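		 * The MCP never echoed our sequence number back in
		 * fw_mb_header within the poll window above, so report
		 * the failure and return 0 (i.e. no valid response code).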
*/ 824 PMD_DRV_LOG(NOTICE, sc, "FW failed to respond!"); 825 rc = 0; 826 } 827 828 return rc; 829 } 830 831 static uint32_t 832 bnx2x_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param) 833 { 834 return elink_cb_fw_command(sc, command, param); 835 } 836 837 static void 838 __storm_memset_dma_mapping(struct bnx2x_softc *sc, uint32_t addr, 839 rte_iova_t mapping) 840 { 841 REG_WR(sc, addr, U64_LO(mapping)); 842 REG_WR(sc, (addr + 4), U64_HI(mapping)); 843 } 844 845 static void 846 storm_memset_spq_addr(struct bnx2x_softc *sc, rte_iova_t mapping, 847 uint16_t abs_fid) 848 { 849 uint32_t addr = (XSEM_REG_FAST_MEMORY + 850 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid)); 851 __storm_memset_dma_mapping(sc, addr, mapping); 852 } 853 854 static void 855 storm_memset_vf_to_pf(struct bnx2x_softc *sc, uint16_t abs_fid, uint16_t pf_id) 856 { 857 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), 858 pf_id); 859 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), 860 pf_id); 861 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), 862 pf_id); 863 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), 864 pf_id); 865 } 866 867 static void 868 storm_memset_func_en(struct bnx2x_softc *sc, uint16_t abs_fid, uint8_t enable) 869 { 870 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), 871 enable); 872 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), 873 enable); 874 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), 875 enable); 876 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), 877 enable); 878 } 879 880 static void 881 storm_memset_eq_data(struct bnx2x_softc *sc, struct event_ring_data *eq_data, 882 uint16_t pfid) 883 { 884 uint32_t addr; 885 size_t size; 886 887 addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid)); 888 size = sizeof(struct event_ring_data); 889 ecore_storm_memset_struct(sc, addr, size, (uint32_t *) eq_data); 890 } 891 892 static void 893 storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid) 894 { 895 uint32_t addr = (BAR_CSTRORM_INTMEM + 896 CSTORM_EVENT_RING_PROD_OFFSET(pfid)); 897 REG_WR16(sc, addr, eq_prod); 898 } 899 900 /* 901 * Post a slowpath command. 902 * 903 * A slowpath command is used to propagate a configuration change through 904 * the controller in a controlled manner, allowing each STORM processor and 905 * other H/W blocks to phase in the change. The commands sent on the 906 * slowpath are referred to as ramrods. Depending on the ramrod used the 907 * completion of the ramrod will occur in different ways. Here's a 908 * breakdown of ramrods and how they complete: 909 * 910 * RAMROD_CMD_ID_ETH_PORT_SETUP 911 * Used to setup the leading connection on a port. Completes on the 912 * Receive Completion Queue (RCQ) of that port (typically fp[0]). 913 * 914 * RAMROD_CMD_ID_ETH_CLIENT_SETUP 915 * Used to setup an additional connection on a port. Completes on the 916 * RCQ of the multi-queue/RSS connection being initialized. 917 * 918 * RAMROD_CMD_ID_ETH_STAT_QUERY 919 * Used to force the storm processors to update the statistics database 920 * in host memory. This ramrod is send on the leading connection CID and 921 * completes as an index increment of the CSTORM on the default status 922 * block. 923 * 924 * RAMROD_CMD_ID_ETH_UPDATE 925 * Used to update the state of the leading connection, usually to udpate 926 * the RSS indirection table. Completes on the RCQ of the leading 927 * connection. 
(Not currently used under FreeBSD until OS support becomes 928 * available.) 929 * 930 * RAMROD_CMD_ID_ETH_HALT 931 * Used when tearing down a connection prior to driver unload. Completes 932 * on the RCQ of the multi-queue/RSS connection being torn down. Don't 933 * use this on the leading connection. 934 * 935 * RAMROD_CMD_ID_ETH_SET_MAC 936 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on 937 * the RCQ of the leading connection. 938 * 939 * RAMROD_CMD_ID_ETH_CFC_DEL 940 * Used when tearing down a conneciton prior to driver unload. Completes 941 * on the RCQ of the leading connection (since the current connection 942 * has been completely removed from controller memory). 943 * 944 * RAMROD_CMD_ID_ETH_PORT_DEL 945 * Used to tear down the leading connection prior to driver unload, 946 * typically fp[0]. Completes as an index increment of the CSTORM on the 947 * default status block. 948 * 949 * RAMROD_CMD_ID_ETH_FORWARD_SETUP 950 * Used for connection offload. Completes on the RCQ of the multi-queue 951 * RSS connection that is being offloaded. (Not currently used under 952 * FreeBSD.) 953 * 954 * There can only be one command pending per function. 955 * 956 * Returns: 957 * 0 = Success, !0 = Failure. 958 */ 959 960 /* must be called under the spq lock */ 961 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x_softc *sc) 962 { 963 struct eth_spe *next_spe = sc->spq_prod_bd; 964 965 if (sc->spq_prod_bd == sc->spq_last_bd) { 966 /* wrap back to the first eth_spq */ 967 sc->spq_prod_bd = sc->spq; 968 sc->spq_prod_idx = 0; 969 } else { 970 sc->spq_prod_bd++; 971 sc->spq_prod_idx++; 972 } 973 974 return next_spe; 975 } 976 977 /* must be called under the spq lock */ 978 static void bnx2x_sp_prod_update(struct bnx2x_softc *sc) 979 { 980 int func = SC_FUNC(sc); 981 982 /* 983 * Make sure that BD data is updated before writing the producer. 984 * BD data is written to the memory, the producer is read from the 985 * memory, thus we need a full memory barrier to ensure the ordering. 986 */ 987 mb(); 988 989 REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)), 990 sc->spq_prod_idx); 991 992 mb(); 993 } 994 995 /** 996 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ 997 * 998 * @cmd: command to check 999 * @cmd_type: command type 1000 */ 1001 static int bnx2x_is_contextless_ramrod(int cmd, int cmd_type) 1002 { 1003 if ((cmd_type == NONE_CONNECTION_TYPE) || 1004 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || 1005 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || 1006 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || 1007 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || 1008 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || 1009 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { 1010 return TRUE; 1011 } else { 1012 return FALSE; 1013 } 1014 } 1015 1016 /** 1017 * bnx2x_sp_post - place a single command on an SP ring 1018 * 1019 * @sc: driver handle 1020 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) 1021 * @cid: SW CID the command is related to 1022 * @data_hi: command private data address (high 32 bits) 1023 * @data_lo: command private data address (low 32 bits) 1024 * @cmd_type: command type (e.g. NONE, ETH) 1025 * 1026 * SP data is handled as if it's always an address pair, thus data fields are 1027 * not swapped to little endian in upper functions. Instead this function swaps 1028 * data as if it's two uint32 fields. 
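 *
 * Illustrative sketch only (not a call site taken from this driver),
 * assuming a ramrod data buffer already DMA-mapped at a hypothetical
 * rdata_mapping:
 *
 *   rc = bnx2x_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, cid,
 *                      U64_HI(rdata_mapping), U64_LO(rdata_mapping),
 *                      ETH_CONNECTION_TYPE);
 *
 * Returns 0 on success and -1 when the EQ/SPQ ring has no free entries,
 * in which case the caller should retry once completions free up space.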
1029 */ 1030 int 1031 bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, uint32_t data_hi, 1032 uint32_t data_lo, int cmd_type) 1033 { 1034 struct eth_spe *spe; 1035 uint16_t type; 1036 int common; 1037 1038 common = bnx2x_is_contextless_ramrod(command, cmd_type); 1039 1040 if (common) { 1041 if (!atomic_load_acq_long(&sc->eq_spq_left)) { 1042 PMD_DRV_LOG(INFO, sc, "EQ ring is full!"); 1043 return -1; 1044 } 1045 } else { 1046 if (!atomic_load_acq_long(&sc->cq_spq_left)) { 1047 PMD_DRV_LOG(INFO, sc, "SPQ ring is full!"); 1048 return -1; 1049 } 1050 } 1051 1052 spe = bnx2x_sp_get_next(sc); 1053 1054 /* CID needs port number to be encoded int it */ 1055 spe->hdr.conn_and_cmd_data = 1056 htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid)); 1057 1058 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; 1059 1060 /* TBD: Check if it works for VFs */ 1061 type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) & 1062 SPE_HDR_FUNCTION_ID); 1063 1064 spe->hdr.type = htole16(type); 1065 1066 spe->data.update_data_addr.hi = htole32(data_hi); 1067 spe->data.update_data_addr.lo = htole32(data_lo); 1068 1069 /* 1070 * It's ok if the actual decrement is issued towards the memory 1071 * somewhere between the lock and unlock. Thus no more explict 1072 * memory barrier is needed. 1073 */ 1074 if (common) { 1075 atomic_subtract_acq_long(&sc->eq_spq_left, 1); 1076 } else { 1077 atomic_subtract_acq_long(&sc->cq_spq_left, 1); 1078 } 1079 1080 PMD_DRV_LOG(DEBUG, sc, 1081 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x" 1082 "data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)", 1083 sc->spq_prod_idx, 1084 (uint32_t) U64_HI(sc->spq_dma.paddr), 1085 (uint32_t) (U64_LO(sc->spq_dma.paddr) + 1086 (uint8_t *) sc->spq_prod_bd - 1087 (uint8_t *) sc->spq), command, common, 1088 HW_CID(sc, cid), data_hi, data_lo, type, 1089 atomic_load_acq_long(&sc->cq_spq_left), 1090 atomic_load_acq_long(&sc->eq_spq_left)); 1091 1092 /* RAMROD completion is processed in bnx2x_intr_legacy() 1093 * which can run from different contexts. 1094 * Ask bnx2x_intr_intr() to process RAMROD 1095 * completion whenever it gets scheduled. 
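	 * Setting sc->scan_fp below is that request: it flags the
	 * interrupt path to also scan the fastpath rings so the
	 * slowpath completion CQE gets picked up.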
1096 */ 1097 rte_atomic32_set(&sc->scan_fp, 1); 1098 bnx2x_sp_prod_update(sc); 1099 1100 return 0; 1101 } 1102 1103 static void bnx2x_drv_pulse(struct bnx2x_softc *sc) 1104 { 1105 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb, 1106 sc->fw_drv_pulse_wr_seq); 1107 } 1108 1109 static int bnx2x_tx_queue_has_work(const struct bnx2x_fastpath *fp) 1110 { 1111 uint16_t hw_cons; 1112 struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; 1113 1114 if (unlikely(!txq)) { 1115 PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); 1116 return 0; 1117 } 1118 1119 mb(); /* status block fields can change */ 1120 hw_cons = le16toh(*fp->tx_cons_sb); 1121 return hw_cons != txq->tx_pkt_head; 1122 } 1123 1124 static uint8_t bnx2x_has_tx_work(struct bnx2x_fastpath *fp) 1125 { 1126 /* expand this for multi-cos if ever supported */ 1127 return bnx2x_tx_queue_has_work(fp); 1128 } 1129 1130 static int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) 1131 { 1132 uint16_t rx_cq_cons_sb; 1133 struct bnx2x_rx_queue *rxq; 1134 rxq = fp->sc->rx_queues[fp->index]; 1135 if (unlikely(!rxq)) { 1136 PMD_RX_LOG(ERR, "ERROR: RX queue is NULL"); 1137 return 0; 1138 } 1139 1140 mb(); /* status block fields can change */ 1141 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); 1142 if (unlikely((rx_cq_cons_sb & MAX_RCQ_ENTRIES(rxq)) == 1143 MAX_RCQ_ENTRIES(rxq))) 1144 rx_cq_cons_sb++; 1145 1146 PMD_RX_LOG(DEBUG, "hw CQ cons = %d, sw CQ cons = %d", 1147 rx_cq_cons_sb, rxq->rx_cq_head); 1148 1149 return rxq->rx_cq_head != rx_cq_cons_sb; 1150 } 1151 1152 static void 1153 bnx2x_sp_event(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, 1154 union eth_rx_cqe *rr_cqe) 1155 { 1156 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); 1157 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); 1158 enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX; 1159 struct ecore_queue_sp_obj *q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj; 1160 1161 PMD_DRV_LOG(DEBUG, sc, 1162 "fp=%d cid=%d got ramrod #%d state is %x type is %d", 1163 fp->index, cid, command, sc->state, 1164 rr_cqe->ramrod_cqe.ramrod_type); 1165 1166 switch (command) { 1167 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): 1168 PMD_DRV_LOG(DEBUG, sc, "got UPDATE ramrod. CID %d", cid); 1169 drv_cmd = ECORE_Q_CMD_UPDATE; 1170 break; 1171 1172 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): 1173 PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] setup ramrod", cid); 1174 drv_cmd = ECORE_Q_CMD_SETUP; 1175 break; 1176 1177 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): 1178 PMD_DRV_LOG(DEBUG, sc, 1179 "got MULTI[%d] tx-only setup ramrod", cid); 1180 drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY; 1181 break; 1182 1183 case (RAMROD_CMD_ID_ETH_HALT): 1184 PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] halt ramrod", cid); 1185 drv_cmd = ECORE_Q_CMD_HALT; 1186 break; 1187 1188 case (RAMROD_CMD_ID_ETH_TERMINATE): 1189 PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] teminate ramrod", cid); 1190 drv_cmd = ECORE_Q_CMD_TERMINATE; 1191 break; 1192 1193 case (RAMROD_CMD_ID_ETH_EMPTY): 1194 PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] empty ramrod", cid); 1195 drv_cmd = ECORE_Q_CMD_EMPTY; 1196 break; 1197 1198 default: 1199 PMD_DRV_LOG(DEBUG, sc, 1200 "ERROR: unexpected MC reply (%d)" 1201 "on fp[%d]", command, fp->index); 1202 return; 1203 } 1204 1205 if ((drv_cmd != ECORE_Q_CMD_MAX) && 1206 q_obj->complete_cmd(sc, q_obj, drv_cmd)) { 1207 /* 1208 * q_obj->complete_cmd() failure means that this was 1209 * an unexpected completion. 1210 * 1211 * In this case we don't want to increase the sc->spq_left 1212 * because apparently we haven't sent this command the first 1213 * place. 
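		 * So simply return here without crediting cq_spq_left back.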
1214 */ 1215 // rte_panic("Unexpected SP completion"); 1216 return; 1217 } 1218 1219 atomic_add_acq_long(&sc->cq_spq_left, 1); 1220 1221 PMD_DRV_LOG(DEBUG, sc, "sc->cq_spq_left 0x%lx", 1222 atomic_load_acq_long(&sc->cq_spq_left)); 1223 } 1224 1225 static uint8_t bnx2x_rxeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp) 1226 { 1227 struct bnx2x_rx_queue *rxq; 1228 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; 1229 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; 1230 1231 rte_spinlock_lock(&(fp)->rx_mtx); 1232 1233 rxq = sc->rx_queues[fp->index]; 1234 if (!rxq) { 1235 PMD_RX_LOG(ERR, "RX queue %d is NULL", fp->index); 1236 rte_spinlock_unlock(&(fp)->rx_mtx); 1237 return 0; 1238 } 1239 1240 /* CQ "next element" is of the size of the regular element */ 1241 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); 1242 if (unlikely((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) == 1243 USABLE_RCQ_ENTRIES_PER_PAGE)) { 1244 hw_cq_cons++; 1245 } 1246 1247 bd_cons = rxq->rx_bd_head; 1248 bd_prod = rxq->rx_bd_tail; 1249 bd_prod_fw = bd_prod; 1250 sw_cq_cons = rxq->rx_cq_head; 1251 sw_cq_prod = rxq->rx_cq_tail; 1252 1253 /* 1254 * Memory barrier necessary as speculative reads of the rx 1255 * buffer can be ahead of the index in the status block 1256 */ 1257 rmb(); 1258 1259 while (sw_cq_cons != hw_cq_cons) { 1260 union eth_rx_cqe *cqe; 1261 struct eth_fast_path_rx_cqe *cqe_fp; 1262 uint8_t cqe_fp_flags; 1263 enum eth_rx_cqe_type cqe_fp_type; 1264 1265 comp_ring_cons = RCQ_ENTRY(sw_cq_cons, rxq); 1266 bd_prod = RX_BD(bd_prod, rxq); 1267 bd_cons = RX_BD(bd_cons, rxq); 1268 1269 cqe = &rxq->cq_ring[comp_ring_cons]; 1270 cqe_fp = &cqe->fast_path_cqe; 1271 cqe_fp_flags = cqe_fp->type_error_flags; 1272 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 1273 1274 /* is this a slowpath msg? */ 1275 if (CQE_TYPE_SLOW(cqe_fp_type)) { 1276 bnx2x_sp_event(sc, fp, cqe); 1277 goto next_cqe; 1278 } 1279 1280 /* is this an error packet? 
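		 * (i.e. did the chip flag a PHY decode error in this CQE?)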
*/ 1281 if (unlikely(cqe_fp_flags & 1282 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { 1283 PMD_RX_LOG(DEBUG, "flags 0x%x rx packet %u", 1284 cqe_fp_flags, sw_cq_cons); 1285 goto next_rx; 1286 } 1287 1288 PMD_RX_LOG(DEBUG, "Dropping fastpath called from attn poller!"); 1289 1290 next_rx: 1291 bd_cons = NEXT_RX_BD(bd_cons); 1292 bd_prod = NEXT_RX_BD(bd_prod); 1293 bd_prod_fw = NEXT_RX_BD(bd_prod_fw); 1294 1295 next_cqe: 1296 sw_cq_prod = NEXT_RCQ_IDX(sw_cq_prod); 1297 sw_cq_cons = NEXT_RCQ_IDX(sw_cq_cons); 1298 1299 } /* while work to do */ 1300 1301 rxq->rx_bd_head = bd_cons; 1302 rxq->rx_bd_tail = bd_prod_fw; 1303 rxq->rx_cq_head = sw_cq_cons; 1304 rxq->rx_cq_tail = sw_cq_prod; 1305 1306 PMD_RX_LOG(DEBUG, "BD prod = %d, sw CQ prod = %d", 1307 bd_prod_fw, sw_cq_prod); 1308 1309 /* Update producers */ 1310 bnx2x_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod); 1311 1312 rte_spinlock_unlock(&(fp)->rx_mtx); 1313 1314 return sw_cq_cons != hw_cq_cons; 1315 } 1316 1317 static uint16_t 1318 bnx2x_free_tx_pkt(__rte_unused struct bnx2x_fastpath *fp, struct bnx2x_tx_queue *txq, 1319 uint16_t pkt_idx, uint16_t bd_idx) 1320 { 1321 struct eth_tx_start_bd *tx_start_bd = 1322 &txq->tx_ring[TX_BD(bd_idx, txq)].start_bd; 1323 uint16_t nbd = rte_le_to_cpu_16(tx_start_bd->nbd); 1324 struct rte_mbuf *tx_mbuf = txq->sw_ring[TX_BD(pkt_idx, txq)]; 1325 1326 if (likely(tx_mbuf != NULL)) { 1327 rte_pktmbuf_free_seg(tx_mbuf); 1328 } else { 1329 PMD_RX_LOG(ERR, "fp[%02d] lost mbuf %lu", 1330 fp->index, (unsigned long)TX_BD(pkt_idx, txq)); 1331 } 1332 1333 txq->sw_ring[TX_BD(pkt_idx, txq)] = NULL; 1334 txq->nb_tx_avail += nbd; 1335 1336 while (nbd--) 1337 bd_idx = NEXT_TX_BD(bd_idx); 1338 1339 return bd_idx; 1340 } 1341 1342 /* processes transmit completions */ 1343 uint8_t bnx2x_txeof(__rte_unused struct bnx2x_softc * sc, struct bnx2x_fastpath * fp) 1344 { 1345 uint16_t bd_cons, hw_cons, sw_cons; 1346 __rte_unused uint16_t tx_bd_avail; 1347 1348 struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; 1349 1350 if (unlikely(!txq)) { 1351 PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); 1352 return 0; 1353 } 1354 1355 bd_cons = txq->tx_bd_head; 1356 hw_cons = rte_le_to_cpu_16(*fp->tx_cons_sb); 1357 sw_cons = txq->tx_pkt_head; 1358 1359 while (sw_cons != hw_cons) { 1360 bd_cons = bnx2x_free_tx_pkt(fp, txq, sw_cons, bd_cons); 1361 sw_cons++; 1362 } 1363 1364 txq->tx_pkt_head = sw_cons; 1365 txq->tx_bd_head = bd_cons; 1366 1367 tx_bd_avail = txq->nb_tx_avail; 1368 1369 PMD_TX_LOG(DEBUG, "fp[%02d] avail=%u cons_sb=%u, " 1370 "pkt_head=%u pkt_tail=%u bd_head=%u bd_tail=%u", 1371 fp->index, tx_bd_avail, hw_cons, 1372 txq->tx_pkt_head, txq->tx_pkt_tail, 1373 txq->tx_bd_head, txq->tx_bd_tail); 1374 return TRUE; 1375 } 1376 1377 static void bnx2x_drain_tx_queues(struct bnx2x_softc *sc) 1378 { 1379 struct bnx2x_fastpath *fp; 1380 int i, count; 1381 1382 /* wait until all TX fastpath tasks have completed */ 1383 for (i = 0; i < sc->num_queues; i++) { 1384 fp = &sc->fp[i]; 1385 1386 count = 1000; 1387 1388 while (bnx2x_has_tx_work(fp)) { 1389 bnx2x_txeof(sc, fp); 1390 1391 if (count == 0) { 1392 PMD_TX_LOG(ERR, 1393 "Timeout waiting for fp[%d] " 1394 "transmits to complete!", i); 1395 rte_panic("tx drain failure"); 1396 return; 1397 } 1398 1399 count--; 1400 DELAY(1000); 1401 rmb(); 1402 } 1403 } 1404 1405 return; 1406 } 1407 1408 static int 1409 bnx2x_del_all_macs(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *mac_obj, 1410 int mac_type, uint8_t wait_for_comp) 1411 { 1412 uint32_t ramrod_flags = 0, vlan_mac_flags = 0; 1413 int 
rc; 1414 1415 /* wait for completion of requested */ 1416 if (wait_for_comp) { 1417 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &ramrod_flags); 1418 } 1419 1420 /* Set the mac type of addresses we want to clear */ 1421 rte_bit_relaxed_set32(mac_type, &vlan_mac_flags); 1422 1423 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); 1424 if (rc < 0) 1425 PMD_DRV_LOG(ERR, sc, "Failed to delete MACs (%d)", rc); 1426 1427 return rc; 1428 } 1429 1430 static int 1431 bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode, 1432 uint32_t *rx_accept_flags, uint32_t *tx_accept_flags) 1433 { 1434 /* Clear the flags first */ 1435 *rx_accept_flags = 0; 1436 *tx_accept_flags = 0; 1437 1438 switch (rx_mode) { 1439 case BNX2X_RX_MODE_NONE: 1440 /* 1441 * 'drop all' supersedes any accept flags that may have been 1442 * passed to the function. 1443 */ 1444 break; 1445 1446 case BNX2X_RX_MODE_NORMAL: 1447 rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, rx_accept_flags); 1448 rte_bit_relaxed_set32(ECORE_ACCEPT_MULTICAST, rx_accept_flags); 1449 rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 1450 1451 /* internal switching mode */ 1452 rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, tx_accept_flags); 1453 rte_bit_relaxed_set32(ECORE_ACCEPT_MULTICAST, tx_accept_flags); 1454 rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 1455 1456 break; 1457 1458 case BNX2X_RX_MODE_ALLMULTI: 1459 rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, rx_accept_flags); 1460 rte_bit_relaxed_set32(ECORE_ACCEPT_ALL_MULTICAST, 1461 rx_accept_flags); 1462 rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 1463 1464 /* internal switching mode */ 1465 rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, tx_accept_flags); 1466 rte_bit_relaxed_set32(ECORE_ACCEPT_ALL_MULTICAST, 1467 tx_accept_flags); 1468 rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 1469 1470 break; 1471 1472 case BNX2X_RX_MODE_ALLMULTI_PROMISC: 1473 case BNX2X_RX_MODE_PROMISC: 1474 /* 1475 * According to deffinition of SI mode, iface in promisc mode 1476 * should receive matched and unmatched (in resolution of port) 1477 * unicast packets. 
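		 * (ECORE_ACCEPT_UNMATCHED below is what adds the
		 * unmatched-unicast part; ECORE_ACCEPT_UNICAST covers the
		 * matched case.)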
1478 */ 1479 rte_bit_relaxed_set32(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); 1480 rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, rx_accept_flags); 1481 rte_bit_relaxed_set32(ECORE_ACCEPT_ALL_MULTICAST, 1482 rx_accept_flags); 1483 rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 1484 1485 /* internal switching mode */ 1486 rte_bit_relaxed_set32(ECORE_ACCEPT_ALL_MULTICAST, 1487 tx_accept_flags); 1488 rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 1489 1490 if (IS_MF_SI(sc)) { 1491 rte_bit_relaxed_set32(ECORE_ACCEPT_ALL_UNICAST, 1492 tx_accept_flags); 1493 } else { 1494 rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, 1495 tx_accept_flags); 1496 } 1497 1498 break; 1499 1500 default: 1501 PMD_RX_LOG(ERR, "Unknown rx_mode (%d)", rx_mode); 1502 return -1; 1503 } 1504 1505 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ 1506 if (rx_mode != BNX2X_RX_MODE_NONE) { 1507 rte_bit_relaxed_set32(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); 1508 rte_bit_relaxed_set32(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); 1509 } 1510 1511 return 0; 1512 } 1513 1514 static int 1515 bnx2x_set_q_rx_mode(struct bnx2x_softc *sc, uint8_t cl_id, 1516 unsigned long rx_mode_flags, 1517 unsigned long rx_accept_flags, 1518 unsigned long tx_accept_flags, unsigned long ramrod_flags) 1519 { 1520 struct ecore_rx_mode_ramrod_params ramrod_param; 1521 int rc; 1522 1523 memset(&ramrod_param, 0, sizeof(ramrod_param)); 1524 1525 /* Prepare ramrod parameters */ 1526 ramrod_param.cid = 0; 1527 ramrod_param.cl_id = cl_id; 1528 ramrod_param.rx_mode_obj = &sc->rx_mode_obj; 1529 ramrod_param.func_id = SC_FUNC(sc); 1530 1531 ramrod_param.pstate = &sc->sp_state; 1532 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; 1533 1534 ramrod_param.rdata = BNX2X_SP(sc, rx_mode_rdata); 1535 ramrod_param.rdata_mapping = 1536 (rte_iova_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata), 1537 rte_bit_relaxed_set32(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 1538 1539 ramrod_param.ramrod_flags = ramrod_flags; 1540 ramrod_param.rx_mode_flags = rx_mode_flags; 1541 1542 ramrod_param.rx_accept_flags = rx_accept_flags; 1543 ramrod_param.tx_accept_flags = tx_accept_flags; 1544 1545 rc = ecore_config_rx_mode(sc, &ramrod_param); 1546 if (rc < 0) { 1547 PMD_RX_LOG(ERR, "Set rx_mode %d failed", sc->rx_mode); 1548 return rc; 1549 } 1550 1551 return 0; 1552 } 1553 1554 int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc) 1555 { 1556 uint32_t rx_mode_flags = 0, ramrod_flags = 0; 1557 uint32_t rx_accept_flags = 0, tx_accept_flags = 0; 1558 int rc; 1559 1560 rc = bnx2x_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, 1561 &tx_accept_flags); 1562 if (rc) { 1563 return rc; 1564 } 1565 1566 rte_bit_relaxed_set32(RAMROD_RX, &ramrod_flags); 1567 rte_bit_relaxed_set32(RAMROD_TX, &ramrod_flags); 1568 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &ramrod_flags); 1569 1570 return bnx2x_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, 1571 rx_accept_flags, tx_accept_flags, 1572 ramrod_flags); 1573 } 1574 1575 /* returns the "mcp load_code" according to global load_count array */ 1576 static int bnx2x_nic_load_no_mcp(struct bnx2x_softc *sc) 1577 { 1578 int path = SC_PATH(sc); 1579 int port = SC_PORT(sc); 1580 1581 PMD_DRV_LOG(INFO, sc, "NO MCP - load counts[%d] %d, %d, %d", 1582 path, load_count[path][0], load_count[path][1], 1583 load_count[path][2]); 1584 1585 load_count[path][0]++; 1586 load_count[path][1 + port]++; 1587 PMD_DRV_LOG(INFO, sc, "NO MCP - new load counts[%d] %d, %d, %d", 1588 path, load_count[path][0], load_count[path][1], 1589 
load_count[path][2]); 1590 if (load_count[path][0] == 1) 1591 return FW_MSG_CODE_DRV_LOAD_COMMON; 1592 else if (load_count[path][1 + port] == 1) 1593 return FW_MSG_CODE_DRV_LOAD_PORT; 1594 else 1595 return FW_MSG_CODE_DRV_LOAD_FUNCTION; 1596 } 1597 1598 /* returns the "mcp load_code" according to global load_count array */ 1599 static int bnx2x_nic_unload_no_mcp(struct bnx2x_softc *sc) 1600 { 1601 int port = SC_PORT(sc); 1602 int path = SC_PATH(sc); 1603 1604 PMD_DRV_LOG(INFO, sc, "NO MCP - load counts[%d] %d, %d, %d", 1605 path, load_count[path][0], load_count[path][1], 1606 load_count[path][2]); 1607 load_count[path][0]--; 1608 load_count[path][1 + port]--; 1609 PMD_DRV_LOG(INFO, sc, "NO MCP - new load counts[%d] %d, %d, %d", 1610 path, load_count[path][0], load_count[path][1], 1611 load_count[path][2]); 1612 if (load_count[path][0] == 0) { 1613 return FW_MSG_CODE_DRV_UNLOAD_COMMON; 1614 } else if (load_count[path][1 + port] == 0) { 1615 return FW_MSG_CODE_DRV_UNLOAD_PORT; 1616 } else { 1617 return FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 1618 } 1619 } 1620 1621 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */ 1622 static uint32_t bnx2x_send_unload_req(struct bnx2x_softc *sc, int unload_mode) 1623 { 1624 uint32_t reset_code = 0; 1625 1626 /* Select the UNLOAD request mode */ 1627 if (unload_mode == UNLOAD_NORMAL) { 1628 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 1629 } else { 1630 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 1631 } 1632 1633 /* Send the request to the MCP */ 1634 if (!BNX2X_NOMCP(sc)) { 1635 reset_code = bnx2x_fw_command(sc, reset_code, 0); 1636 } else { 1637 reset_code = bnx2x_nic_unload_no_mcp(sc); 1638 } 1639 1640 return reset_code; 1641 } 1642 1643 /* send UNLOAD_DONE command to the MCP */ 1644 static void bnx2x_send_unload_done(struct bnx2x_softc *sc, uint8_t keep_link) 1645 { 1646 uint32_t reset_param = 1647 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; 1648 1649 /* Report UNLOAD_DONE to MCP */ 1650 if (!BNX2X_NOMCP(sc)) { 1651 bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); 1652 } 1653 } 1654 1655 static int bnx2x_func_wait_started(struct bnx2x_softc *sc) 1656 { 1657 int tout = 50; 1658 1659 if (!sc->port.pmf) { 1660 return 0; 1661 } 1662 1663 /* 1664 * (assumption: No Attention from MCP at this stage) 1665 * PMF probably in the middle of TX disable/enable transaction 1666 * 1. Sync IRS for default SB 1667 * 2. Sync SP queue - this guarantees us that attention handling started 1668 * 3. Wait, that TX disable/enable transaction completes 1669 * 1670 * 1+2 guarantee that if DCBX attention was scheduled it already changed 1671 * pending bit of transaction from STARTED-->TX_STOPPED, if we already 1672 * received completion for the transaction the state is TX_STOPPED. 1673 * State will return to STARTED after completion of TX_STOPPED-->STARTED 1674 * transaction. 1675 */ 1676 1677 while (ecore_func_get_state(sc, &sc->func_obj) != 1678 ECORE_F_STATE_STARTED && tout--) { 1679 DELAY(20000); 1680 } 1681 1682 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { 1683 /* 1684 * Failed to complete the transaction in a "good way" 1685 * Force both transactions with CLR bit. 1686 */ 1687 struct ecore_func_state_params func_params = { NULL }; 1688 1689 PMD_DRV_LOG(NOTICE, sc, "Unexpected function state! 
" 1690 "Forcing STARTED-->TX_STOPPED-->STARTED"); 1691 1692 func_params.f_obj = &sc->func_obj; 1693 rte_bit_relaxed_set32(RAMROD_DRV_CLR_ONLY, 1694 &func_params.ramrod_flags); 1695 1696 /* STARTED-->TX_STOPPED */ 1697 func_params.cmd = ECORE_F_CMD_TX_STOP; 1698 ecore_func_state_change(sc, &func_params); 1699 1700 /* TX_STOPPED-->STARTED */ 1701 func_params.cmd = ECORE_F_CMD_TX_START; 1702 return ecore_func_state_change(sc, &func_params); 1703 } 1704 1705 return 0; 1706 } 1707 1708 static int bnx2x_stop_queue(struct bnx2x_softc *sc, int index) 1709 { 1710 struct bnx2x_fastpath *fp = &sc->fp[index]; 1711 struct ecore_queue_state_params q_params = { NULL }; 1712 int rc; 1713 1714 PMD_DRV_LOG(DEBUG, sc, "stopping queue %d cid %d", index, fp->index); 1715 1716 q_params.q_obj = &sc->sp_objs[fp->index].q_obj; 1717 /* We want to wait for completion in this context */ 1718 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 1719 1720 /* Stop the primary connection: */ 1721 1722 /* ...halt the connection */ 1723 q_params.cmd = ECORE_Q_CMD_HALT; 1724 rc = ecore_queue_state_change(sc, &q_params); 1725 if (rc) { 1726 return rc; 1727 } 1728 1729 /* ...terminate the connection */ 1730 q_params.cmd = ECORE_Q_CMD_TERMINATE; 1731 memset(&q_params.params.terminate, 0, 1732 sizeof(q_params.params.terminate)); 1733 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; 1734 rc = ecore_queue_state_change(sc, &q_params); 1735 if (rc) { 1736 return rc; 1737 } 1738 1739 /* ...delete cfc entry */ 1740 q_params.cmd = ECORE_Q_CMD_CFC_DEL; 1741 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); 1742 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; 1743 return ecore_queue_state_change(sc, &q_params); 1744 } 1745 1746 /* wait for the outstanding SP commands */ 1747 static uint8_t bnx2x_wait_sp_comp(struct bnx2x_softc *sc, uint32_t mask) 1748 { 1749 uint32_t tmp; 1750 int tout = 5000; /* wait for 5 secs tops */ 1751 1752 while (tout--) { 1753 mb(); 1754 if (!(atomic_load_acq_int(&sc->sp_state) & mask)) 1755 return TRUE; 1756 1757 DELAY(1000); 1758 } 1759 1760 mb(); 1761 1762 tmp = atomic_load_acq_int(&sc->sp_state); 1763 if (tmp & mask) { 1764 PMD_DRV_LOG(INFO, sc, "Filtering completion timed out: " 1765 "sp_state 0x%x, mask 0x%x", tmp, mask); 1766 return FALSE; 1767 } 1768 1769 return FALSE; 1770 } 1771 1772 static int bnx2x_func_stop(struct bnx2x_softc *sc) 1773 { 1774 struct ecore_func_state_params func_params = { NULL }; 1775 int rc; 1776 1777 /* prepare parameters for function state transitions */ 1778 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 1779 func_params.f_obj = &sc->func_obj; 1780 func_params.cmd = ECORE_F_CMD_STOP; 1781 1782 /* 1783 * Try to stop the function the 'good way'. If it fails (in case 1784 * of a parity error during bnx2x_chip_cleanup()) and we are 1785 * not in a debug mode, perform a state transaction in order to 1786 * enable further HW_RESET transaction. 1787 */ 1788 rc = ecore_func_state_change(sc, &func_params); 1789 if (rc) { 1790 PMD_DRV_LOG(NOTICE, sc, "FUNC_STOP ramrod failed. 
" 1791 "Running a dry transaction"); 1792 rte_bit_relaxed_set32(RAMROD_DRV_CLR_ONLY, 1793 &func_params.ramrod_flags); 1794 return ecore_func_state_change(sc, &func_params); 1795 } 1796 1797 return 0; 1798 } 1799 1800 static int bnx2x_reset_hw(struct bnx2x_softc *sc, uint32_t load_code) 1801 { 1802 struct ecore_func_state_params func_params = { NULL }; 1803 1804 /* Prepare parameters for function state transitions */ 1805 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 1806 1807 func_params.f_obj = &sc->func_obj; 1808 func_params.cmd = ECORE_F_CMD_HW_RESET; 1809 1810 func_params.params.hw_init.load_phase = load_code; 1811 1812 return ecore_func_state_change(sc, &func_params); 1813 } 1814 1815 static void bnx2x_int_disable_sync(struct bnx2x_softc *sc, int disable_hw) 1816 { 1817 if (disable_hw) { 1818 /* prevent the HW from sending interrupts */ 1819 bnx2x_int_disable(sc); 1820 } 1821 } 1822 1823 static void 1824 bnx2x_chip_cleanup(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link) 1825 { 1826 int port = SC_PORT(sc); 1827 struct ecore_mcast_ramrod_params rparam = { NULL }; 1828 uint32_t reset_code; 1829 int i, rc = 0; 1830 1831 bnx2x_drain_tx_queues(sc); 1832 1833 /* give HW time to discard old tx messages */ 1834 DELAY(1000); 1835 1836 /* Clean all ETH MACs */ 1837 rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, 1838 FALSE); 1839 if (rc < 0) { 1840 PMD_DRV_LOG(NOTICE, sc, 1841 "Failed to delete all ETH MACs (%d)", rc); 1842 } 1843 1844 /* Clean up UC list */ 1845 rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, 1846 TRUE); 1847 if (rc < 0) { 1848 PMD_DRV_LOG(NOTICE, sc, 1849 "Failed to delete UC MACs list (%d)", rc); 1850 } 1851 1852 /* Disable LLH */ 1853 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0); 1854 1855 /* Set "drop all" to stop Rx */ 1856 1857 /* 1858 * We need to take the if_maddr_lock() here in order to prevent 1859 * a race between the completion code and this code. 1860 */ 1861 1862 if (rte_bit_relaxed_get32(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) 1863 rte_bit_relaxed_set32(ECORE_FILTER_RX_MODE_SCHED, 1864 &sc->sp_state); 1865 else 1866 bnx2x_set_storm_rx_mode(sc); 1867 1868 /* Clean up multicast configuration */ 1869 rparam.mcast_obj = &sc->mcast_obj; 1870 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 1871 if (rc < 0) { 1872 PMD_DRV_LOG(NOTICE, sc, 1873 "Failed to send DEL MCAST command (%d)", rc); 1874 } 1875 1876 /* 1877 * Send the UNLOAD_REQUEST to the MCP. This will return if 1878 * this function should perform FUNCTION, PORT, or COMMON HW 1879 * reset. 1880 */ 1881 reset_code = bnx2x_send_unload_req(sc, unload_mode); 1882 1883 /* 1884 * (assumption: No Attention from MCP at this stage) 1885 * PMF probably in the middle of TX disable/enable transaction 1886 */ 1887 rc = bnx2x_func_wait_started(sc); 1888 if (rc) { 1889 PMD_DRV_LOG(NOTICE, sc, "bnx2x_func_wait_started failed"); 1890 } 1891 1892 /* 1893 * Close multi and leading connections 1894 * Completions for ramrods are collected in a synchronous way 1895 */ 1896 for (i = 0; i < sc->num_queues; i++) { 1897 if (bnx2x_stop_queue(sc, i)) { 1898 goto unload_error; 1899 } 1900 } 1901 1902 /* 1903 * If SP settings didn't get completed so far - something 1904 * very wrong has happen. 
 */
	if (!bnx2x_wait_sp_comp(sc, ~0x0U))
		PMD_DRV_LOG(NOTICE, sc, "Common slow path ramrods got stuck!");

unload_error:

	rc = bnx2x_func_stop(sc);
	if (rc) {
		PMD_DRV_LOG(NOTICE, sc, "Function stop failed!");
	}

	/* disable HW interrupts */
	bnx2x_int_disable_sync(sc, TRUE);

	/* Reset the chip */
	rc = bnx2x_reset_hw(sc, reset_code);
	if (rc) {
		PMD_DRV_LOG(NOTICE, sc, "Hardware reset failed");
	}

	/* Report UNLOAD_DONE to MCP */
	bnx2x_send_unload_done(sc, keep_link);
}

static void bnx2x_disable_close_the_gate(struct bnx2x_softc *sc)
{
	uint32_t val;

	PMD_DRV_LOG(DEBUG, sc, "Disabling 'close the gates'");

	val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
	val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
		 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
	REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
}

/*
 * Clean the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
{
	uint32_t ramrod_flags = 0, vlan_mac_flags = 0;
	struct ecore_mcast_ramrod_params rparam = { NULL };
	struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
	int rc;

	/* Cleanup MACs' object first... */

	/* Wait for completion of the requested commands */
	rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Perform a dry cleanup */
	rte_bit_relaxed_set32(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

	/* Clean ETH primary MAC */
	rte_bit_relaxed_set32(ECORE_ETH_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0) {
		PMD_DRV_LOG(NOTICE, sc, "Failed to clean ETH MACs (%d)", rc);
	}

	/* Cleanup UC list */
	vlan_mac_flags = 0;
	rte_bit_relaxed_set32(ECORE_UC_LIST_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
	if (rc != 0) {
		PMD_DRV_LOG(NOTICE, sc,
			    "Failed to clean UC list MACs (%d)", rc);
	}

	/* Now clean mcast object... */

	rparam.mcast_obj = &sc->mcast_obj;
	rte_bit_relaxed_set32(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

	/* Add a DEL command...
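	 * Because RAMROD_DRV_CLR_ONLY was set in rparam.ramrod_flags above,
	 * this DEL only cleans the driver-side multicast bookkeeping; no
	 * ramrod is actually sent to the firmware at this point.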
	 */
	rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
	if (rc < 0) {
		PMD_DRV_LOG(NOTICE, sc,
			    "Failed to send DEL MCAST command (%d)", rc);
	}

	/* now wait until all pending commands are cleared */

	rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
	while (rc != 0) {
		if (rc < 0) {
			PMD_DRV_LOG(NOTICE, sc,
				    "Failed to clean MCAST object (%d)", rc);
			return;
		}

		rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
	}
}

/* stop the controller */
__rte_noinline
int
bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link)
{
	uint8_t global = FALSE;
	uint32_t val;

	PMD_INIT_FUNC_TRACE(sc);

	PMD_DRV_LOG(DEBUG, sc, "Starting NIC unload...");

	/* mark driver as unloaded in shmem2 */
	if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
		val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
		SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	if (IS_PF(sc) && sc->recovery_state != BNX2X_RECOVERY_DONE &&
	    (sc->state == BNX2X_STATE_CLOSED || sc->state == BNX2X_STATE_ERROR)) {
		/*
		 * We can get here if the driver has been unloaded
		 * during parity error recovery and is either waiting for a
		 * leader to complete or for other functions to unload and
		 * then ifconfig down has been issued. In this case we want to
		 * unload and let the other functions complete the recovery
		 * process.
		 */
		sc->recovery_state = BNX2X_RECOVERY_DONE;
		sc->is_leader = 0;
		bnx2x_release_leader_lock(sc);
		mb();

		PMD_DRV_LOG(NOTICE, sc, "Can't unload in closed or error state");
		return -1;
	}

	/*
	 * Nothing to do during unload if the previous bnx2x_nic_load()
	 * did not complete successfully - all resources are already released.
	 */
	if ((sc->state == BNX2X_STATE_CLOSED) || (sc->state == BNX2X_STATE_ERROR)) {
		return 0;
	}

	sc->state = BNX2X_STATE_CLOSING_WAITING_HALT;
	mb();

	sc->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_rx_mode(sc);
	mb();

	if (IS_PF(sc)) {
		/* set ALWAYS_ALIVE bit in shmem */
		sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;

		bnx2x_drv_pulse(sc);

		bnx2x_stats_handle(sc, STATS_EVENT_STOP);
		bnx2x_save_statistics(sc);
	}

	/* wait till consumers catch up with producers in all queues */
	bnx2x_drain_tx_queues(sc);

	/* If VF, indicate to the PF that this function is going down (the PF
	 * will delete the SP elements and clear the initializations).
	 */
	if (IS_VF(sc)) {
		bnx2x_vf_unload(sc);
	} else if (unload_mode != UNLOAD_RECOVERY) {
		/* if this is a normal/close unload need to clean up chip */
		bnx2x_chip_cleanup(sc, unload_mode, keep_link);
	} else {
		/* Send the UNLOAD_REQUEST to the MCP */
		bnx2x_send_unload_req(sc, unload_mode);

		/*
		 * Prevent transactions to the host from the functions on the
		 * engine that doesn't reset global blocks in case of a global
		 * attention once global blocks are reset and gates are opened
		 * (the engine whose leader will perform the recovery
		 * last).
		 */
		if (!CHIP_IS_E1x(sc)) {
			bnx2x_pf_disable(sc);
		}

		/* disable HW interrupts */
		bnx2x_int_disable_sync(sc, TRUE);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(sc, FALSE);
	}

	/*
	 * At this stage no more interrupts will arrive, so we may safely clean
	 * the queueable objects here in case they failed to get cleaned so far.
	 */
	if (IS_PF(sc)) {
		bnx2x_squeeze_objects(sc);
	}

	/* There should be no more pending SP commands at this stage */
	sc->sp_state = 0;

	sc->port.pmf = 0;

	if (IS_PF(sc)) {
		bnx2x_free_mem(sc);
	}

	/* free the host hardware/software hsi structures */
	bnx2x_free_hsi_mem(sc);

	bnx2x_free_fw_stats_mem(sc);

	sc->state = BNX2X_STATE_CLOSED;

	/*
	 * Check if there are pending parity attentions. If there are - set
	 * RECOVERY_IN_PROGRESS.
	 */
	if (IS_PF(sc) && bnx2x_chk_parity_attn(sc, &global, FALSE)) {
		bnx2x_set_reset_in_progress(sc);

		/* Set RESET_IS_GLOBAL if needed */
		if (global) {
			bnx2x_set_reset_global(sc);
		}
	}

	/*
	 * The last driver must disable the "close the gate" mechanism if there
	 * is no parity attention or "process kill" pending.
	 */
	if (IS_PF(sc) && !bnx2x_clear_pf_load(sc) &&
	    bnx2x_reset_is_done(sc, SC_PATH(sc))) {
		bnx2x_disable_close_the_gate(sc);
	}

	PMD_DRV_LOG(DEBUG, sc, "Ended NIC unload");

	return 0;
}

/*
 * Encapsulate an mbuf cluster into the TX BD chain and make the memory
 * visible to the controller.
 *
 * If an mbuf is submitted to this routine and cannot be given to the
 * controller (e.g. it has too many fragments) then the function may free
 * the mbuf and return to the caller.
 *
 * Returns:
 *   int: Number of TX BDs used for the mbuf
 *
 * Note the side effect that an mbuf may be freed if it causes a problem.
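 *
 * BD accounting in this implementation: each packet consumes the start BD
 * plus one more BD slot (nbd is set to 2 and nb_tx_avail is decremented by
 * 2); the extra BD added when TX_IDX(bd_prod) < 2 presumably accounts for
 * the next-page pointer BD at the end of a BD page.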
2161 */ 2162 int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0) 2163 { 2164 struct eth_tx_start_bd *tx_start_bd; 2165 uint16_t bd_prod, pkt_prod; 2166 struct bnx2x_softc *sc; 2167 uint32_t nbds = 0; 2168 2169 sc = txq->sc; 2170 bd_prod = txq->tx_bd_tail; 2171 pkt_prod = txq->tx_pkt_tail; 2172 2173 txq->sw_ring[TX_BD(pkt_prod, txq)] = m0; 2174 2175 tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd; 2176 2177 tx_start_bd->addr_lo = 2178 rte_cpu_to_le_32(U64_LO(rte_mbuf_data_iova(m0))); 2179 tx_start_bd->addr_hi = 2180 rte_cpu_to_le_32(U64_HI(rte_mbuf_data_iova(m0))); 2181 tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len); 2182 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 2183 tx_start_bd->general_data = 2184 (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 2185 2186 tx_start_bd->nbd = rte_cpu_to_le_16(2); 2187 2188 if (m0->ol_flags & PKT_TX_VLAN_PKT) { 2189 tx_start_bd->vlan_or_ethertype = 2190 rte_cpu_to_le_16(m0->vlan_tci); 2191 tx_start_bd->bd_flags.as_bitfield |= 2192 (X_ETH_OUTBAND_VLAN << 2193 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 2194 } else { 2195 if (IS_PF(sc)) 2196 tx_start_bd->vlan_or_ethertype = 2197 rte_cpu_to_le_16(pkt_prod); 2198 else { 2199 /* when transmitting in a vf, start bd 2200 * must hold the ethertype for fw to enforce it 2201 */ 2202 struct rte_ether_hdr *eh = 2203 rte_pktmbuf_mtod(m0, struct rte_ether_hdr *); 2204 2205 /* Still need to consider inband vlan for enforced */ 2206 if (eh->ether_type == 2207 rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) { 2208 struct rte_vlan_hdr *vh = 2209 (struct rte_vlan_hdr *)(eh + 1); 2210 tx_start_bd->bd_flags.as_bitfield |= 2211 (X_ETH_INBAND_VLAN << 2212 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 2213 tx_start_bd->vlan_or_ethertype = 2214 rte_cpu_to_le_16(ntohs(vh->vlan_tci)); 2215 } else { 2216 tx_start_bd->vlan_or_ethertype = 2217 (rte_cpu_to_le_16 2218 (rte_be_to_cpu_16(eh->ether_type))); 2219 } 2220 } 2221 } 2222 2223 bd_prod = NEXT_TX_BD(bd_prod); 2224 if (IS_VF(sc)) { 2225 struct eth_tx_parse_bd_e2 *tx_parse_bd; 2226 const struct rte_ether_hdr *eh = 2227 rte_pktmbuf_mtod(m0, struct rte_ether_hdr *); 2228 uint8_t mac_type = UNICAST_ADDRESS; 2229 2230 tx_parse_bd = 2231 &txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2; 2232 if (rte_is_multicast_ether_addr(&eh->d_addr)) { 2233 if (rte_is_broadcast_ether_addr(&eh->d_addr)) 2234 mac_type = BROADCAST_ADDRESS; 2235 else 2236 mac_type = MULTICAST_ADDRESS; 2237 } 2238 tx_parse_bd->parsing_data = 2239 (mac_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT); 2240 2241 rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi, 2242 &eh->d_addr.addr_bytes[0], 2); 2243 rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid, 2244 &eh->d_addr.addr_bytes[2], 2); 2245 rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo, 2246 &eh->d_addr.addr_bytes[4], 2); 2247 rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi, 2248 &eh->s_addr.addr_bytes[0], 2); 2249 rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid, 2250 &eh->s_addr.addr_bytes[2], 2); 2251 rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo, 2252 &eh->s_addr.addr_bytes[4], 2); 2253 2254 tx_parse_bd->data.mac_addr.dst_hi = 2255 rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_hi); 2256 tx_parse_bd->data.mac_addr.dst_mid = 2257 rte_cpu_to_be_16(tx_parse_bd->data. 2258 mac_addr.dst_mid); 2259 tx_parse_bd->data.mac_addr.dst_lo = 2260 rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_lo); 2261 tx_parse_bd->data.mac_addr.src_hi = 2262 rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_hi); 2263 tx_parse_bd->data.mac_addr.src_mid = 2264 rte_cpu_to_be_16(tx_parse_bd->data. 
2265 mac_addr.src_mid); 2266 tx_parse_bd->data.mac_addr.src_lo = 2267 rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_lo); 2268 2269 PMD_TX_LOG(DEBUG, 2270 "PBD dst %x %x %x src %x %x %x p_data %x", 2271 tx_parse_bd->data.mac_addr.dst_hi, 2272 tx_parse_bd->data.mac_addr.dst_mid, 2273 tx_parse_bd->data.mac_addr.dst_lo, 2274 tx_parse_bd->data.mac_addr.src_hi, 2275 tx_parse_bd->data.mac_addr.src_mid, 2276 tx_parse_bd->data.mac_addr.src_lo, 2277 tx_parse_bd->parsing_data); 2278 } 2279 2280 PMD_TX_LOG(DEBUG, 2281 "start bd: nbytes %d flags %x vlan %x", 2282 tx_start_bd->nbytes, 2283 tx_start_bd->bd_flags.as_bitfield, 2284 tx_start_bd->vlan_or_ethertype); 2285 2286 bd_prod = NEXT_TX_BD(bd_prod); 2287 pkt_prod++; 2288 2289 if (TX_IDX(bd_prod) < 2) 2290 nbds++; 2291 2292 txq->nb_tx_avail -= 2; 2293 txq->tx_bd_tail = bd_prod; 2294 txq->tx_pkt_tail = pkt_prod; 2295 2296 return nbds + 2; 2297 } 2298 2299 static uint16_t bnx2x_cid_ilt_lines(struct bnx2x_softc *sc) 2300 { 2301 return L2_ILT_LINES(sc); 2302 } 2303 2304 static void bnx2x_ilt_set_info(struct bnx2x_softc *sc) 2305 { 2306 struct ilt_client_info *ilt_client; 2307 struct ecore_ilt *ilt = sc->ilt; 2308 uint16_t line = 0; 2309 2310 PMD_INIT_FUNC_TRACE(sc); 2311 2312 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); 2313 2314 /* CDU */ 2315 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 2316 ilt_client->client_num = ILT_CLIENT_CDU; 2317 ilt_client->page_size = CDU_ILT_PAGE_SZ; 2318 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 2319 ilt_client->start = line; 2320 line += bnx2x_cid_ilt_lines(sc); 2321 2322 if (CNIC_SUPPORT(sc)) { 2323 line += CNIC_ILT_LINES; 2324 } 2325 2326 ilt_client->end = (line - 1); 2327 2328 /* QM */ 2329 if (QM_INIT(sc->qm_cid_count)) { 2330 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 2331 ilt_client->client_num = ILT_CLIENT_QM; 2332 ilt_client->page_size = QM_ILT_PAGE_SZ; 2333 ilt_client->flags = 0; 2334 ilt_client->start = line; 2335 2336 /* 4 bytes for each cid */ 2337 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 2338 QM_ILT_PAGE_SZ); 2339 2340 ilt_client->end = (line - 1); 2341 } 2342 2343 if (CNIC_SUPPORT(sc)) { 2344 /* SRC */ 2345 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 2346 ilt_client->client_num = ILT_CLIENT_SRC; 2347 ilt_client->page_size = SRC_ILT_PAGE_SZ; 2348 ilt_client->flags = 0; 2349 ilt_client->start = line; 2350 line += SRC_ILT_LINES; 2351 ilt_client->end = (line - 1); 2352 2353 /* TM */ 2354 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 2355 ilt_client->client_num = ILT_CLIENT_TM; 2356 ilt_client->page_size = TM_ILT_PAGE_SZ; 2357 ilt_client->flags = 0; 2358 ilt_client->start = line; 2359 line += TM_ILT_LINES; 2360 ilt_client->end = (line - 1); 2361 } 2362 2363 assert((line <= ILT_MAX_LINES)); 2364 } 2365 2366 static void bnx2x_set_fp_rx_buf_size(struct bnx2x_softc *sc) 2367 { 2368 int i; 2369 2370 for (i = 0; i < sc->num_queues; i++) { 2371 /* get the Rx buffer size for RX frames */ 2372 sc->fp[i].rx_buf_size = 2373 (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); 2374 } 2375 } 2376 2377 int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc) 2378 { 2379 2380 sc->ilt = rte_malloc("", sizeof(struct ecore_ilt), RTE_CACHE_LINE_SIZE); 2381 2382 return sc->ilt == NULL; 2383 } 2384 2385 static int bnx2x_alloc_ilt_lines_mem(struct bnx2x_softc *sc) 2386 { 2387 sc->ilt->lines = rte_calloc("", 2388 sizeof(struct ilt_line), ILT_MAX_LINES, 2389 RTE_CACHE_LINE_SIZE); 2390 return sc->ilt->lines == NULL; 2391 } 2392 2393 void bnx2x_free_ilt_mem(struct bnx2x_softc *sc) 2394 { 2395 rte_free(sc->ilt); 2396 sc->ilt = 
NULL; 2397 } 2398 2399 static void bnx2x_free_ilt_lines_mem(struct bnx2x_softc *sc) 2400 { 2401 if (sc->ilt->lines != NULL) { 2402 rte_free(sc->ilt->lines); 2403 sc->ilt->lines = NULL; 2404 } 2405 } 2406 2407 static void bnx2x_free_mem(struct bnx2x_softc *sc) 2408 { 2409 uint32_t i; 2410 2411 for (i = 0; i < L2_ILT_LINES(sc); i++) { 2412 sc->context[i].vcxt = NULL; 2413 sc->context[i].size = 0; 2414 } 2415 2416 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); 2417 2418 bnx2x_free_ilt_lines_mem(sc); 2419 } 2420 2421 static int bnx2x_alloc_mem(struct bnx2x_softc *sc) 2422 { 2423 int context_size; 2424 int allocated; 2425 int i; 2426 char cdu_name[RTE_MEMZONE_NAMESIZE]; 2427 2428 /* 2429 * Allocate memory for CDU context: 2430 * This memory is allocated separately and not in the generic ILT 2431 * functions because CDU differs in few aspects: 2432 * 1. There can be multiple entities allocating memory for context - 2433 * regular L2, CNIC, and SRIOV drivers. Each separately controls 2434 * its own ILT lines. 2435 * 2. Since CDU page-size is not a single 4KB page (which is the case 2436 * for the other ILT clients), to be efficient we want to support 2437 * allocation of sub-page-size in the last entry. 2438 * 3. Context pointers are used by the driver to pass to FW / update 2439 * the context (for the other ILT clients the pointers are used just to 2440 * free the memory during unload). 2441 */ 2442 context_size = (sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(sc)); 2443 for (i = 0, allocated = 0; allocated < context_size; i++) { 2444 sc->context[i].size = min(CDU_ILT_PAGE_SZ, 2445 (context_size - allocated)); 2446 2447 snprintf(cdu_name, sizeof(cdu_name), "cdu_%d", i); 2448 if (bnx2x_dma_alloc(sc, sc->context[i].size, 2449 &sc->context[i].vcxt_dma, 2450 cdu_name, BNX2X_PAGE_SIZE) != 0) { 2451 bnx2x_free_mem(sc); 2452 return -1; 2453 } 2454 2455 sc->context[i].vcxt = 2456 (union cdu_context *)sc->context[i].vcxt_dma.vaddr; 2457 2458 allocated += sc->context[i].size; 2459 } 2460 2461 bnx2x_alloc_ilt_lines_mem(sc); 2462 2463 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { 2464 PMD_DRV_LOG(NOTICE, sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed"); 2465 bnx2x_free_mem(sc); 2466 return -1; 2467 } 2468 2469 return 0; 2470 } 2471 2472 static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc) 2473 { 2474 bnx2x_dma_free(&sc->fw_stats_dma); 2475 sc->fw_stats_num = 0; 2476 2477 sc->fw_stats_req_size = 0; 2478 sc->fw_stats_req = NULL; 2479 sc->fw_stats_req_mapping = 0; 2480 2481 sc->fw_stats_data_size = 0; 2482 sc->fw_stats_data = NULL; 2483 sc->fw_stats_data_mapping = 0; 2484 } 2485 2486 static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc) 2487 { 2488 uint8_t num_queue_stats; 2489 int num_groups, vf_headroom = 0; 2490 2491 /* number of queues for statistics is number of eth queues */ 2492 num_queue_stats = BNX2X_NUM_ETH_QUEUES(sc); 2493 2494 /* 2495 * Total number of FW statistics requests = 2496 * 1 for port stats + 1 for PF stats + num of queues 2497 */ 2498 sc->fw_stats_num = (2 + num_queue_stats); 2499 2500 /* 2501 * Request is built from stats_query_header and an array of 2502 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT 2503 * rules. The real number or requests is configured in the 2504 * stats_query_header. 
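 * As a worked example (assuming STATS_QUERY_CMD_COUNT is 16, its usual
 * value in the HSI headers): with 4 ETH queues, fw_stats_num is 2 + 4 = 6,
 * so the ceiling division below yields a single stats_query_cmd_group.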
 */
	num_groups = (sc->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT;
	if ((sc->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT)
		num_groups++;

	sc->fw_stats_req_size =
	    (sizeof(struct stats_query_header) +
	     (num_groups * sizeof(struct stats_query_cmd_group)));

	/*
	 * Data for statistics requests + stats_counter.
	 * stats_counter holds per-STORM counters that are incremented when
	 * STORM has finished with the current request. Memory for FCoE
	 * offloaded statistics is counted anyway, even if it will not be sent.
	 * VF stats are not accounted for here as the data of VF stats is stored
	 * in memory allocated by the VF, not here.
	 */
	sc->fw_stats_data_size =
	    (sizeof(struct stats_counter) +
	     sizeof(struct per_port_stats) + sizeof(struct per_pf_stats) +
	     /* sizeof(struct fcoe_statistics_params) + */
	     (sizeof(struct per_queue_stats) * num_queue_stats));

	if (bnx2x_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
			    &sc->fw_stats_dma, "fw_stats",
			    RTE_CACHE_LINE_SIZE) != 0) {
		bnx2x_free_fw_stats_mem(sc);
		return -1;
	}

	/* set up the shortcuts */

	sc->fw_stats_req = (struct bnx2x_fw_stats_req *)sc->fw_stats_dma.vaddr;
	sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;

	sc->fw_stats_data =
	    (struct bnx2x_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
					   sc->fw_stats_req_size);
	sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
				     sc->fw_stats_req_size);

	return 0;
}

/*
 * Bits map:
 * 0-7  - Engine0 load counter.
 * 8-15 - Engine1 load counter.
 * 16   - Engine0 RESET_IN_PROGRESS bit.
 * 17   - Engine1 RESET_IN_PROGRESS bit.
 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
 *        function on the engine.
 * 19   - Engine1 ONE_IS_LOADED.
 * 20   - Chip reset flow bit. When set, a non-leader must wait for both
 *        engines' leaders to complete (check both RESET_IN_PROGRESS bits,
 *        not just the one belonging to its engine).
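 *
 * For example, a non-zero value in bits 0-7 means at least one PF is still
 * loaded on engine 0; this is what bnx2x_get_load_status() below derives by
 * masking with BNX2X_PATH0_LOAD_CNT_MASK and shifting by
 * BNX2X_PATH0_LOAD_CNT_SHIFT.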
2561 */ 2562 #define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 2563 #define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff 2564 #define BNX2X_PATH0_LOAD_CNT_SHIFT 0 2565 #define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00 2566 #define BNX2X_PATH1_LOAD_CNT_SHIFT 8 2567 #define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000 2568 #define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000 2569 #define BNX2X_GLOBAL_RESET_BIT 0x00040000 2570 2571 /* set the GLOBAL_RESET bit, should be run under rtnl lock */ 2572 static void bnx2x_set_reset_global(struct bnx2x_softc *sc) 2573 { 2574 uint32_t val; 2575 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2576 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2577 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); 2578 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2579 } 2580 2581 /* clear the GLOBAL_RESET bit, should be run under rtnl lock */ 2582 static void bnx2x_clear_reset_global(struct bnx2x_softc *sc) 2583 { 2584 uint32_t val; 2585 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2586 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2587 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); 2588 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2589 } 2590 2591 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */ 2592 static uint8_t bnx2x_reset_is_global(struct bnx2x_softc *sc) 2593 { 2594 return REG_RD(sc, BNX2X_RECOVERY_GLOB_REG) & BNX2X_GLOBAL_RESET_BIT; 2595 } 2596 2597 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */ 2598 static void bnx2x_set_reset_done(struct bnx2x_softc *sc) 2599 { 2600 uint32_t val; 2601 uint32_t bit = SC_PATH(sc) ? BNX2X_PATH1_RST_IN_PROG_BIT : 2602 BNX2X_PATH0_RST_IN_PROG_BIT; 2603 2604 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2605 2606 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2607 /* Clear the bit */ 2608 val &= ~bit; 2609 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); 2610 2611 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2612 } 2613 2614 /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ 2615 static void bnx2x_set_reset_in_progress(struct bnx2x_softc *sc) 2616 { 2617 uint32_t val; 2618 uint32_t bit = SC_PATH(sc) ? BNX2X_PATH1_RST_IN_PROG_BIT : 2619 BNX2X_PATH0_RST_IN_PROG_BIT; 2620 2621 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2622 2623 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2624 /* Set the bit */ 2625 val |= bit; 2626 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); 2627 2628 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2629 } 2630 2631 /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ 2632 static uint8_t bnx2x_reset_is_done(struct bnx2x_softc *sc, int engine) 2633 { 2634 uint32_t val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2635 uint32_t bit = engine ? BNX2X_PATH1_RST_IN_PROG_BIT : 2636 BNX2X_PATH0_RST_IN_PROG_BIT; 2637 2638 /* return false if bit is set */ 2639 return (val & bit) ? FALSE : TRUE; 2640 } 2641 2642 /* get the load status for an engine, should be run under rtnl lock */ 2643 static uint8_t bnx2x_get_load_status(struct bnx2x_softc *sc, int engine) 2644 { 2645 uint32_t mask = engine ? BNX2X_PATH1_LOAD_CNT_MASK : 2646 BNX2X_PATH0_LOAD_CNT_MASK; 2647 uint32_t shift = engine ? 
BNX2X_PATH1_LOAD_CNT_SHIFT : 2648 BNX2X_PATH0_LOAD_CNT_SHIFT; 2649 uint32_t val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2650 2651 val = ((val & mask) >> shift); 2652 2653 return val != 0; 2654 } 2655 2656 /* set pf load mark */ 2657 static void bnx2x_set_pf_load(struct bnx2x_softc *sc) 2658 { 2659 uint32_t val; 2660 uint32_t val1; 2661 uint32_t mask = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_MASK : 2662 BNX2X_PATH0_LOAD_CNT_MASK; 2663 uint32_t shift = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 2664 BNX2X_PATH0_LOAD_CNT_SHIFT; 2665 2666 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2667 2668 PMD_INIT_FUNC_TRACE(sc); 2669 2670 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2671 2672 /* get the current counter value */ 2673 val1 = ((val & mask) >> shift); 2674 2675 /* set bit of this PF */ 2676 val1 |= (1 << SC_ABS_FUNC(sc)); 2677 2678 /* clear the old value */ 2679 val &= ~mask; 2680 2681 /* set the new one */ 2682 val |= ((val1 << shift) & mask); 2683 2684 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); 2685 2686 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2687 } 2688 2689 /* clear pf load mark */ 2690 static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc) 2691 { 2692 uint32_t val1, val; 2693 uint32_t mask = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_MASK : 2694 BNX2X_PATH0_LOAD_CNT_MASK; 2695 uint32_t shift = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 2696 BNX2X_PATH0_LOAD_CNT_SHIFT; 2697 2698 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2699 val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); 2700 2701 /* get the current counter value */ 2702 val1 = (val & mask) >> shift; 2703 2704 /* clear bit of that PF */ 2705 val1 &= ~(1 << SC_ABS_FUNC(sc)); 2706 2707 /* clear the old value */ 2708 val &= ~mask; 2709 2710 /* set the new one */ 2711 val |= ((val1 << shift) & mask); 2712 2713 REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); 2714 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 2715 return val1 != 0; 2716 } 2717 2718 /* send load requrest to mcp and analyze response */ 2719 static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t * load_code) 2720 { 2721 PMD_INIT_FUNC_TRACE(sc); 2722 2723 /* init fw_seq */ 2724 sc->fw_seq = 2725 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 2726 DRV_MSG_SEQ_NUMBER_MASK); 2727 2728 PMD_DRV_LOG(DEBUG, sc, "initial fw_seq 0x%04x", sc->fw_seq); 2729 2730 #ifdef BNX2X_PULSE 2731 /* get the current FW pulse sequence */ 2732 sc->fw_drv_pulse_wr_seq = 2733 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) & 2734 DRV_PULSE_SEQ_MASK); 2735 #else 2736 /* set ALWAYS_ALIVE bit in shmem */ 2737 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 2738 bnx2x_drv_pulse(sc); 2739 #endif 2740 2741 /* load request */ 2742 (*load_code) = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 2743 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 2744 2745 /* if the MCP fails to respond we must abort */ 2746 if (!(*load_code)) { 2747 PMD_DRV_LOG(NOTICE, sc, "MCP response failure!"); 2748 return -1; 2749 } 2750 2751 /* if MCP refused then must abort */ 2752 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { 2753 PMD_DRV_LOG(NOTICE, sc, "MCP refused load request"); 2754 return -1; 2755 } 2756 2757 return 0; 2758 } 2759 2760 /* 2761 * Check whether another PF has already loaded FW to chip. In virtualized 2762 * environments a pf from anoth VM may have already initialized the device 2763 * including loading FW. 
2764 */ 2765 static int bnx2x_nic_load_analyze_req(struct bnx2x_softc *sc, uint32_t load_code) 2766 { 2767 uint32_t my_fw, loaded_fw; 2768 2769 /* is another pf loaded on this engine? */ 2770 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 2771 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 2772 /* build my FW version dword */ 2773 my_fw = (BNX2X_5710_FW_MAJOR_VERSION + 2774 (BNX2X_5710_FW_MINOR_VERSION << 8) + 2775 (BNX2X_5710_FW_REVISION_VERSION << 16) + 2776 (BNX2X_5710_FW_ENGINEERING_VERSION << 24)); 2777 2778 /* read loaded FW from chip */ 2779 loaded_fw = REG_RD(sc, XSEM_REG_PRAM); 2780 PMD_DRV_LOG(DEBUG, sc, "loaded FW 0x%08x / my FW 0x%08x", 2781 loaded_fw, my_fw); 2782 2783 /* abort nic load if version mismatch */ 2784 if (my_fw != loaded_fw) { 2785 PMD_DRV_LOG(NOTICE, sc, 2786 "FW 0x%08x already loaded (mine is 0x%08x)", 2787 loaded_fw, my_fw); 2788 return -1; 2789 } 2790 } 2791 2792 return 0; 2793 } 2794 2795 /* mark PMF if applicable */ 2796 static void bnx2x_nic_load_pmf(struct bnx2x_softc *sc, uint32_t load_code) 2797 { 2798 uint32_t ncsi_oem_data_addr; 2799 2800 PMD_INIT_FUNC_TRACE(sc); 2801 2802 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 2803 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || 2804 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { 2805 /* 2806 * Barrier here for ordering between the writing to sc->port.pmf here 2807 * and reading it from the periodic task. 2808 */ 2809 sc->port.pmf = 1; 2810 mb(); 2811 } else { 2812 sc->port.pmf = 0; 2813 } 2814 2815 PMD_DRV_LOG(DEBUG, sc, "pmf %d", sc->port.pmf); 2816 2817 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { 2818 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { 2819 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); 2820 if (ncsi_oem_data_addr) { 2821 REG_WR(sc, 2822 (ncsi_oem_data_addr + 2823 offsetof(struct glob_ncsi_oem_data, 2824 driver_version)), 0); 2825 } 2826 } 2827 } 2828 } 2829 2830 static void bnx2x_read_mf_cfg(struct bnx2x_softc *sc) 2831 { 2832 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); 2833 int abs_func; 2834 int vn; 2835 2836 if (BNX2X_NOMCP(sc)) { 2837 return; /* what should be the default bvalue in this case */ 2838 } 2839 2840 /* 2841 * The formula for computing the absolute function number is... 
2842 * For 2 port configuration (4 functions per port): 2843 * abs_func = 2 * vn + SC_PORT + SC_PATH 2844 * For 4 port configuration (2 functions per port): 2845 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH 2846 */ 2847 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 2848 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); 2849 if (abs_func >= E1H_FUNC_MAX) { 2850 break; 2851 } 2852 sc->devinfo.mf_info.mf_config[vn] = 2853 MFCFG_RD(sc, func_mf_config[abs_func].config); 2854 } 2855 2856 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & 2857 FUNC_MF_CFG_FUNC_DISABLED) { 2858 PMD_DRV_LOG(DEBUG, sc, "mf_cfg function disabled"); 2859 sc->flags |= BNX2X_MF_FUNC_DIS; 2860 } else { 2861 PMD_DRV_LOG(DEBUG, sc, "mf_cfg function enabled"); 2862 sc->flags &= ~BNX2X_MF_FUNC_DIS; 2863 } 2864 } 2865 2866 /* acquire split MCP access lock register */ 2867 static int bnx2x_acquire_alr(struct bnx2x_softc *sc) 2868 { 2869 uint32_t j, val; 2870 2871 for (j = 0; j < 1000; j++) { 2872 val = (1UL << 31); 2873 REG_WR(sc, GRCBASE_MCP + 0x9c, val); 2874 val = REG_RD(sc, GRCBASE_MCP + 0x9c); 2875 if (val & (1L << 31)) 2876 break; 2877 2878 DELAY(5000); 2879 } 2880 2881 if (!(val & (1L << 31))) { 2882 PMD_DRV_LOG(NOTICE, sc, "Cannot acquire MCP access lock register"); 2883 return -1; 2884 } 2885 2886 return 0; 2887 } 2888 2889 /* release split MCP access lock register */ 2890 static void bnx2x_release_alr(struct bnx2x_softc *sc) 2891 { 2892 REG_WR(sc, GRCBASE_MCP + 0x9c, 0); 2893 } 2894 2895 static void bnx2x_fan_failure(struct bnx2x_softc *sc) 2896 { 2897 int port = SC_PORT(sc); 2898 uint32_t ext_phy_config; 2899 2900 /* mark the failure */ 2901 ext_phy_config = 2902 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); 2903 2904 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 2905 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 2906 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, 2907 ext_phy_config); 2908 2909 /* log the failure */ 2910 PMD_DRV_LOG(INFO, sc, 2911 "Fan Failure has caused the driver to shutdown " 2912 "the card to prevent permanent damage. 
" 2913 "Please contact OEM Support for assistance"); 2914 2915 rte_panic("Schedule task to handle fan failure"); 2916 } 2917 2918 /* this function is called upon a link interrupt */ 2919 static void bnx2x_link_attn(struct bnx2x_softc *sc) 2920 { 2921 uint32_t pause_enabled = 0; 2922 struct host_port_stats *pstats; 2923 int cmng_fns; 2924 2925 /* Make sure that we are synced with the current statistics */ 2926 bnx2x_stats_handle(sc, STATS_EVENT_STOP); 2927 2928 elink_link_update(&sc->link_params, &sc->link_vars); 2929 2930 if (sc->link_vars.link_up) { 2931 2932 /* dropless flow control */ 2933 if (sc->dropless_fc) { 2934 pause_enabled = 0; 2935 2936 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 2937 pause_enabled = 1; 2938 } 2939 2940 REG_WR(sc, 2941 (BAR_USTRORM_INTMEM + 2942 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), 2943 pause_enabled); 2944 } 2945 2946 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { 2947 pstats = BNX2X_SP(sc, port_stats); 2948 /* reset old mac stats */ 2949 memset(&(pstats->mac_stx[0]), 0, 2950 sizeof(struct mac_stx)); 2951 } 2952 2953 if (sc->state == BNX2X_STATE_OPEN) { 2954 bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); 2955 } 2956 } 2957 2958 if (sc->link_vars.link_up && sc->link_vars.line_speed) { 2959 cmng_fns = bnx2x_get_cmng_fns_mode(sc); 2960 2961 if (cmng_fns != CMNG_FNS_NONE) { 2962 bnx2x_cmng_fns_init(sc, FALSE, cmng_fns); 2963 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 2964 } 2965 } 2966 2967 bnx2x_link_report_locked(sc); 2968 2969 if (IS_MF(sc)) { 2970 bnx2x_link_sync_notify(sc); 2971 } 2972 } 2973 2974 static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted) 2975 { 2976 int port = SC_PORT(sc); 2977 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 2978 MISC_REG_AEU_MASK_ATTN_FUNC_0; 2979 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 2980 NIG_REG_MASK_INTERRUPT_PORT0; 2981 uint32_t aeu_mask; 2982 uint32_t nig_mask = 0; 2983 uint32_t reg_addr; 2984 uint32_t igu_acked; 2985 uint32_t cnt; 2986 2987 if (sc->attn_state & asserted) { 2988 PMD_DRV_LOG(ERR, sc, "IGU ERROR attn=0x%08x", asserted); 2989 } 2990 2991 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 2992 2993 aeu_mask = REG_RD(sc, aeu_addr); 2994 2995 aeu_mask &= ~(asserted & 0x3ff); 2996 2997 REG_WR(sc, aeu_addr, aeu_mask); 2998 2999 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 3000 3001 sc->attn_state |= asserted; 3002 3003 if (asserted & ATTN_HARD_WIRED_MASK) { 3004 if (asserted & ATTN_NIG_FOR_FUNC) { 3005 3006 bnx2x_acquire_phy_lock(sc); 3007 /* save nig interrupt mask */ 3008 nig_mask = REG_RD(sc, nig_int_mask_addr); 3009 3010 /* If nig_mask is not set, no need to call the update function */ 3011 if (nig_mask) { 3012 REG_WR(sc, nig_int_mask_addr, 0); 3013 3014 bnx2x_link_attn(sc); 3015 } 3016 3017 /* handle unicore attn? 
*/ 3018 } 3019 3020 if (asserted & ATTN_SW_TIMER_4_FUNC) { 3021 PMD_DRV_LOG(DEBUG, sc, "ATTN_SW_TIMER_4_FUNC!"); 3022 } 3023 3024 if (asserted & GPIO_2_FUNC) { 3025 PMD_DRV_LOG(DEBUG, sc, "GPIO_2_FUNC!"); 3026 } 3027 3028 if (asserted & GPIO_3_FUNC) { 3029 PMD_DRV_LOG(DEBUG, sc, "GPIO_3_FUNC!"); 3030 } 3031 3032 if (asserted & GPIO_4_FUNC) { 3033 PMD_DRV_LOG(DEBUG, sc, "GPIO_4_FUNC!"); 3034 } 3035 3036 if (port == 0) { 3037 if (asserted & ATTN_GENERAL_ATTN_1) { 3038 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_1!"); 3039 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 3040 } 3041 if (asserted & ATTN_GENERAL_ATTN_2) { 3042 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_2!"); 3043 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 3044 } 3045 if (asserted & ATTN_GENERAL_ATTN_3) { 3046 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_3!"); 3047 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 3048 } 3049 } else { 3050 if (asserted & ATTN_GENERAL_ATTN_4) { 3051 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_4!"); 3052 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 3053 } 3054 if (asserted & ATTN_GENERAL_ATTN_5) { 3055 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_5!"); 3056 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 3057 } 3058 if (asserted & ATTN_GENERAL_ATTN_6) { 3059 PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_6!"); 3060 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 3061 } 3062 } 3063 } 3064 /* hardwired */ 3065 if (sc->devinfo.int_block == INT_BLOCK_HC) { 3066 reg_addr = 3067 (HC_REG_COMMAND_REG + port * 32 + 3068 COMMAND_REG_ATTN_BITS_SET); 3069 } else { 3070 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER * 8); 3071 } 3072 3073 PMD_DRV_LOG(DEBUG, sc, "about to mask 0x%08x at %s addr 0x%08x", 3074 asserted, 3075 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", 3076 reg_addr); 3077 REG_WR(sc, reg_addr, asserted); 3078 3079 /* now set back the mask */ 3080 if (asserted & ATTN_NIG_FOR_FUNC) { 3081 /* 3082 * Verify that IGU ack through BAR was written before restoring 3083 * NIG mask. This loop should exit after 2-3 iterations max. 3084 */ 3085 if (sc->devinfo.int_block != INT_BLOCK_HC) { 3086 cnt = 0; 3087 3088 do { 3089 igu_acked = 3090 REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); 3091 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) 3092 && (++cnt < MAX_IGU_ATTN_ACK_TO)); 3093 3094 if (!igu_acked) { 3095 PMD_DRV_LOG(ERR, sc, 3096 "Failed to verify IGU ack on time"); 3097 } 3098 3099 mb(); 3100 } 3101 3102 REG_WR(sc, nig_int_mask_addr, nig_mask); 3103 3104 bnx2x_release_phy_lock(sc); 3105 } 3106 } 3107 3108 static void 3109 bnx2x_print_next_block(__rte_unused struct bnx2x_softc *sc, __rte_unused int idx, 3110 __rte_unused const char *blk) 3111 { 3112 PMD_DRV_LOG(INFO, sc, "%s%s", idx ? 
", " : "", blk); 3113 } 3114 3115 static int 3116 bnx2x_check_blocks_with_parity0(struct bnx2x_softc *sc, uint32_t sig, int par_num, 3117 uint8_t print) 3118 { 3119 uint32_t cur_bit = 0; 3120 int i = 0; 3121 3122 for (i = 0; sig; i++) { 3123 cur_bit = ((uint32_t) 0x1 << i); 3124 if (sig & cur_bit) { 3125 switch (cur_bit) { 3126 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 3127 if (print) 3128 bnx2x_print_next_block(sc, par_num++, 3129 "BRB"); 3130 break; 3131 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 3132 if (print) 3133 bnx2x_print_next_block(sc, par_num++, 3134 "PARSER"); 3135 break; 3136 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 3137 if (print) 3138 bnx2x_print_next_block(sc, par_num++, 3139 "TSDM"); 3140 break; 3141 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 3142 if (print) 3143 bnx2x_print_next_block(sc, par_num++, 3144 "SEARCHER"); 3145 break; 3146 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 3147 if (print) 3148 bnx2x_print_next_block(sc, par_num++, 3149 "TCM"); 3150 break; 3151 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 3152 if (print) 3153 bnx2x_print_next_block(sc, par_num++, 3154 "TSEMI"); 3155 break; 3156 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 3157 if (print) 3158 bnx2x_print_next_block(sc, par_num++, 3159 "XPB"); 3160 break; 3161 } 3162 3163 /* Clear the bit */ 3164 sig &= ~cur_bit; 3165 } 3166 } 3167 3168 return par_num; 3169 } 3170 3171 static int 3172 bnx2x_check_blocks_with_parity1(struct bnx2x_softc *sc, uint32_t sig, int par_num, 3173 uint8_t * global, uint8_t print) 3174 { 3175 int i = 0; 3176 uint32_t cur_bit = 0; 3177 for (i = 0; sig; i++) { 3178 cur_bit = ((uint32_t) 0x1 << i); 3179 if (sig & cur_bit) { 3180 switch (cur_bit) { 3181 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 3182 if (print) 3183 bnx2x_print_next_block(sc, par_num++, 3184 "PBF"); 3185 break; 3186 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 3187 if (print) 3188 bnx2x_print_next_block(sc, par_num++, 3189 "QM"); 3190 break; 3191 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 3192 if (print) 3193 bnx2x_print_next_block(sc, par_num++, 3194 "TM"); 3195 break; 3196 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 3197 if (print) 3198 bnx2x_print_next_block(sc, par_num++, 3199 "XSDM"); 3200 break; 3201 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 3202 if (print) 3203 bnx2x_print_next_block(sc, par_num++, 3204 "XCM"); 3205 break; 3206 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 3207 if (print) 3208 bnx2x_print_next_block(sc, par_num++, 3209 "XSEMI"); 3210 break; 3211 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 3212 if (print) 3213 bnx2x_print_next_block(sc, par_num++, 3214 "DOORBELLQ"); 3215 break; 3216 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 3217 if (print) 3218 bnx2x_print_next_block(sc, par_num++, 3219 "NIG"); 3220 break; 3221 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 3222 if (print) 3223 bnx2x_print_next_block(sc, par_num++, 3224 "VAUX PCI CORE"); 3225 *global = TRUE; 3226 break; 3227 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 3228 if (print) 3229 bnx2x_print_next_block(sc, par_num++, 3230 "DEBUG"); 3231 break; 3232 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 3233 if (print) 3234 bnx2x_print_next_block(sc, par_num++, 3235 "USDM"); 3236 break; 3237 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 3238 if (print) 3239 bnx2x_print_next_block(sc, par_num++, 3240 "UCM"); 3241 break; 3242 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 3243 if (print) 3244 bnx2x_print_next_block(sc, par_num++, 3245 "USEMI"); 3246 break; 3247 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 3248 
if (print) 3249 bnx2x_print_next_block(sc, par_num++, 3250 "UPB"); 3251 break; 3252 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 3253 if (print) 3254 bnx2x_print_next_block(sc, par_num++, 3255 "CSDM"); 3256 break; 3257 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 3258 if (print) 3259 bnx2x_print_next_block(sc, par_num++, 3260 "CCM"); 3261 break; 3262 } 3263 3264 /* Clear the bit */ 3265 sig &= ~cur_bit; 3266 } 3267 } 3268 3269 return par_num; 3270 } 3271 3272 static int 3273 bnx2x_check_blocks_with_parity2(struct bnx2x_softc *sc, uint32_t sig, int par_num, 3274 uint8_t print) 3275 { 3276 uint32_t cur_bit = 0; 3277 int i = 0; 3278 3279 for (i = 0; sig; i++) { 3280 cur_bit = ((uint32_t) 0x1 << i); 3281 if (sig & cur_bit) { 3282 switch (cur_bit) { 3283 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 3284 if (print) 3285 bnx2x_print_next_block(sc, par_num++, 3286 "CSEMI"); 3287 break; 3288 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 3289 if (print) 3290 bnx2x_print_next_block(sc, par_num++, 3291 "PXP"); 3292 break; 3293 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 3294 if (print) 3295 bnx2x_print_next_block(sc, par_num++, 3296 "PXPPCICLOCKCLIENT"); 3297 break; 3298 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 3299 if (print) 3300 bnx2x_print_next_block(sc, par_num++, 3301 "CFC"); 3302 break; 3303 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 3304 if (print) 3305 bnx2x_print_next_block(sc, par_num++, 3306 "CDU"); 3307 break; 3308 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 3309 if (print) 3310 bnx2x_print_next_block(sc, par_num++, 3311 "DMAE"); 3312 break; 3313 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 3314 if (print) 3315 bnx2x_print_next_block(sc, par_num++, 3316 "IGU"); 3317 break; 3318 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 3319 if (print) 3320 bnx2x_print_next_block(sc, par_num++, 3321 "MISC"); 3322 break; 3323 } 3324 3325 /* Clear the bit */ 3326 sig &= ~cur_bit; 3327 } 3328 } 3329 3330 return par_num; 3331 } 3332 3333 static int 3334 bnx2x_check_blocks_with_parity3(struct bnx2x_softc *sc, uint32_t sig, int par_num, 3335 uint8_t * global, uint8_t print) 3336 { 3337 uint32_t cur_bit = 0; 3338 int i = 0; 3339 3340 for (i = 0; sig; i++) { 3341 cur_bit = ((uint32_t) 0x1 << i); 3342 if (sig & cur_bit) { 3343 switch (cur_bit) { 3344 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 3345 if (print) 3346 bnx2x_print_next_block(sc, par_num++, 3347 "MCP ROM"); 3348 *global = TRUE; 3349 break; 3350 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 3351 if (print) 3352 bnx2x_print_next_block(sc, par_num++, 3353 "MCP UMP RX"); 3354 *global = TRUE; 3355 break; 3356 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 3357 if (print) 3358 bnx2x_print_next_block(sc, par_num++, 3359 "MCP UMP TX"); 3360 *global = TRUE; 3361 break; 3362 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 3363 if (print) 3364 bnx2x_print_next_block(sc, par_num++, 3365 "MCP SCPAD"); 3366 *global = TRUE; 3367 break; 3368 } 3369 3370 /* Clear the bit */ 3371 sig &= ~cur_bit; 3372 } 3373 } 3374 3375 return par_num; 3376 } 3377 3378 static int 3379 bnx2x_check_blocks_with_parity4(struct bnx2x_softc *sc, uint32_t sig, int par_num, 3380 uint8_t print) 3381 { 3382 uint32_t cur_bit = 0; 3383 int i = 0; 3384 3385 for (i = 0; sig; i++) { 3386 cur_bit = ((uint32_t) 0x1 << i); 3387 if (sig & cur_bit) { 3388 switch (cur_bit) { 3389 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 3390 if (print) 3391 bnx2x_print_next_block(sc, par_num++, 3392 "PGLUE_B"); 3393 break; 3394 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 3395 if 
(print) 3396 bnx2x_print_next_block(sc, par_num++, 3397 "ATC"); 3398 break; 3399 } 3400 3401 /* Clear the bit */ 3402 sig &= ~cur_bit; 3403 } 3404 } 3405 3406 return par_num; 3407 } 3408 3409 static uint8_t 3410 bnx2x_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print, 3411 uint32_t * sig) 3412 { 3413 int par_num = 0; 3414 3415 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 3416 (sig[1] & HW_PRTY_ASSERT_SET_1) || 3417 (sig[2] & HW_PRTY_ASSERT_SET_2) || 3418 (sig[3] & HW_PRTY_ASSERT_SET_3) || 3419 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 3420 PMD_DRV_LOG(ERR, sc, 3421 "Parity error: HW block parity attention:" 3422 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x", 3423 (uint32_t) (sig[0] & HW_PRTY_ASSERT_SET_0), 3424 (uint32_t) (sig[1] & HW_PRTY_ASSERT_SET_1), 3425 (uint32_t) (sig[2] & HW_PRTY_ASSERT_SET_2), 3426 (uint32_t) (sig[3] & HW_PRTY_ASSERT_SET_3), 3427 (uint32_t) (sig[4] & HW_PRTY_ASSERT_SET_4)); 3428 3429 if (print) 3430 PMD_DRV_LOG(INFO, sc, "Parity errors detected in blocks: "); 3431 3432 par_num = 3433 bnx2x_check_blocks_with_parity0(sc, sig[0] & 3434 HW_PRTY_ASSERT_SET_0, 3435 par_num, print); 3436 par_num = 3437 bnx2x_check_blocks_with_parity1(sc, sig[1] & 3438 HW_PRTY_ASSERT_SET_1, 3439 par_num, global, print); 3440 par_num = 3441 bnx2x_check_blocks_with_parity2(sc, sig[2] & 3442 HW_PRTY_ASSERT_SET_2, 3443 par_num, print); 3444 par_num = 3445 bnx2x_check_blocks_with_parity3(sc, sig[3] & 3446 HW_PRTY_ASSERT_SET_3, 3447 par_num, global, print); 3448 par_num = 3449 bnx2x_check_blocks_with_parity4(sc, sig[4] & 3450 HW_PRTY_ASSERT_SET_4, 3451 par_num, print); 3452 3453 if (print) 3454 PMD_DRV_LOG(INFO, sc, ""); 3455 3456 return TRUE; 3457 } 3458 3459 return FALSE; 3460 } 3461 3462 static uint8_t 3463 bnx2x_chk_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print) 3464 { 3465 struct attn_route attn = { {0} }; 3466 int port = SC_PORT(sc); 3467 3468 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4); 3469 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4); 3470 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4); 3471 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4); 3472 3473 if (!CHIP_IS_E1x(sc)) 3474 attn.sig[4] = 3475 REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port * 4); 3476 3477 return bnx2x_parity_attn(sc, global, print, attn.sig); 3478 } 3479 3480 static void bnx2x_attn_int_deasserted4(struct bnx2x_softc *sc, uint32_t attn) 3481 { 3482 uint32_t val; 3483 3484 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 3485 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 3486 PMD_DRV_LOG(INFO, sc, "ERROR: PGLUE hw attention 0x%08x", val); 3487 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 3488 PMD_DRV_LOG(INFO, sc, 3489 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR"); 3490 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 3491 PMD_DRV_LOG(INFO, sc, 3492 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR"); 3493 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 3494 PMD_DRV_LOG(INFO, sc, 3495 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN"); 3496 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 3497 PMD_DRV_LOG(INFO, sc, 3498 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN"); 3499 if (val & 3500 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 3501 PMD_DRV_LOG(INFO, sc, 3502 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN"); 3503 if (val & 3504 
PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 3505 PMD_DRV_LOG(INFO, sc, 3506 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN"); 3507 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 3508 PMD_DRV_LOG(INFO, sc, 3509 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN"); 3510 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 3511 PMD_DRV_LOG(INFO, sc, 3512 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN"); 3513 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 3514 PMD_DRV_LOG(INFO, sc, 3515 "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW"); 3516 } 3517 3518 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 3519 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); 3520 PMD_DRV_LOG(INFO, sc, "ERROR: ATC hw attention 0x%08x", val); 3521 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 3522 PMD_DRV_LOG(INFO, sc, 3523 "ERROR: ATC_ATC_INT_STS_REG_ADDRESS_ERROR"); 3524 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 3525 PMD_DRV_LOG(INFO, sc, 3526 "ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND"); 3527 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 3528 PMD_DRV_LOG(INFO, sc, 3529 "ERROR: ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS"); 3530 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 3531 PMD_DRV_LOG(INFO, sc, 3532 "ERROR: ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT"); 3533 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 3534 PMD_DRV_LOG(INFO, sc, 3535 "ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR"); 3536 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 3537 PMD_DRV_LOG(INFO, sc, 3538 "ERROR: ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU"); 3539 } 3540 3541 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 3542 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 3543 PMD_DRV_LOG(INFO, sc, 3544 "ERROR: FATAL parity attention set4 0x%08x", 3545 (uint32_t) (attn & 3546 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR 3547 | 3548 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 3549 } 3550 } 3551 3552 static void bnx2x_e1h_disable(struct bnx2x_softc *sc) 3553 { 3554 int port = SC_PORT(sc); 3555 3556 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0); 3557 } 3558 3559 static void bnx2x_e1h_enable(struct bnx2x_softc *sc) 3560 { 3561 int port = SC_PORT(sc); 3562 3563 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1); 3564 } 3565 3566 /* 3567 * called due to MCP event (on pmf): 3568 * reread new bandwidth configuration 3569 * configure FW 3570 * notify others function about the change 3571 */ 3572 static void bnx2x_config_mf_bw(struct bnx2x_softc *sc) 3573 { 3574 if (sc->link_vars.link_up) { 3575 bnx2x_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); 3576 bnx2x_link_sync_notify(sc); 3577 } 3578 3579 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 3580 } 3581 3582 static void bnx2x_set_mf_bw(struct bnx2x_softc *sc) 3583 { 3584 bnx2x_config_mf_bw(sc); 3585 bnx2x_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 3586 } 3587 3588 static void bnx2x_handle_eee_event(struct bnx2x_softc *sc) 3589 { 3590 bnx2x_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 3591 } 3592 3593 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 3594 3595 static void bnx2x_drv_info_ether_stat(struct bnx2x_softc *sc) 3596 { 3597 struct eth_stats_info *ether_stat = &sc->sp->drv_info_to_mcp.ether_stat; 3598 3599 strncpy(ether_stat->version, BNX2X_DRIVER_VERSION, 3600 ETH_STAT_INFO_VERSION_LEN); 3601 3602 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, 3603 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 3604 ether_stat->mac_local + MAC_PAD, 3605 MAC_PAD, ETH_ALEN); 3606 3607 ether_stat->mtu_size = 
sc->mtu; 3608 3609 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 3610 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0; 3611 3612 ether_stat->txq_size = sc->tx_ring_size; 3613 ether_stat->rxq_size = sc->rx_ring_size; 3614 } 3615 3616 static void bnx2x_handle_drv_info_req(struct bnx2x_softc *sc) 3617 { 3618 enum drv_info_opcode op_code; 3619 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); 3620 3621 /* if drv_info version supported by MFW doesn't match - send NACK */ 3622 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 3623 bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3624 return; 3625 } 3626 3627 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 3628 DRV_INFO_CONTROL_OP_CODE_SHIFT); 3629 3630 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); 3631 3632 switch (op_code) { 3633 case ETH_STATS_OPCODE: 3634 bnx2x_drv_info_ether_stat(sc); 3635 break; 3636 case FCOE_STATS_OPCODE: 3637 case ISCSI_STATS_OPCODE: 3638 default: 3639 /* if op code isn't supported - send NACK */ 3640 bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3641 return; 3642 } 3643 3644 /* 3645 * If we got drv_info attn from MFW then these fields are defined in 3646 * shmem2 for sure 3647 */ 3648 SHMEM2_WR(sc, drv_info_host_addr_lo, 3649 U64_LO(BNX2X_SP_MAPPING(sc, drv_info_to_mcp))); 3650 SHMEM2_WR(sc, drv_info_host_addr_hi, 3651 U64_HI(BNX2X_SP_MAPPING(sc, drv_info_to_mcp))); 3652 3653 bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); 3654 } 3655 3656 static void bnx2x_dcc_event(struct bnx2x_softc *sc, uint32_t dcc_event) 3657 { 3658 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 3659 /* 3660 * This is the only place besides the function initialization 3661 * where the sc->flags can change so it is done without any 3662 * locks 3663 */ 3664 if (sc->devinfo. 3665 mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { 3666 PMD_DRV_LOG(DEBUG, sc, "mf_cfg function disabled"); 3667 sc->flags |= BNX2X_MF_FUNC_DIS; 3668 bnx2x_e1h_disable(sc); 3669 } else { 3670 PMD_DRV_LOG(DEBUG, sc, "mf_cfg function enabled"); 3671 sc->flags &= ~BNX2X_MF_FUNC_DIS; 3672 bnx2x_e1h_enable(sc); 3673 } 3674 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 3675 } 3676 3677 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 3678 bnx2x_config_mf_bw(sc); 3679 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 3680 } 3681 3682 /* Report results to MCP */ 3683 if (dcc_event) 3684 bnx2x_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); 3685 else 3686 bnx2x_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); 3687 } 3688 3689 static void bnx2x_pmf_update(struct bnx2x_softc *sc) 3690 { 3691 int port = SC_PORT(sc); 3692 uint32_t val; 3693 3694 sc->port.pmf = 1; 3695 3696 /* 3697 * We need the mb() to ensure the ordering between the writing to 3698 * sc->port.pmf here and reading it from the bnx2x_periodic_task(). 
3699 */ 3700 mb(); 3701 3702 /* enable nig attention */ 3703 val = (0xff0f | (1 << (SC_VN(sc) + 4))); 3704 if (sc->devinfo.int_block == INT_BLOCK_HC) { 3705 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, val); 3706 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, val); 3707 } else if (!CHIP_IS_E1x(sc)) { 3708 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 3709 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 3710 } 3711 3712 bnx2x_stats_handle(sc, STATS_EVENT_PMF); 3713 } 3714 3715 static int bnx2x_mc_assert(struct bnx2x_softc *sc) 3716 { 3717 char last_idx; 3718 int i, rc = 0; 3719 __rte_unused uint32_t row0, row1, row2, row3; 3720 3721 /* XSTORM */ 3722 last_idx = 3723 REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); 3724 if (last_idx) 3725 PMD_DRV_LOG(ERR, sc, "XSTORM_ASSERT_LIST_INDEX 0x%x", last_idx); 3726 3727 /* print the asserts */ 3728 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 3729 3730 row0 = 3731 REG_RD(sc, 3732 BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); 3733 row1 = 3734 REG_RD(sc, 3735 BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 3736 4); 3737 row2 = 3738 REG_RD(sc, 3739 BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 3740 8); 3741 row3 = 3742 REG_RD(sc, 3743 BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 3744 12); 3745 3746 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 3747 PMD_DRV_LOG(ERR, sc, 3748 "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", 3749 i, row3, row2, row1, row0); 3750 rc++; 3751 } else { 3752 break; 3753 } 3754 } 3755 3756 /* TSTORM */ 3757 last_idx = 3758 REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); 3759 if (last_idx) { 3760 PMD_DRV_LOG(ERR, sc, "TSTORM_ASSERT_LIST_INDEX 0x%x", last_idx); 3761 } 3762 3763 /* print the asserts */ 3764 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 3765 3766 row0 = 3767 REG_RD(sc, 3768 BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); 3769 row1 = 3770 REG_RD(sc, 3771 BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 3772 4); 3773 row2 = 3774 REG_RD(sc, 3775 BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 3776 8); 3777 row3 = 3778 REG_RD(sc, 3779 BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 3780 12); 3781 3782 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 3783 PMD_DRV_LOG(ERR, sc, 3784 "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", 3785 i, row3, row2, row1, row0); 3786 rc++; 3787 } else { 3788 break; 3789 } 3790 } 3791 3792 /* CSTORM */ 3793 last_idx = 3794 REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); 3795 if (last_idx) { 3796 PMD_DRV_LOG(ERR, sc, "CSTORM_ASSERT_LIST_INDEX 0x%x", last_idx); 3797 } 3798 3799 /* print the asserts */ 3800 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 3801 3802 row0 = 3803 REG_RD(sc, 3804 BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); 3805 row1 = 3806 REG_RD(sc, 3807 BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 3808 4); 3809 row2 = 3810 REG_RD(sc, 3811 BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 3812 8); 3813 row3 = 3814 REG_RD(sc, 3815 BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 3816 12); 3817 3818 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 3819 PMD_DRV_LOG(ERR, sc, 3820 "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", 3821 i, row3, row2, row1, row0); 3822 rc++; 3823 } else { 3824 break; 3825 } 3826 } 3827 3828 /* USTORM */ 3829 last_idx = 3830 REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); 3831 if (last_idx) { 3832 PMD_DRV_LOG(ERR, sc, "USTORM_ASSERT_LIST_INDEX 0x%x", last_idx); 3833 } 3834 3835 /* print the 
asserts */ 3836 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 3837 3838 row0 = 3839 REG_RD(sc, 3840 BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); 3841 row1 = 3842 REG_RD(sc, 3843 BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 3844 4); 3845 row2 = 3846 REG_RD(sc, 3847 BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 3848 8); 3849 row3 = 3850 REG_RD(sc, 3851 BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 3852 12); 3853 3854 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 3855 PMD_DRV_LOG(ERR, sc, 3856 "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", 3857 i, row3, row2, row1, row0); 3858 rc++; 3859 } else { 3860 break; 3861 } 3862 } 3863 3864 return rc; 3865 } 3866 3867 static void bnx2x_attn_int_deasserted3(struct bnx2x_softc *sc, uint32_t attn) 3868 { 3869 int func = SC_FUNC(sc); 3870 uint32_t val; 3871 3872 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 3873 3874 if (attn & BNX2X_PMF_LINK_ASSERT(sc)) { 3875 3876 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0); 3877 bnx2x_read_mf_cfg(sc); 3878 sc->devinfo.mf_info.mf_config[SC_VN(sc)] = 3879 MFCFG_RD(sc, 3880 func_mf_config[SC_ABS_FUNC(sc)].config); 3881 val = 3882 SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); 3883 3884 if (val & DRV_STATUS_DCC_EVENT_MASK) 3885 bnx2x_dcc_event(sc, 3886 (val & 3887 DRV_STATUS_DCC_EVENT_MASK)); 3888 3889 if (val & DRV_STATUS_SET_MF_BW) 3890 bnx2x_set_mf_bw(sc); 3891 3892 if (val & DRV_STATUS_DRV_INFO_REQ) 3893 bnx2x_handle_drv_info_req(sc); 3894 3895 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) 3896 bnx2x_pmf_update(sc); 3897 3898 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 3899 bnx2x_handle_eee_event(sc); 3900 3901 if (sc->link_vars.periodic_flags & 3902 ELINK_PERIODIC_FLAGS_LINK_EVENT) { 3903 /* sync with link */ 3904 bnx2x_acquire_phy_lock(sc); 3905 sc->link_vars.periodic_flags &= 3906 ~ELINK_PERIODIC_FLAGS_LINK_EVENT; 3907 bnx2x_release_phy_lock(sc); 3908 if (IS_MF(sc)) { 3909 bnx2x_link_sync_notify(sc); 3910 } 3911 bnx2x_link_report(sc); 3912 } 3913 3914 /* 3915 * Always call it here: bnx2x_link_report() will 3916 * prevent the link indication duplication. 3917 */ 3918 bnx2x_link_status_update(sc); 3919 3920 } else if (attn & BNX2X_MC_ASSERT_BITS) { 3921 3922 PMD_DRV_LOG(ERR, sc, "MC assert!"); 3923 bnx2x_mc_assert(sc); 3924 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); 3925 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); 3926 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); 3927 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); 3928 rte_panic("MC assert!"); 3929 3930 } else if (attn & BNX2X_MCP_ASSERT) { 3931 3932 PMD_DRV_LOG(ERR, sc, "MCP assert!"); 3933 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); 3934 3935 } else { 3936 PMD_DRV_LOG(ERR, sc, 3937 "Unknown HW assert! 
(attn 0x%08x)", attn); 3938 } 3939 } 3940 3941 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 3942 PMD_DRV_LOG(ERR, sc, "LATCHED attention 0x%08x (masked)", attn); 3943 if (attn & BNX2X_GRC_TIMEOUT) { 3944 val = REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); 3945 PMD_DRV_LOG(ERR, sc, "GRC time-out 0x%08x", val); 3946 } 3947 if (attn & BNX2X_GRC_RSV) { 3948 val = REG_RD(sc, MISC_REG_GRC_RSV_ATTN); 3949 PMD_DRV_LOG(ERR, sc, "GRC reserved 0x%08x", val); 3950 } 3951 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 3952 } 3953 } 3954 3955 static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn) 3956 { 3957 int port = SC_PORT(sc); 3958 int reg_offset; 3959 uint32_t val0, mask0, val1, mask1; 3960 uint32_t val; 3961 3962 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 3963 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR); 3964 PMD_DRV_LOG(ERR, sc, "CFC hw attention 0x%08x", val); 3965 /* CFC error attention */ 3966 if (val & 0x2) { 3967 PMD_DRV_LOG(ERR, sc, "FATAL error from CFC"); 3968 } 3969 } 3970 3971 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 3972 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0); 3973 PMD_DRV_LOG(ERR, sc, "PXP hw attention-0 0x%08x", val); 3974 /* RQ_USDMDP_FIFO_OVERFLOW */ 3975 if (val & 0x18000) { 3976 PMD_DRV_LOG(ERR, sc, "FATAL error from PXP"); 3977 } 3978 3979 if (!CHIP_IS_E1x(sc)) { 3980 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1); 3981 PMD_DRV_LOG(ERR, sc, "PXP hw attention-1 0x%08x", val); 3982 } 3983 } 3984 #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR 3985 #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT 3986 3987 if (attn & AEU_PXP2_HW_INT_BIT) { 3988 /* CQ47854 workaround do not panic on 3989 * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 3990 */ 3991 if (!CHIP_IS_E1x(sc)) { 3992 mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0); 3993 val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1); 3994 mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1); 3995 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0); 3996 /* 3997 * If the only PXP2_EOP_ERROR_BIT is set in 3998 * STS0 and STS1 - clear it 3999 * 4000 * probably we lose additional attentions between 4001 * STS0 and STS_CLR0, in this case user will not 4002 * be notified about them 4003 */ 4004 if (val0 & mask0 & PXP2_EOP_ERROR_BIT && 4005 !(val1 & mask1)) 4006 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 4007 4008 /* print the register, since no one can restore it */ 4009 PMD_DRV_LOG(ERR, sc, 4010 "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x", val0); 4011 4012 /* 4013 * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 4014 * then notify 4015 */ 4016 if (val0 & PXP2_EOP_ERROR_BIT) { 4017 PMD_DRV_LOG(ERR, sc, "PXP2_WR_PGLUE_EOP_ERROR"); 4018 4019 /* 4020 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is 4021 * set then clear attention from PXP2 block without panic 4022 */ 4023 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) && 4024 ((val1 & mask1) == 0)) 4025 attn &= ~AEU_PXP2_HW_INT_BIT; 4026 } 4027 } 4028 } 4029 4030 if (attn & HW_INTERRUT_ASSERT_SET_2) { 4031 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 4032 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 4033 4034 val = REG_RD(sc, reg_offset); 4035 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 4036 REG_WR(sc, reg_offset, val); 4037 4038 PMD_DRV_LOG(ERR, sc, 4039 "FATAL HW block attention set2 0x%x", 4040 (uint32_t) (attn & HW_INTERRUT_ASSERT_SET_2)); 4041 rte_panic("HW block attention set2"); 4042 } 4043 } 4044 4045 static void bnx2x_attn_int_deasserted1(struct bnx2x_softc *sc, uint32_t attn) 4046 { 4047 int port = SC_PORT(sc); 4048 int reg_offset; 4049 uint32_t val; 4050 4051 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 4052 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); 4053 PMD_DRV_LOG(ERR, sc, "DB hw attention 0x%08x", val); 4054 /* DORQ discard attention */ 4055 if (val & 0x2) { 4056 PMD_DRV_LOG(ERR, sc, "FATAL error from DORQ"); 4057 } 4058 } 4059 4060 if (attn & HW_INTERRUT_ASSERT_SET_1) { 4061 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 4062 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 4063 4064 val = REG_RD(sc, reg_offset); 4065 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 4066 REG_WR(sc, reg_offset, val); 4067 4068 PMD_DRV_LOG(ERR, sc, 4069 "FATAL HW block attention set1 0x%08x", 4070 (uint32_t) (attn & HW_INTERRUT_ASSERT_SET_1)); 4071 rte_panic("HW block attention set1"); 4072 } 4073 } 4074 4075 static void bnx2x_attn_int_deasserted0(struct bnx2x_softc *sc, uint32_t attn) 4076 { 4077 int port = SC_PORT(sc); 4078 int reg_offset; 4079 uint32_t val; 4080 4081 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4082 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 4083 4084 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 4085 val = REG_RD(sc, reg_offset); 4086 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 4087 REG_WR(sc, reg_offset, val); 4088 4089 PMD_DRV_LOG(WARNING, sc, "SPIO5 hw attention"); 4090 4091 /* Fan failure attention */ 4092 elink_hw_reset_phy(&sc->link_params); 4093 bnx2x_fan_failure(sc); 4094 } 4095 4096 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { 4097 bnx2x_acquire_phy_lock(sc); 4098 elink_handle_module_detect_int(&sc->link_params); 4099 bnx2x_release_phy_lock(sc); 4100 } 4101 4102 if (attn & HW_INTERRUT_ASSERT_SET_0) { 4103 val = REG_RD(sc, reg_offset); 4104 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 4105 REG_WR(sc, reg_offset, val); 4106 4107 rte_panic("FATAL HW block attention set0 0x%lx", 4108 (attn & (unsigned long)HW_INTERRUT_ASSERT_SET_0)); 4109 } 4110 } 4111 4112 static void bnx2x_attn_int_deasserted(struct bnx2x_softc *sc, uint32_t deasserted) 4113 { 4114 struct attn_route attn; 4115 struct attn_route *group_mask; 4116 int port = SC_PORT(sc); 4117 int index; 4118 uint32_t reg_addr; 4119 uint32_t val; 4120 uint32_t aeu_mask; 4121 uint8_t global = FALSE; 4122 4123 /* 4124 * Need to take HW lock because MCP or other port might also 4125 * try to handle this event. 
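 * The ALR is released again before the AEU mask update at the bottom of
 * this function; that update is protected by its own per-port
 * HW_LOCK_RESOURCE_PORT0_ATT_MASK hardware lock.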
4126 */ 4127 bnx2x_acquire_alr(sc); 4128 4129 if (bnx2x_chk_parity_attn(sc, &global, TRUE)) { 4130 sc->recovery_state = BNX2X_RECOVERY_INIT; 4131 4132 /* disable HW interrupts */ 4133 bnx2x_int_disable(sc); 4134 bnx2x_release_alr(sc); 4135 return; 4136 } 4137 4138 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4); 4139 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4); 4140 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4); 4141 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4); 4142 if (!CHIP_IS_E1x(sc)) { 4143 attn.sig[4] = 4144 REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port * 4); 4145 } else { 4146 attn.sig[4] = 0; 4147 } 4148 4149 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 4150 if (deasserted & (1 << index)) { 4151 group_mask = &sc->attn_group[index]; 4152 4153 bnx2x_attn_int_deasserted4(sc, 4154 attn. 4155 sig[4] & group_mask->sig[4]); 4156 bnx2x_attn_int_deasserted3(sc, 4157 attn. 4158 sig[3] & group_mask->sig[3]); 4159 bnx2x_attn_int_deasserted1(sc, 4160 attn. 4161 sig[1] & group_mask->sig[1]); 4162 bnx2x_attn_int_deasserted2(sc, 4163 attn. 4164 sig[2] & group_mask->sig[2]); 4165 bnx2x_attn_int_deasserted0(sc, 4166 attn. 4167 sig[0] & group_mask->sig[0]); 4168 } 4169 } 4170 4171 bnx2x_release_alr(sc); 4172 4173 if (sc->devinfo.int_block == INT_BLOCK_HC) { 4174 reg_addr = (HC_REG_COMMAND_REG + port * 32 + 4175 COMMAND_REG_ATTN_BITS_CLR); 4176 } else { 4177 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER * 8); 4178 } 4179 4180 val = ~deasserted; 4181 PMD_DRV_LOG(DEBUG, sc, 4182 "about to mask 0x%08x at %s addr 0x%08x", val, 4183 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", 4184 reg_addr); 4185 REG_WR(sc, reg_addr, val); 4186 4187 if (~sc->attn_state & deasserted) { 4188 PMD_DRV_LOG(ERR, sc, "IGU error"); 4189 } 4190 4191 reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4192 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4193 4194 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4195 4196 aeu_mask = REG_RD(sc, reg_addr); 4197 4198 aeu_mask |= (deasserted & 0x3ff); 4199 4200 REG_WR(sc, reg_addr, aeu_mask); 4201 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4202 4203 sc->attn_state &= ~deasserted; 4204 } 4205 4206 static void bnx2x_attn_int(struct bnx2x_softc *sc) 4207 { 4208 /* read local copy of bits */ 4209 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); 4210 uint32_t attn_ack = 4211 le32toh(sc->def_sb->atten_status_block.attn_bits_ack); 4212 uint32_t attn_state = sc->attn_state; 4213 4214 /* look for changed bits */ 4215 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; 4216 uint32_t deasserted = ~attn_bits & attn_ack & attn_state; 4217 4218 PMD_DRV_LOG(DEBUG, sc, 4219 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x", 4220 attn_bits, attn_ack, asserted, deasserted); 4221 4222 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { 4223 PMD_DRV_LOG(ERR, sc, "BAD attention state"); 4224 } 4225 4226 /* handle bits that were raised */ 4227 if (asserted) { 4228 bnx2x_attn_int_asserted(sc, asserted); 4229 } 4230 4231 if (deasserted) { 4232 bnx2x_attn_int_deasserted(sc, deasserted); 4233 } 4234 } 4235 4236 static uint16_t bnx2x_update_dsb_idx(struct bnx2x_softc *sc) 4237 { 4238 struct host_sp_status_block *def_sb = sc->def_sb; 4239 uint16_t rc = 0; 4240 4241 if (!def_sb) 4242 return 0; 4243 4244 mb(); /* status block is written to by the chip */ 4245 4246 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 4247 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; 4248 rc |= BNX2X_DEF_SB_ATT_IDX; 4249 } 4250 4251 if (sc->def_idx != def_sb->sp_sb.running_index) { 4252 sc->def_idx = def_sb->sp_sb.running_index; 4253 rc |= BNX2X_DEF_SB_IDX; 4254 } 4255 4256 mb(); 4257 4258 return rc; 4259 } 4260 4261 static struct ecore_queue_sp_obj *bnx2x_cid_to_q_obj(struct bnx2x_softc *sc, 4262 uint32_t cid) 4263 { 4264 return &sc->sp_objs[CID_TO_FP(cid, sc)].q_obj; 4265 } 4266 4267 static void bnx2x_handle_mcast_eqe(struct bnx2x_softc *sc) 4268 { 4269 struct ecore_mcast_ramrod_params rparam; 4270 int rc; 4271 4272 memset(&rparam, 0, sizeof(rparam)); 4273 4274 rparam.mcast_obj = &sc->mcast_obj; 4275 4276 /* clear pending state for the last command */ 4277 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); 4278 4279 /* if there are pending mcast commands - send them */ 4280 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { 4281 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 4282 if (rc < 0) { 4283 PMD_DRV_LOG(INFO, sc, 4284 "Failed to send pending mcast commands (%d)", 4285 rc); 4286 } 4287 } 4288 } 4289 4290 static void 4291 bnx2x_handle_classification_eqe(struct bnx2x_softc *sc, union event_ring_elem *elem) 4292 { 4293 uint32_t ramrod_flags = 0; 4294 int rc = 0; 4295 uint32_t cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; 4296 struct ecore_vlan_mac_obj *vlan_mac_obj; 4297 4298 /* always push next commands out, don't wait here */ 4299 rte_bit_relaxed_set32(RAMROD_CONT, &ramrod_flags); 4300 4301 switch (le32toh(elem->message.data.eth_event.echo) >> BNX2X_SWCID_SHIFT) { 4302 case ECORE_FILTER_MAC_PENDING: 4303 PMD_DRV_LOG(DEBUG, sc, "Got SETUP_MAC completions"); 4304 vlan_mac_obj = &sc->sp_objs[cid].mac_obj; 4305 break; 4306 4307 case ECORE_FILTER_MCAST_PENDING: 4308 PMD_DRV_LOG(DEBUG, sc, "Got SETUP_MCAST 
completions"); 4309 bnx2x_handle_mcast_eqe(sc); 4310 return; 4311 4312 default: 4313 PMD_DRV_LOG(NOTICE, sc, "Unsupported classification command: %d", 4314 elem->message.data.eth_event.echo); 4315 return; 4316 } 4317 4318 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); 4319 4320 if (rc < 0) { 4321 PMD_DRV_LOG(NOTICE, sc, 4322 "Failed to schedule new commands (%d)", rc); 4323 } else if (rc > 0) { 4324 PMD_DRV_LOG(DEBUG, sc, "Scheduled next pending commands..."); 4325 } 4326 } 4327 4328 static void bnx2x_handle_rx_mode_eqe(struct bnx2x_softc *sc) 4329 { 4330 rte_bit_relaxed_clear32(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 4331 4332 /* send rx_mode command again if was requested */ 4333 if (rte_bit_relaxed_test_and_clear32(ECORE_FILTER_RX_MODE_SCHED, 4334 &sc->sp_state)) 4335 bnx2x_set_storm_rx_mode(sc); 4336 } 4337 4338 static void bnx2x_update_eq_prod(struct bnx2x_softc *sc, uint16_t prod) 4339 { 4340 storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); 4341 wmb(); /* keep prod updates ordered */ 4342 } 4343 4344 static void bnx2x_eq_int(struct bnx2x_softc *sc) 4345 { 4346 uint16_t hw_cons, sw_cons, sw_prod; 4347 union event_ring_elem *elem; 4348 uint8_t echo; 4349 uint32_t cid; 4350 uint8_t opcode; 4351 int spqe_cnt = 0; 4352 struct ecore_queue_sp_obj *q_obj; 4353 struct ecore_func_sp_obj *f_obj = &sc->func_obj; 4354 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; 4355 4356 hw_cons = le16toh(*sc->eq_cons_sb); 4357 4358 /* 4359 * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. 4360 * when we get to the next-page we need to adjust so the loop 4361 * condition below will be met. The next element is the size of a 4362 * regular element and hence incrementing by 1 4363 */ 4364 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { 4365 hw_cons++; 4366 } 4367 4368 /* 4369 * This function may never run in parallel with itself for a 4370 * specific sc and no need for a read memory barrier here. 
4371 */ 4372 sw_cons = sc->eq_cons; 4373 sw_prod = sc->eq_prod; 4374 4375 for (; 4376 sw_cons != hw_cons; 4377 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 4378 4379 elem = &sc->eq[EQ_DESC(sw_cons)]; 4380 4381 /* elem CID originates from FW, actually LE */ 4382 cid = SW_CID(elem->message.data.cfc_del_event.cid); 4383 opcode = elem->message.opcode; 4384 4385 /* handle eq element */ 4386 switch (opcode) { 4387 case EVENT_RING_OPCODE_STAT_QUERY: 4388 PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "got statistics completion event %d", 4389 sc->stats_comp++); 4390 /* nothing to do with stats comp */ 4391 goto next_spqe; 4392 4393 case EVENT_RING_OPCODE_CFC_DEL: 4394 /* handle according to cid range */ 4395 /* we may want to verify here that the sc state is HALTING */ 4396 PMD_DRV_LOG(DEBUG, sc, "got delete ramrod for MULTI[%d]", 4397 cid); 4398 q_obj = bnx2x_cid_to_q_obj(sc, cid); 4399 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { 4400 break; 4401 } 4402 goto next_spqe; 4403 4404 case EVENT_RING_OPCODE_STOP_TRAFFIC: 4405 PMD_DRV_LOG(DEBUG, sc, "got STOP TRAFFIC"); 4406 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { 4407 break; 4408 } 4409 goto next_spqe; 4410 4411 case EVENT_RING_OPCODE_START_TRAFFIC: 4412 PMD_DRV_LOG(DEBUG, sc, "got START TRAFFIC"); 4413 if (f_obj->complete_cmd 4414 (sc, f_obj, ECORE_F_CMD_TX_START)) { 4415 break; 4416 } 4417 goto next_spqe; 4418 4419 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 4420 echo = elem->message.data.function_update_event.echo; 4421 if (echo == SWITCH_UPDATE) { 4422 PMD_DRV_LOG(DEBUG, sc, 4423 "got FUNC_SWITCH_UPDATE ramrod"); 4424 if (f_obj->complete_cmd(sc, f_obj, 4425 ECORE_F_CMD_SWITCH_UPDATE)) 4426 { 4427 break; 4428 } 4429 } else { 4430 PMD_DRV_LOG(DEBUG, sc, 4431 "AFEX: ramrod completed FUNCTION_UPDATE"); 4432 f_obj->complete_cmd(sc, f_obj, 4433 ECORE_F_CMD_AFEX_UPDATE); 4434 } 4435 goto next_spqe; 4436 4437 case EVENT_RING_OPCODE_FORWARD_SETUP: 4438 q_obj = &bnx2x_fwd_sp_obj(sc, q_obj); 4439 if (q_obj->complete_cmd(sc, q_obj, 4440 ECORE_Q_CMD_SETUP_TX_ONLY)) { 4441 break; 4442 } 4443 goto next_spqe; 4444 4445 case EVENT_RING_OPCODE_FUNCTION_START: 4446 PMD_DRV_LOG(DEBUG, sc, "got FUNC_START ramrod"); 4447 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { 4448 break; 4449 } 4450 goto next_spqe; 4451 4452 case EVENT_RING_OPCODE_FUNCTION_STOP: 4453 PMD_DRV_LOG(DEBUG, sc, "got FUNC_STOP ramrod"); 4454 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { 4455 break; 4456 } 4457 goto next_spqe; 4458 } 4459 4460 switch (opcode | sc->state) { 4461 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPEN): 4462 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPENING_WAITING_PORT): 4463 cid = 4464 elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; 4465 PMD_DRV_LOG(DEBUG, sc, "got RSS_UPDATE ramrod. 
CID %d", 4466 cid); 4467 rss_raw->clear_pending(rss_raw); 4468 break; 4469 4470 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): 4471 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): 4472 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_CLOSING_WAITING_HALT): 4473 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_OPEN): 4474 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_DIAG): 4475 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_CLOSING_WAITING_HALT): 4476 PMD_DRV_LOG(DEBUG, sc, 4477 "got (un)set mac ramrod"); 4478 bnx2x_handle_classification_eqe(sc, elem); 4479 break; 4480 4481 case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_OPEN): 4482 case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_DIAG): 4483 case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_CLOSING_WAITING_HALT): 4484 PMD_DRV_LOG(DEBUG, sc, 4485 "got mcast ramrod"); 4486 bnx2x_handle_mcast_eqe(sc); 4487 break; 4488 4489 case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_OPEN): 4490 case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_DIAG): 4491 case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_CLOSING_WAITING_HALT): 4492 PMD_DRV_LOG(DEBUG, sc, 4493 "got rx_mode ramrod"); 4494 bnx2x_handle_rx_mode_eqe(sc); 4495 break; 4496 4497 default: 4498 /* unknown event log error and continue */ 4499 PMD_DRV_LOG(INFO, sc, "Unknown EQ event %d, sc->state 0x%x", 4500 elem->message.opcode, sc->state); 4501 } 4502 4503 next_spqe: 4504 spqe_cnt++; 4505 } /* for */ 4506 4507 mb(); 4508 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); 4509 4510 sc->eq_cons = sw_cons; 4511 sc->eq_prod = sw_prod; 4512 4513 /* make sure that above mem writes were issued towards the memory */ 4514 wmb(); 4515 4516 /* update producer */ 4517 bnx2x_update_eq_prod(sc, sc->eq_prod); 4518 } 4519 4520 static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc) 4521 { 4522 uint16_t status; 4523 int rc = 0; 4524 4525 PMD_DRV_LOG(DEBUG, sc, "---> SP TASK <---"); 4526 4527 /* what work needs to be performed? */ 4528 status = bnx2x_update_dsb_idx(sc); 4529 4530 PMD_DRV_LOG(DEBUG, sc, "dsb status 0x%04x", status); 4531 4532 /* HW attentions */ 4533 if (status & BNX2X_DEF_SB_ATT_IDX) { 4534 PMD_DRV_LOG(DEBUG, sc, "---> ATTN INTR <---"); 4535 bnx2x_attn_int(sc); 4536 status &= ~BNX2X_DEF_SB_ATT_IDX; 4537 rc = 1; 4538 } 4539 4540 /* SP events: STAT_QUERY and others */ 4541 if (status & BNX2X_DEF_SB_IDX) { 4542 /* handle EQ completions */ 4543 PMD_DRV_LOG(DEBUG, sc, "---> EQ INTR <---"); 4544 bnx2x_eq_int(sc); 4545 bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 4546 le16toh(sc->def_idx), IGU_INT_NOP, 1); 4547 status &= ~BNX2X_DEF_SB_IDX; 4548 } 4549 4550 /* if status is non zero then something went wrong */ 4551 if (unlikely(status)) { 4552 PMD_DRV_LOG(INFO, sc, 4553 "Got an unknown SP interrupt! 
(0x%04x)", status); 4554 } 4555 4556 /* ack status block only if something was actually handled */ 4557 bnx2x_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 4558 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); 4559 4560 return rc; 4561 } 4562 4563 static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp) 4564 { 4565 struct bnx2x_softc *sc = fp->sc; 4566 uint8_t more_rx = FALSE; 4567 4568 /* Make sure FP is initialized */ 4569 if (!fp->sb_running_index) 4570 return; 4571 4572 PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, 4573 "---> FP TASK QUEUE (%d) <--", fp->index); 4574 4575 /* update the fastpath index */ 4576 bnx2x_update_fp_sb_idx(fp); 4577 4578 if (rte_atomic32_read(&sc->scan_fp) == 1) { 4579 if (bnx2x_has_rx_work(fp)) { 4580 more_rx = bnx2x_rxeof(sc, fp); 4581 } 4582 4583 if (more_rx) { 4584 /* still more work to do */ 4585 bnx2x_handle_fp_tq(fp); 4586 return; 4587 } 4588 /* We have completed slow path completion, clear the flag */ 4589 rte_atomic32_set(&sc->scan_fp, 0); 4590 } 4591 4592 bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 4593 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 4594 } 4595 4596 /* 4597 * Legacy interrupt entry point. 4598 * 4599 * Verifies that the controller generated the interrupt and 4600 * then calls a separate routine to handle the various 4601 * interrupt causes: link, RX, and TX. 4602 */ 4603 int bnx2x_intr_legacy(struct bnx2x_softc *sc) 4604 { 4605 struct bnx2x_fastpath *fp; 4606 uint32_t status, mask; 4607 int i, rc = 0; 4608 4609 /* 4610 * 0 for ustorm, 1 for cstorm 4611 * the bits returned from ack_int() are 0-15 4612 * bit 0 = attention status block 4613 * bit 1 = fast path status block 4614 * a mask of 0x2 or more = tx/rx event 4615 * a mask of 1 = slow path event 4616 */ 4617 4618 status = bnx2x_ack_int(sc); 4619 4620 /* the interrupt is not for us */ 4621 if (unlikely(status == 0)) { 4622 return 0; 4623 } 4624 4625 PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "Interrupt status 0x%04x", status); 4626 //bnx2x_dump_status_block(sc); 4627 4628 FOR_EACH_ETH_QUEUE(sc, i) { 4629 fp = &sc->fp[i]; 4630 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); 4631 if (status & mask) { 4632 /* acknowledge and disable further fastpath interrupts */ 4633 bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 4634 0, IGU_INT_DISABLE, 0); 4635 bnx2x_handle_fp_tq(fp); 4636 status &= ~mask; 4637 } 4638 } 4639 4640 if (unlikely(status & 0x1)) { 4641 /* acknowledge and disable further slowpath interrupts */ 4642 bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 4643 0, IGU_INT_DISABLE, 0); 4644 rc = bnx2x_handle_sp_tq(sc); 4645 status &= ~0x1; 4646 } 4647 4648 if (unlikely(status)) { 4649 PMD_DRV_LOG(WARNING, sc, 4650 "Unexpected fastpath status (0x%08x)!", status); 4651 } 4652 4653 return rc; 4654 } 4655 4656 static int bnx2x_init_hw_common_chip(struct bnx2x_softc *sc); 4657 static int bnx2x_init_hw_common(struct bnx2x_softc *sc); 4658 static int bnx2x_init_hw_port(struct bnx2x_softc *sc); 4659 static int bnx2x_init_hw_func(struct bnx2x_softc *sc); 4660 static void bnx2x_reset_common(struct bnx2x_softc *sc); 4661 static void bnx2x_reset_port(struct bnx2x_softc *sc); 4662 static void bnx2x_reset_func(struct bnx2x_softc *sc); 4663 static int bnx2x_init_firmware(struct bnx2x_softc *sc); 4664 static void bnx2x_release_firmware(struct bnx2x_softc *sc); 4665 4666 static struct 4667 ecore_func_sp_drv_ops bnx2x_func_sp_drv = { 4668 .init_hw_cmn_chip = bnx2x_init_hw_common_chip, 4669 .init_hw_cmn = bnx2x_init_hw_common, 4670 .init_hw_port = bnx2x_init_hw_port, 4671 .init_hw_func = bnx2x_init_hw_func, 4672 4673 .reset_hw_cmn = 
bnx2x_reset_common, 4674 .reset_hw_port = bnx2x_reset_port, 4675 .reset_hw_func = bnx2x_reset_func, 4676 4677 .init_fw = bnx2x_init_firmware, 4678 .release_fw = bnx2x_release_firmware, 4679 }; 4680 4681 static void bnx2x_init_func_obj(struct bnx2x_softc *sc) 4682 { 4683 sc->dmae_ready = 0; 4684 4685 PMD_INIT_FUNC_TRACE(sc); 4686 4687 ecore_init_func_obj(sc, 4688 &sc->func_obj, 4689 BNX2X_SP(sc, func_rdata), 4690 (rte_iova_t)BNX2X_SP_MAPPING(sc, func_rdata), 4691 BNX2X_SP(sc, func_afex_rdata), 4692 (rte_iova_t)BNX2X_SP_MAPPING(sc, func_afex_rdata), 4693 &bnx2x_func_sp_drv); 4694 } 4695 4696 static int bnx2x_init_hw(struct bnx2x_softc *sc, uint32_t load_code) 4697 { 4698 struct ecore_func_state_params func_params = { NULL }; 4699 int rc; 4700 4701 PMD_INIT_FUNC_TRACE(sc); 4702 4703 /* prepare the parameters for function state transitions */ 4704 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 4705 4706 func_params.f_obj = &sc->func_obj; 4707 func_params.cmd = ECORE_F_CMD_HW_INIT; 4708 4709 func_params.params.hw_init.load_phase = load_code; 4710 4711 /* 4712 * Via a plethora of function pointers, we will eventually reach 4713 * bnx2x_init_hw_common(), bnx2x_init_hw_port(), or bnx2x_init_hw_func(). 4714 */ 4715 rc = ecore_func_state_change(sc, &func_params); 4716 4717 return rc; 4718 } 4719 4720 static void 4721 bnx2x_fill(struct bnx2x_softc *sc, uint32_t addr, int fill, uint32_t len) 4722 { 4723 uint32_t i; 4724 4725 if (!(len % 4) && !(addr % 4)) { 4726 for (i = 0; i < len; i += 4) { 4727 REG_WR(sc, (addr + i), fill); 4728 } 4729 } else { 4730 for (i = 0; i < len; i++) { 4731 REG_WR8(sc, (addr + i), fill); 4732 } 4733 } 4734 } 4735 4736 /* writes FP SP data to FW - data_size in dwords */ 4737 static void 4738 bnx2x_wr_fp_sb_data(struct bnx2x_softc *sc, int fw_sb_id, uint32_t * sb_data_p, 4739 uint32_t data_size) 4740 { 4741 uint32_t index; 4742 4743 for (index = 0; index < data_size; index++) { 4744 REG_WR(sc, 4745 (BAR_CSTRORM_INTMEM + 4746 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 4747 (sizeof(uint32_t) * index)), *(sb_data_p + index)); 4748 } 4749 } 4750 4751 static void bnx2x_zero_fp_sb(struct bnx2x_softc *sc, int fw_sb_id) 4752 { 4753 struct hc_status_block_data_e2 sb_data_e2; 4754 struct hc_status_block_data_e1x sb_data_e1x; 4755 uint32_t *sb_data_p; 4756 uint32_t data_size = 0; 4757 4758 if (!CHIP_IS_E1x(sc)) { 4759 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 4760 sb_data_e2.common.state = SB_DISABLED; 4761 sb_data_e2.common.p_func.vf_valid = FALSE; 4762 sb_data_p = (uint32_t *) & sb_data_e2; 4763 data_size = (sizeof(struct hc_status_block_data_e2) / 4764 sizeof(uint32_t)); 4765 } else { 4766 memset(&sb_data_e1x, 0, 4767 sizeof(struct hc_status_block_data_e1x)); 4768 sb_data_e1x.common.state = SB_DISABLED; 4769 sb_data_e1x.common.p_func.vf_valid = FALSE; 4770 sb_data_p = (uint32_t *) & sb_data_e1x; 4771 data_size = (sizeof(struct hc_status_block_data_e1x) / 4772 sizeof(uint32_t)); 4773 } 4774 4775 bnx2x_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 4776 4777 bnx2x_fill(sc, 4778 (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 0, 4779 CSTORM_STATUS_BLOCK_SIZE); 4780 bnx2x_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 4781 0, CSTORM_SYNC_BLOCK_SIZE); 4782 } 4783 4784 static void 4785 bnx2x_wr_sp_sb_data(struct bnx2x_softc *sc, 4786 struct hc_sp_status_block_data *sp_sb_data) 4787 { 4788 uint32_t i; 4789 4790 for (i = 0; 4791 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); 4792 i++) { 4793 
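/* copy the SP status block data dword by dword into this function's CSTORM internal memory */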
REG_WR(sc, 4794 (BAR_CSTRORM_INTMEM + 4795 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + 4796 (i * sizeof(uint32_t))), 4797 *((uint32_t *) sp_sb_data + i)); 4798 } 4799 } 4800 4801 static void bnx2x_zero_sp_sb(struct bnx2x_softc *sc) 4802 { 4803 struct hc_sp_status_block_data sp_sb_data; 4804 4805 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 4806 4807 sp_sb_data.state = SB_DISABLED; 4808 sp_sb_data.p_func.vf_valid = FALSE; 4809 4810 bnx2x_wr_sp_sb_data(sc, &sp_sb_data); 4811 4812 bnx2x_fill(sc, 4813 (BAR_CSTRORM_INTMEM + 4814 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 4815 0, CSTORM_SP_STATUS_BLOCK_SIZE); 4816 bnx2x_fill(sc, 4817 (BAR_CSTRORM_INTMEM + 4818 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 4819 0, CSTORM_SP_SYNC_BLOCK_SIZE); 4820 } 4821 4822 static void 4823 bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, int igu_sb_id, 4824 int igu_seg_id) 4825 { 4826 hc_sm->igu_sb_id = igu_sb_id; 4827 hc_sm->igu_seg_id = igu_seg_id; 4828 hc_sm->timer_value = 0xFF; 4829 hc_sm->time_to_expire = 0xFFFFFFFF; 4830 } 4831 4832 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) 4833 { 4834 /* zero out state machine indices */ 4835 4836 /* rx indices */ 4837 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 4838 4839 /* tx indices */ 4840 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 4841 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 4842 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 4843 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 4844 4845 /* map indices */ 4846 4847 /* rx indices */ 4848 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 4849 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 4850 4851 /* tx indices */ 4852 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 4853 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 4854 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 4855 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 4856 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 4857 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 4858 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 4859 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 4860 } 4861 4862 static void 4863 bnx2x_init_sb(struct bnx2x_softc *sc, rte_iova_t busaddr, int vfid, 4864 uint8_t vf_valid, int fw_sb_id, int igu_sb_id) 4865 { 4866 struct hc_status_block_data_e2 sb_data_e2; 4867 struct hc_status_block_data_e1x sb_data_e1x; 4868 struct hc_status_block_sm *hc_sm_p; 4869 uint32_t *sb_data_p; 4870 int igu_seg_id; 4871 int data_size; 4872 4873 if (CHIP_INT_MODE_IS_BC(sc)) { 4874 igu_seg_id = HC_SEG_ACCESS_NORM; 4875 } else { 4876 igu_seg_id = IGU_SEG_ACCESS_NORM; 4877 } 4878 4879 bnx2x_zero_fp_sb(sc, fw_sb_id); 4880 4881 if (!CHIP_IS_E1x(sc)) { 4882 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 4883 sb_data_e2.common.state = SB_ENABLED; 4884 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); 4885 sb_data_e2.common.p_func.vf_id = vfid; 4886 sb_data_e2.common.p_func.vf_valid = vf_valid; 4887 sb_data_e2.common.p_func.vnic_id = SC_VN(sc); 4888 sb_data_e2.common.same_igu_sb_1b = TRUE; 4889 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); 4890 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); 4891 hc_sm_p = sb_data_e2.common.state_machine; 4892 sb_data_p = (uint32_t *) & sb_data_e2; 4893 data_size = (sizeof(struct hc_status_block_data_e2) / 4894 sizeof(uint32_t)); 4895 bnx2x_map_sb_state_machines(sb_data_e2.index_data); 4896 } else { 4897 memset(&sb_data_e1x, 0, 4898 sizeof(struct 
hc_status_block_data_e1x)); 4899 sb_data_e1x.common.state = SB_ENABLED; 4900 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); 4901 sb_data_e1x.common.p_func.vf_id = 0xff; 4902 sb_data_e1x.common.p_func.vf_valid = FALSE; 4903 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); 4904 sb_data_e1x.common.same_igu_sb_1b = TRUE; 4905 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); 4906 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); 4907 hc_sm_p = sb_data_e1x.common.state_machine; 4908 sb_data_p = (uint32_t *) & sb_data_e1x; 4909 data_size = (sizeof(struct hc_status_block_data_e1x) / 4910 sizeof(uint32_t)); 4911 bnx2x_map_sb_state_machines(sb_data_e1x.index_data); 4912 } 4913 4914 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); 4915 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); 4916 4917 /* write indices to HW - PCI guarantees endianity of regpairs */ 4918 bnx2x_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 4919 } 4920 4921 static uint8_t bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) 4922 { 4923 if (CHIP_IS_E1x(fp->sc)) { 4924 return fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H; 4925 } else { 4926 return fp->cl_id; 4927 } 4928 } 4929 4930 static uint32_t 4931 bnx2x_rx_ustorm_prods_offset(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp) 4932 { 4933 uint32_t offset = BAR_USTRORM_INTMEM; 4934 4935 if (IS_VF(sc)) { 4936 return PXP_VF_ADDR_USDM_QUEUES_START + 4937 (sc->acquire_resp.resc.hw_qid[fp->index] * 4938 sizeof(struct ustorm_queue_zone_data)); 4939 } else if (!CHIP_IS_E1x(sc)) { 4940 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); 4941 } else { 4942 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); 4943 } 4944 4945 return offset; 4946 } 4947 4948 static void bnx2x_init_eth_fp(struct bnx2x_softc *sc, int idx) 4949 { 4950 struct bnx2x_fastpath *fp = &sc->fp[idx]; 4951 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; 4952 uint32_t q_type = 0; 4953 int cos; 4954 4955 fp->sc = sc; 4956 fp->index = idx; 4957 4958 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); 4959 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); 4960 4961 if (CHIP_IS_E1x(sc)) 4962 fp->cl_id = SC_L_ID(sc) + idx; 4963 else 4964 /* want client ID same as IGU SB ID for non-E1 */ 4965 fp->cl_id = fp->igu_sb_id; 4966 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); 4967 4968 /* setup sb indices */ 4969 if (!CHIP_IS_E1x(sc)) { 4970 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; 4971 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; 4972 } else { 4973 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; 4974 fp->sb_running_index = 4975 fp->status_block.e1x_sb->sb.running_index; 4976 } 4977 4978 /* init shortcut */ 4979 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(sc, fp); 4980 4981 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; 4982 4983 for (cos = 0; cos < sc->max_cos; cos++) { 4984 cids[cos] = idx; 4985 } 4986 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]; 4987 4988 /* nothing more for a VF to do */ 4989 if (IS_VF(sc)) { 4990 return; 4991 } 4992 4993 bnx2x_init_sb(sc, fp->sb_dma.paddr, BNX2X_VF_ID_INVALID, FALSE, 4994 fp->fw_sb_id, fp->igu_sb_id); 4995 4996 bnx2x_update_fp_sb_idx(fp); 4997 4998 /* Configure Queue State object */ 4999 rte_bit_relaxed_set32(ECORE_Q_TYPE_HAS_RX, &q_type); 5000 rte_bit_relaxed_set32(ECORE_Q_TYPE_HAS_TX, &q_type); 5001 5002 ecore_init_queue_obj(sc, 5003 &sc->sp_objs[idx].q_obj, 5004 fp->cl_id, 5005 cids, 5006 
sc->max_cos, 5007 SC_FUNC(sc), 5008 BNX2X_SP(sc, q_rdata), 5009 (rte_iova_t)BNX2X_SP_MAPPING(sc, q_rdata), 5010 q_type); 5011 5012 /* configure classification DBs */ 5013 ecore_init_mac_obj(sc, 5014 &sc->sp_objs[idx].mac_obj, 5015 fp->cl_id, 5016 idx, 5017 SC_FUNC(sc), 5018 BNX2X_SP(sc, mac_rdata), 5019 (rte_iova_t)BNX2X_SP_MAPPING(sc, mac_rdata), 5020 ECORE_FILTER_MAC_PENDING, &sc->sp_state, 5021 ECORE_OBJ_TYPE_RX_TX, &sc->macs_pool); 5022 } 5023 5024 static void 5025 bnx2x_update_rx_prod(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, 5026 uint16_t rx_bd_prod, uint16_t rx_cq_prod) 5027 { 5028 struct ustorm_eth_rx_producers rx_prods; 5029 uint32_t i; 5030 5031 memset(&rx_prods, 0, sizeof(rx_prods)); 5032 5033 /* update producers */ 5034 rx_prods.bd_prod = rx_bd_prod; 5035 rx_prods.cqe_prod = rx_cq_prod; 5036 5037 /* 5038 * Make sure that the BD and SGE data is updated before updating the 5039 * producers since FW might read the BD/SGE right after the producer 5040 * is updated. 5041 * This is only applicable for weak-ordered memory model archs such 5042 * as IA-64. The following barrier is also mandatory since FW will 5043 * assumes BDs must have buffers. 5044 */ 5045 wmb(); 5046 5047 for (i = 0; i < (sizeof(rx_prods) / 4); i++) { 5048 REG_WR(sc, (fp->ustorm_rx_prods_offset + (i * 4)), 5049 ((uint32_t *)&rx_prods)[i]); 5050 } 5051 5052 wmb(); /* keep prod updates ordered */ 5053 } 5054 5055 static void bnx2x_init_rx_rings(struct bnx2x_softc *sc) 5056 { 5057 struct bnx2x_fastpath *fp; 5058 int i; 5059 struct bnx2x_rx_queue *rxq; 5060 5061 for (i = 0; i < sc->num_queues; i++) { 5062 fp = &sc->fp[i]; 5063 rxq = sc->rx_queues[fp->index]; 5064 if (!rxq) { 5065 PMD_RX_LOG(ERR, "RX queue is NULL"); 5066 return; 5067 } 5068 5069 rxq->rx_bd_head = 0; 5070 rxq->rx_bd_tail = rxq->nb_rx_desc; 5071 rxq->rx_cq_head = 0; 5072 rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq); 5073 *fp->rx_cq_cons_sb = 0; 5074 5075 /* 5076 * Activate the BD ring... 
5077 * Warning, this will generate an interrupt (to the TSTORM) 5078 * so this can only be done after the chip is initialized 5079 */ 5080 bnx2x_update_rx_prod(sc, fp, rxq->rx_bd_tail, rxq->rx_cq_tail); 5081 5082 if (i != 0) { 5083 continue; 5084 } 5085 } 5086 } 5087 5088 static void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp) 5089 { 5090 struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; 5091 5092 fp->tx_db.data.header.header = 1 << DOORBELL_HDR_DB_TYPE_SHIFT; 5093 fp->tx_db.data.zero_fill1 = 0; 5094 fp->tx_db.data.prod = 0; 5095 5096 if (!txq) { 5097 PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); 5098 return; 5099 } 5100 5101 txq->tx_pkt_tail = 0; 5102 txq->tx_pkt_head = 0; 5103 txq->tx_bd_tail = 0; 5104 txq->tx_bd_head = 0; 5105 } 5106 5107 static void bnx2x_init_tx_rings(struct bnx2x_softc *sc) 5108 { 5109 int i; 5110 5111 for (i = 0; i < sc->num_queues; i++) { 5112 bnx2x_init_tx_ring_one(&sc->fp[i]); 5113 } 5114 } 5115 5116 static void bnx2x_init_def_sb(struct bnx2x_softc *sc) 5117 { 5118 struct host_sp_status_block *def_sb = sc->def_sb; 5119 rte_iova_t mapping = sc->def_sb_dma.paddr; 5120 int igu_sp_sb_index; 5121 int igu_seg_id; 5122 int port = SC_PORT(sc); 5123 int func = SC_FUNC(sc); 5124 int reg_offset, reg_offset_en5; 5125 uint64_t section; 5126 int index, sindex; 5127 struct hc_sp_status_block_data sp_sb_data; 5128 5129 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 5130 5131 if (CHIP_INT_MODE_IS_BC(sc)) { 5132 igu_sp_sb_index = DEF_SB_IGU_ID; 5133 igu_seg_id = HC_SEG_ACCESS_DEF; 5134 } else { 5135 igu_sp_sb_index = sc->igu_dsb_id; 5136 igu_seg_id = IGU_SEG_ACCESS_DEF; 5137 } 5138 5139 /* attentions */ 5140 section = ((uint64_t) mapping + 5141 offsetof(struct host_sp_status_block, atten_status_block)); 5142 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; 5143 sc->attn_state = 0; 5144 5145 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 5146 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 5147 5148 reg_offset_en5 = (port) ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 5149 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; 5150 5151 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 5152 /* take care of sig[0]..sig[4] */ 5153 for (sindex = 0; sindex < 4; sindex++) { 5154 sc->attn_group[index].sig[sindex] = 5155 REG_RD(sc, 5156 (reg_offset + (sindex * 0x4) + 5157 (0x10 * index))); 5158 } 5159 5160 if (!CHIP_IS_E1x(sc)) { 5161 /* 5162 * enable5 is separate from the rest of the registers, 5163 * and the address skip is 4 and not 16 between the 5164 * different groups 5165 */ 5166 sc->attn_group[index].sig[4] = 5167 REG_RD(sc, (reg_offset_en5 + (0x4 * index))); 5168 } else { 5169 sc->attn_group[index].sig[4] = 0; 5170 } 5171 } 5172 5173 if (sc->devinfo.int_block == INT_BLOCK_HC) { 5174 reg_offset = 5175 port ? 
HC_REG_ATTN_MSG1_ADDR_L : HC_REG_ATTN_MSG0_ADDR_L; 5176 REG_WR(sc, reg_offset, U64_LO(section)); 5177 REG_WR(sc, (reg_offset + 4), U64_HI(section)); 5178 } else if (!CHIP_IS_E1x(sc)) { 5179 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); 5180 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); 5181 } 5182 5183 section = ((uint64_t) mapping + 5184 offsetof(struct host_sp_status_block, sp_sb)); 5185 5186 bnx2x_zero_sp_sb(sc); 5187 5188 /* PCI guarantees endianity of regpair */ 5189 sp_sb_data.state = SB_ENABLED; 5190 sp_sb_data.host_sb_addr.lo = U64_LO(section); 5191 sp_sb_data.host_sb_addr.hi = U64_HI(section); 5192 sp_sb_data.igu_sb_id = igu_sp_sb_index; 5193 sp_sb_data.igu_seg_id = igu_seg_id; 5194 sp_sb_data.p_func.pf_id = func; 5195 sp_sb_data.p_func.vnic_id = SC_VN(sc); 5196 sp_sb_data.p_func.vf_id = 0xff; 5197 5198 bnx2x_wr_sp_sb_data(sc, &sp_sb_data); 5199 5200 bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 5201 } 5202 5203 static void bnx2x_init_sp_ring(struct bnx2x_softc *sc) 5204 { 5205 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); 5206 sc->spq_prod_idx = 0; 5207 sc->dsb_sp_prod = 5208 &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; 5209 sc->spq_prod_bd = sc->spq; 5210 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); 5211 } 5212 5213 static void bnx2x_init_eq_ring(struct bnx2x_softc *sc) 5214 { 5215 union event_ring_elem *elem; 5216 int i; 5217 5218 for (i = 1; i <= NUM_EQ_PAGES; i++) { 5219 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; 5220 5221 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + 5222 BNX2X_PAGE_SIZE * 5223 (i % NUM_EQ_PAGES))); 5224 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + 5225 BNX2X_PAGE_SIZE * 5226 (i % NUM_EQ_PAGES))); 5227 } 5228 5229 sc->eq_cons = 0; 5230 sc->eq_prod = NUM_EQ_DESC; 5231 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; 5232 5233 atomic_store_rel_long(&sc->eq_spq_left, 5234 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), 5235 NUM_EQ_DESC) - 1)); 5236 } 5237 5238 static void bnx2x_init_internal_common(struct bnx2x_softc *sc) 5239 { 5240 int i; 5241 5242 /* 5243 * Zero this manually as its initialization is currently missing 5244 * in the initTool. 5245 */ 5246 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { 5247 REG_WR(sc, 5248 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), 5249 0); 5250 } 5251 5252 if (!CHIP_IS_E1x(sc)) { 5253 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), 5254 CHIP_INT_MODE_IS_BC(sc) ? 
HC_IGU_BC_MODE : 5255 HC_IGU_NBC_MODE); 5256 } 5257 } 5258 5259 static void bnx2x_init_internal(struct bnx2x_softc *sc, uint32_t load_code) 5260 { 5261 switch (load_code) { 5262 case FW_MSG_CODE_DRV_LOAD_COMMON: 5263 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 5264 bnx2x_init_internal_common(sc); 5265 /* no break */ 5266 5267 case FW_MSG_CODE_DRV_LOAD_PORT: 5268 /* nothing to do */ 5269 /* no break */ 5270 5271 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 5272 /* internal memory per function is initialized inside bnx2x_pf_init */ 5273 break; 5274 5275 default: 5276 PMD_DRV_LOG(NOTICE, sc, "Unknown load_code (0x%x) from MCP", 5277 load_code); 5278 break; 5279 } 5280 } 5281 5282 static void 5283 storm_memset_func_cfg(struct bnx2x_softc *sc, 5284 struct tstorm_eth_function_common_config *tcfg, 5285 uint16_t abs_fid) 5286 { 5287 uint32_t addr; 5288 size_t size; 5289 5290 addr = (BAR_TSTRORM_INTMEM + 5291 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid)); 5292 size = sizeof(struct tstorm_eth_function_common_config); 5293 ecore_storm_memset_struct(sc, addr, size, (uint32_t *) tcfg); 5294 } 5295 5296 static void bnx2x_func_init(struct bnx2x_softc *sc, struct bnx2x_func_init_params *p) 5297 { 5298 struct tstorm_eth_function_common_config tcfg = { 0 }; 5299 5300 if (CHIP_IS_E1x(sc)) { 5301 storm_memset_func_cfg(sc, &tcfg, p->func_id); 5302 } 5303 5304 /* Enable the function in the FW */ 5305 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id); 5306 storm_memset_func_en(sc, p->func_id, 1); 5307 5308 /* spq */ 5309 if (p->func_flgs & FUNC_FLG_SPQ) { 5310 storm_memset_spq_addr(sc, p->spq_map, p->func_id); 5311 REG_WR(sc, 5312 (XSEM_REG_FAST_MEMORY + 5313 XSTORM_SPQ_PROD_OFFSET(p->func_id)), p->spq_prod); 5314 } 5315 } 5316 5317 /* 5318 * Calculates the sum of vn_min_rates. 5319 * It's needed for further normalizing of the min_rates. 5320 * Returns: 5321 * sum of vn_min_rates. 5322 * or 5323 * 0 - if all the min_rates are 0. 5324 * In the latter case the fairness algorithm should be deactivated. 5325 * If not all of the min rates are zero then those that are zero will be set to 1.
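 * For example (hypothetical MF configuration): with three visible VNs whose
 * min BW fields are 0, 25 and 75, vnic_min_rate[] ends up as DEF_MIN_RATE,
 * 2500 and 7500 and fairness stays enabled; if every visible VN is
 * configured to 0, each entry gets DEF_MIN_RATE and fairness is disabled.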
5326 */ 5327 static void bnx2x_calc_vn_min(struct bnx2x_softc *sc, struct cmng_init_input *input) 5328 { 5329 uint32_t vn_cfg; 5330 uint32_t vn_min_rate; 5331 int all_zero = 1; 5332 int vn; 5333 5334 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 5335 vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 5336 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 5337 FUNC_MF_CFG_MIN_BW_SHIFT) * 100); 5338 5339 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 5340 /* skip hidden VNs */ 5341 vn_min_rate = 0; 5342 } else if (!vn_min_rate) { 5343 /* If min rate is zero - set it to 100 */ 5344 vn_min_rate = DEF_MIN_RATE; 5345 } else { 5346 all_zero = 0; 5347 } 5348 5349 input->vnic_min_rate[vn] = vn_min_rate; 5350 } 5351 5352 /* if ETS or all min rates are zeros - disable fairness */ 5353 if (all_zero) { 5354 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 5355 } else { 5356 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 5357 } 5358 } 5359 5360 static uint16_t 5361 bnx2x_extract_max_cfg(__rte_unused struct bnx2x_softc *sc, uint32_t mf_cfg) 5362 { 5363 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 5364 FUNC_MF_CFG_MAX_BW_SHIFT); 5365 5366 if (!max_cfg) { 5367 PMD_DRV_LOG(DEBUG, sc, 5368 "Max BW configured to 0 - using 100 instead"); 5369 max_cfg = 100; 5370 } 5371 5372 return max_cfg; 5373 } 5374 5375 static void 5376 bnx2x_calc_vn_max(struct bnx2x_softc *sc, int vn, struct cmng_init_input *input) 5377 { 5378 uint16_t vn_max_rate; 5379 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 5380 uint32_t max_cfg; 5381 5382 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 5383 vn_max_rate = 0; 5384 } else { 5385 max_cfg = bnx2x_extract_max_cfg(sc, vn_cfg); 5386 5387 if (IS_MF_SI(sc)) { 5388 /* max_cfg in percents of linkspeed */ 5389 vn_max_rate = 5390 ((sc->link_vars.line_speed * max_cfg) / 100); 5391 } else { /* SD modes */ 5392 /* max_cfg is absolute in 100Mb units */ 5393 vn_max_rate = (max_cfg * 100); 5394 } 5395 } 5396 5397 input->vnic_max_rate[vn] = vn_max_rate; 5398 } 5399 5400 static void 5401 bnx2x_cmng_fns_init(struct bnx2x_softc *sc, uint8_t read_cfg, uint8_t cmng_type) 5402 { 5403 struct cmng_init_input input; 5404 int vn; 5405 5406 memset(&input, 0, sizeof(struct cmng_init_input)); 5407 5408 input.port_rate = sc->link_vars.line_speed; 5409 5410 if (cmng_type == CMNG_FNS_MINMAX) { 5411 /* read mf conf from shmem */ 5412 if (read_cfg) { 5413 bnx2x_read_mf_cfg(sc); 5414 } 5415 5416 /* get VN min rate and enable fairness if not 0 */ 5417 bnx2x_calc_vn_min(sc, &input); 5418 5419 /* get VN max rate */ 5420 if (sc->port.pmf) { 5421 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 5422 bnx2x_calc_vn_max(sc, vn, &input); 5423 } 5424 } 5425 5426 /* always enable rate shaping and fairness */ 5427 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 5428 5429 ecore_init_cmng(&input, &sc->cmng); 5430 return; 5431 } 5432 } 5433 5434 static int bnx2x_get_cmng_fns_mode(struct bnx2x_softc *sc) 5435 { 5436 if (CHIP_REV_IS_SLOW(sc)) { 5437 return CMNG_FNS_NONE; 5438 } 5439 5440 if (IS_MF(sc)) { 5441 return CMNG_FNS_MINMAX; 5442 } 5443 5444 return CMNG_FNS_NONE; 5445 } 5446 5447 static void 5448 storm_memset_cmng(struct bnx2x_softc *sc, struct cmng_init *cmng, uint8_t port) 5449 { 5450 int vn; 5451 int func; 5452 uint32_t addr; 5453 size_t size; 5454 5455 addr = (BAR_XSTRORM_INTMEM + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port)); 5456 size = sizeof(struct cmng_struct_per_port); 5457 ecore_storm_memset_struct(sc, addr, size, (uint32_t *) & cmng->port); 5458 5459 for (vn = VN_0; vn < 
SC_MAX_VN_NUM(sc); vn++) { 5460 func = func_by_vn(sc, vn); 5461 5462 addr = (BAR_XSTRORM_INTMEM + 5463 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func)); 5464 size = sizeof(struct rate_shaping_vars_per_vn); 5465 ecore_storm_memset_struct(sc, addr, size, 5466 (uint32_t *) & cmng-> 5467 vnic.vnic_max_rate[vn]); 5468 5469 addr = (BAR_XSTRORM_INTMEM + 5470 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func)); 5471 size = sizeof(struct fairness_vars_per_vn); 5472 ecore_storm_memset_struct(sc, addr, size, 5473 (uint32_t *) & cmng-> 5474 vnic.vnic_min_rate[vn]); 5475 } 5476 } 5477 5478 static void bnx2x_pf_init(struct bnx2x_softc *sc) 5479 { 5480 struct bnx2x_func_init_params func_init; 5481 struct event_ring_data eq_data; 5482 uint16_t flags; 5483 5484 memset(&eq_data, 0, sizeof(struct event_ring_data)); 5485 memset(&func_init, 0, sizeof(struct bnx2x_func_init_params)); 5486 5487 if (!CHIP_IS_E1x(sc)) { 5488 /* reset IGU PF statistics: MSIX + ATTN */ 5489 /* PF */ 5490 REG_WR(sc, 5491 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 5492 (BNX2X_IGU_STAS_MSG_VF_CNT * 4) + 5493 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 5494 4)), 0); 5495 /* ATTN */ 5496 REG_WR(sc, 5497 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 5498 (BNX2X_IGU_STAS_MSG_VF_CNT * 4) + 5499 (BNX2X_IGU_STAS_MSG_PF_CNT * 4) + 5500 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 5501 4)), 0); 5502 } 5503 5504 /* function setup flags */ 5505 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); 5506 5507 func_init.func_flgs = flags; 5508 func_init.pf_id = SC_FUNC(sc); 5509 func_init.func_id = SC_FUNC(sc); 5510 func_init.spq_map = sc->spq_dma.paddr; 5511 func_init.spq_prod = sc->spq_prod_idx; 5512 5513 bnx2x_func_init(sc, &func_init); 5514 5515 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port)); 5516 5517 /* 5518 * Congestion management values depend on the link rate. 5519 * There is no active link so initial link rate is set to 10Gbps. 5520 * When the link comes up the congestion management values are 5521 * re-calculated according to the actual link rate. 5522 */ 5523 sc->link_vars.line_speed = SPEED_10000; 5524 bnx2x_cmng_fns_init(sc, TRUE, bnx2x_get_cmng_fns_mode(sc)); 5525 5526 /* Only the PMF sets the HW */ 5527 if (sc->port.pmf) { 5528 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 5529 } 5530 5531 /* init Event Queue - PCI bus guarantees correct endainity */ 5532 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr); 5533 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr); 5534 eq_data.producer = sc->eq_prod; 5535 eq_data.index_id = HC_SP_INDEX_EQ_CONS; 5536 eq_data.sb_id = DEF_SB_ID; 5537 storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc)); 5538 } 5539 5540 static void bnx2x_hc_int_enable(struct bnx2x_softc *sc) 5541 { 5542 int port = SC_PORT(sc); 5543 uint32_t addr = (port) ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 5544 uint32_t val = REG_RD(sc, addr); 5545 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) 5546 || (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); 5547 uint8_t single_msix = (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); 5548 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI); 5549 5550 if (msix) { 5551 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 5552 HC_CONFIG_0_REG_INT_LINE_EN_0); 5553 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 5554 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 5555 if (single_msix) { 5556 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; 5557 } 5558 } else if (msi) { 5559 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 5560 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 5561 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 5562 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 5563 } else { 5564 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 5565 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 5566 HC_CONFIG_0_REG_INT_LINE_EN_0 | 5567 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 5568 5569 REG_WR(sc, addr, val); 5570 5571 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 5572 } 5573 5574 REG_WR(sc, addr, val); 5575 5576 /* ensure that HC_CONFIG is written before leading/trailing edge config */ 5577 mb(); 5578 5579 /* init leading/trailing edge */ 5580 if (IS_MF(sc)) { 5581 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 5582 if (sc->port.pmf) { 5583 /* enable nig and gpio3 attention */ 5584 val |= 0x1100; 5585 } 5586 } else { 5587 val = 0xffff; 5588 } 5589 5590 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port * 8), val); 5591 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port * 8), val); 5592 5593 /* make sure that interrupts are indeed enabled from here on */ 5594 mb(); 5595 } 5596 5597 static void bnx2x_igu_int_enable(struct bnx2x_softc *sc) 5598 { 5599 uint32_t val; 5600 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) 5601 || (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); 5602 uint8_t single_msix = (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); 5603 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI); 5604 5605 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 5606 5607 if (msix) { 5608 val &= ~(IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_SINGLE_ISR_EN); 5609 val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN); 5610 if (single_msix) { 5611 val |= IGU_PF_CONF_SINGLE_ISR_EN; 5612 } 5613 } else if (msi) { 5614 val &= ~IGU_PF_CONF_INT_LINE_EN; 5615 val |= (IGU_PF_CONF_MSI_MSIX_EN | 5616 IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); 5617 } else { 5618 val &= ~IGU_PF_CONF_MSI_MSIX_EN; 5619 val |= (IGU_PF_CONF_INT_LINE_EN | 5620 IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); 5621 } 5622 5623 /* clean previous status - need to configure igu prior to ack */ 5624 if ((!msix) || single_msix) { 5625 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 5626 bnx2x_ack_int(sc); 5627 } 5628 5629 val |= IGU_PF_CONF_FUNC_EN; 5630 5631 PMD_DRV_LOG(DEBUG, sc, "write 0x%x to IGU mode %s", 5632 val, ((msix) ? "MSI-X" : ((msi) ? 
"MSI" : "INTx"))); 5633 5634 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 5635 5636 mb(); 5637 5638 /* init leading/trailing edge */ 5639 if (IS_MF(sc)) { 5640 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 5641 if (sc->port.pmf) { 5642 /* enable nig and gpio3 attention */ 5643 val |= 0x1100; 5644 } 5645 } else { 5646 val = 0xffff; 5647 } 5648 5649 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 5650 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 5651 5652 /* make sure that interrupts are indeed enabled from here on */ 5653 mb(); 5654 } 5655 5656 static void bnx2x_int_enable(struct bnx2x_softc *sc) 5657 { 5658 if (sc->devinfo.int_block == INT_BLOCK_HC) { 5659 bnx2x_hc_int_enable(sc); 5660 } else { 5661 bnx2x_igu_int_enable(sc); 5662 } 5663 } 5664 5665 static void bnx2x_hc_int_disable(struct bnx2x_softc *sc) 5666 { 5667 int port = SC_PORT(sc); 5668 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 5669 uint32_t val = REG_RD(sc, addr); 5670 5671 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 5672 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 5673 HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); 5674 /* flush all outstanding writes */ 5675 mb(); 5676 5677 REG_WR(sc, addr, val); 5678 if (REG_RD(sc, addr) != val) { 5679 PMD_DRV_LOG(ERR, sc, "proper val not read from HC IGU!"); 5680 } 5681 } 5682 5683 static void bnx2x_igu_int_disable(struct bnx2x_softc *sc) 5684 { 5685 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 5686 5687 val &= ~(IGU_PF_CONF_MSI_MSIX_EN | 5688 IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN); 5689 5690 PMD_DRV_LOG(DEBUG, sc, "write %x to IGU", val); 5691 5692 /* flush all outstanding writes */ 5693 mb(); 5694 5695 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 5696 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { 5697 PMD_DRV_LOG(ERR, sc, "proper val not read from IGU!"); 5698 } 5699 } 5700 5701 static void bnx2x_int_disable(struct bnx2x_softc *sc) 5702 { 5703 if (sc->devinfo.int_block == INT_BLOCK_HC) { 5704 bnx2x_hc_int_disable(sc); 5705 } else { 5706 bnx2x_igu_int_disable(sc); 5707 } 5708 } 5709 5710 static void bnx2x_nic_init(struct bnx2x_softc *sc, int load_code) 5711 { 5712 int i; 5713 5714 PMD_INIT_FUNC_TRACE(sc); 5715 5716 for (i = 0; i < sc->num_queues; i++) { 5717 bnx2x_init_eth_fp(sc, i); 5718 } 5719 5720 rmb(); /* ensure status block indices were read */ 5721 5722 bnx2x_init_rx_rings(sc); 5723 bnx2x_init_tx_rings(sc); 5724 5725 if (IS_VF(sc)) { 5726 bnx2x_memset_stats(sc); 5727 return; 5728 } 5729 5730 /* initialize MOD_ABS interrupts */ 5731 elink_init_mod_abs_int(sc, &sc->link_vars, 5732 sc->devinfo.chip_id, 5733 sc->devinfo.shmem_base, 5734 sc->devinfo.shmem2_base, SC_PORT(sc)); 5735 5736 bnx2x_init_def_sb(sc); 5737 bnx2x_update_dsb_idx(sc); 5738 bnx2x_init_sp_ring(sc); 5739 bnx2x_init_eq_ring(sc); 5740 bnx2x_init_internal(sc, load_code); 5741 bnx2x_pf_init(sc); 5742 bnx2x_stats_init(sc); 5743 5744 /* flush all before enabling interrupts */ 5745 mb(); 5746 5747 bnx2x_int_enable(sc); 5748 5749 /* check for SPIO5 */ 5750 bnx2x_attn_int_deasserted0(sc, 5751 REG_RD(sc, 5752 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 5753 SC_PORT(sc) * 4)) & 5754 AEU_INPUTS_ATTN_BITS_SPIO5); 5755 } 5756 5757 static void bnx2x_init_objs(struct bnx2x_softc *sc) 5758 { 5759 /* mcast rules must be added to tx if tx switching is enabled */ 5760 ecore_obj_type o_type; 5761 if (sc->flags & BNX2X_TX_SWITCHING) 5762 o_type = ECORE_OBJ_TYPE_RX_TX; 5763 else 5764 o_type = ECORE_OBJ_TYPE_RX; 5765 5766 /* RX_MODE controlling object */ 5767 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); 5768 
5769 /* multicast configuration controlling object */ 5770 ecore_init_mcast_obj(sc, 5771 &sc->mcast_obj, 5772 sc->fp[0].cl_id, 5773 sc->fp[0].index, 5774 SC_FUNC(sc), 5775 SC_FUNC(sc), 5776 BNX2X_SP(sc, mcast_rdata), 5777 (rte_iova_t)BNX2X_SP_MAPPING(sc, mcast_rdata), 5778 ECORE_FILTER_MCAST_PENDING, 5779 &sc->sp_state, o_type); 5780 5781 /* Setup CAM credit pools */ 5782 ecore_init_mac_credit_pool(sc, 5783 &sc->macs_pool, 5784 SC_FUNC(sc), 5785 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 5786 VNICS_PER_PATH(sc)); 5787 5788 ecore_init_vlan_credit_pool(sc, 5789 &sc->vlans_pool, 5790 SC_ABS_FUNC(sc) >> 1, 5791 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 5792 VNICS_PER_PATH(sc)); 5793 5794 /* RSS configuration object */ 5795 ecore_init_rss_config_obj(sc, &sc->rss_conf_obj, sc->fp->cl_id, 5796 sc->fp->index, SC_FUNC(sc), SC_FUNC(sc), 5797 BNX2X_SP(sc, rss_rdata), 5798 (rte_iova_t)BNX2X_SP_MAPPING(sc, rss_rdata), 5799 ECORE_FILTER_RSS_CONF_PENDING, &sc->sp_state, 5800 ECORE_OBJ_TYPE_RX); 5801 } 5802 5803 /* 5804 * Initialize the function. This must be called before sending CLIENT_SETUP 5805 * for the first client. 5806 */ 5807 static int bnx2x_func_start(struct bnx2x_softc *sc) 5808 { 5809 struct ecore_func_state_params func_params = { NULL }; 5810 struct ecore_func_start_params *start_params = 5811 &func_params.params.start; 5812 5813 /* Prepare parameters for function state transitions */ 5814 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 5815 5816 func_params.f_obj = &sc->func_obj; 5817 func_params.cmd = ECORE_F_CMD_START; 5818 5819 /* Function parameters */ 5820 start_params->mf_mode = sc->devinfo.mf_info.mf_mode; 5821 start_params->sd_vlan_tag = OVLAN(sc); 5822 5823 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { 5824 start_params->network_cos_mode = STATIC_COS; 5825 } else { /* CHIP_IS_E1X */ 5826 start_params->network_cos_mode = FW_WRR; 5827 } 5828 5829 return ecore_func_state_change(sc, &func_params); 5830 } 5831 5832 static int bnx2x_set_power_state(struct bnx2x_softc *sc, uint8_t state) 5833 { 5834 uint16_t pmcsr; 5835 5836 /* If there is no power capability, silently succeed */ 5837 if (!(sc->devinfo.pcie_cap_flags & BNX2X_PM_CAPABLE_FLAG)) { 5838 PMD_DRV_LOG(INFO, sc, "No power capability"); 5839 return 0; 5840 } 5841 5842 pci_read(sc, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), &pmcsr, 5843 2); 5844 5845 switch (state) { 5846 case PCI_PM_D0: 5847 pci_write_word(sc, 5848 (sc->devinfo.pcie_pm_cap_reg + 5849 PCIR_POWER_STATUS), 5850 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME)); 5851 5852 if (pmcsr & PCIM_PSTAT_DMASK) { 5853 /* delay required during transition out of D3hot */ 5854 DELAY(20000); 5855 } 5856 5857 break; 5858 5859 case PCI_PM_D3hot: 5860 /* don't shut down the power for emulation and FPGA */ 5861 if (CHIP_REV_IS_SLOW(sc)) { 5862 return 0; 5863 } 5864 5865 pmcsr &= ~PCIM_PSTAT_DMASK; 5866 pmcsr |= PCIM_PSTAT_D3; 5867 5868 if (sc->wol) { 5869 pmcsr |= PCIM_PSTAT_PMEENABLE; 5870 } 5871 5872 pci_write_long(sc, 5873 (sc->devinfo.pcie_pm_cap_reg + 5874 PCIR_POWER_STATUS), pmcsr); 5875 5876 /* 5877 * No more memory access after this point until device is brought back 5878 * to D0 state. 
5879 */ 5880 break; 5881 5882 default: 5883 PMD_DRV_LOG(NOTICE, sc, "Can't support PCI power state = %d", 5884 state); 5885 return -1; 5886 } 5887 5888 return 0; 5889 } 5890 5891 /* return true if succeeded to acquire the lock */ 5892 static uint8_t bnx2x_trylock_hw_lock(struct bnx2x_softc *sc, uint32_t resource) 5893 { 5894 uint32_t lock_status; 5895 uint32_t resource_bit = (1 << resource); 5896 int func = SC_FUNC(sc); 5897 uint32_t hw_lock_control_reg; 5898 5899 /* Validating that the resource is within range */ 5900 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 5901 PMD_DRV_LOG(INFO, sc, 5902 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)", 5903 resource, HW_LOCK_MAX_RESOURCE_VALUE); 5904 return FALSE; 5905 } 5906 5907 if (func <= 5) { 5908 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func * 8); 5909 } else { 5910 hw_lock_control_reg = 5911 (MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8); 5912 } 5913 5914 /* try to acquire the lock */ 5915 REG_WR(sc, hw_lock_control_reg + 4, resource_bit); 5916 lock_status = REG_RD(sc, hw_lock_control_reg); 5917 if (lock_status & resource_bit) { 5918 return TRUE; 5919 } 5920 5921 PMD_DRV_LOG(NOTICE, sc, "Failed to get a resource lock 0x%x", resource); 5922 5923 return FALSE; 5924 } 5925 5926 /* 5927 * Get the recovery leader resource id according to the engine this function 5928 * belongs to. Currently only 2 engines are supported. 5929 */ 5930 static int bnx2x_get_leader_lock_resource(struct bnx2x_softc *sc) 5931 { 5932 if (SC_PATH(sc)) { 5933 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; 5934 } else { 5935 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0; 5936 } 5937 } 5938 5939 /* try to acquire a leader lock for current engine */ 5940 static uint8_t bnx2x_trylock_leader_lock(struct bnx2x_softc *sc) 5941 { 5942 return bnx2x_trylock_hw_lock(sc, bnx2x_get_leader_lock_resource(sc)); 5943 } 5944 5945 static int bnx2x_release_leader_lock(struct bnx2x_softc *sc) 5946 { 5947 return bnx2x_release_hw_lock(sc, bnx2x_get_leader_lock_resource(sc)); 5948 } 5949 5950 /* close gates #2, #3 and #4 */ 5951 static void bnx2x_set_234_gates(struct bnx2x_softc *sc, uint8_t close) 5952 { 5953 uint32_t val; 5954 5955 /* gates #2 and #4a are closed/opened */ 5956 /* #4 */ 5957 REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, ! !close); 5958 /* #2 */ 5959 REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !
!close); 5960 5961 /* #3 */ 5962 if (CHIP_IS_E1x(sc)) { 5963 /* prevent interrupts from HC on both ports */ 5964 val = REG_RD(sc, HC_REG_CONFIG_1); 5965 if (close) 5966 REG_WR(sc, HC_REG_CONFIG_1, (val & ~(uint32_t) 5967 HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 5968 else 5969 REG_WR(sc, HC_REG_CONFIG_1, 5970 (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 5971 5972 val = REG_RD(sc, HC_REG_CONFIG_0); 5973 if (close) 5974 REG_WR(sc, HC_REG_CONFIG_0, (val & ~(uint32_t) 5975 HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 5976 else 5977 REG_WR(sc, HC_REG_CONFIG_0, 5978 (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 5979 5980 } else { 5981 /* Prevent incoming interrupts in IGU */ 5982 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); 5983 5984 if (close) 5985 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, 5986 (val & ~(uint32_t) 5987 IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 5988 else 5989 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, 5990 (val | 5991 IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 5992 } 5993 5994 wmb(); 5995 } 5996 5997 /* poll for pending writes bit, it should get cleared in no more than 1s */ 5998 static int bnx2x_er_poll_igu_vq(struct bnx2x_softc *sc) 5999 { 6000 uint32_t cnt = 1000; 6001 uint32_t pend_bits = 0; 6002 6003 do { 6004 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); 6005 6006 if (pend_bits == 0) { 6007 break; 6008 } 6009 6010 DELAY(1000); 6011 } while (cnt-- > 0); 6012 6013 if (cnt <= 0) { 6014 PMD_DRV_LOG(NOTICE, sc, "Still pending IGU requests bits=0x%08x!", 6015 pend_bits); 6016 return -1; 6017 } 6018 6019 return 0; 6020 } 6021 6022 #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ 6023 6024 static void bnx2x_clp_reset_prep(struct bnx2x_softc *sc, uint32_t * magic_val) 6025 { 6026 /* Do some magic... */ 6027 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 6028 *magic_val = val & SHARED_MF_CLP_MAGIC; 6029 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 6030 } 6031 6032 /* restore the value of the 'magic' bit */ 6033 static void bnx2x_clp_reset_done(struct bnx2x_softc *sc, uint32_t magic_val) 6034 { 6035 /* Restore the 'magic' bit value... 
*/ 6036 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 6037 MFCFG_WR(sc, shared_mf_config.clp_mb, 6038 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 6039 } 6040 6041 /* prepare for MCP reset, takes care of CLP configurations */ 6042 static void bnx2x_reset_mcp_prep(struct bnx2x_softc *sc, uint32_t * magic_val) 6043 { 6044 uint32_t shmem; 6045 uint32_t validity_offset; 6046 6047 /* set `magic' bit in order to save MF config */ 6048 bnx2x_clp_reset_prep(sc, magic_val); 6049 6050 /* get shmem offset */ 6051 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 6052 validity_offset = 6053 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); 6054 6055 /* Clear validity map flags */ 6056 if (shmem > 0) { 6057 REG_WR(sc, shmem + validity_offset, 0); 6058 } 6059 } 6060 6061 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 6062 #define MCP_ONE_TIMEOUT 100 /* 100 ms */ 6063 6064 static void bnx2x_mcp_wait_one(struct bnx2x_softc *sc) 6065 { 6066 /* special handling for emulation and FPGA (10 times longer) */ 6067 if (CHIP_REV_IS_SLOW(sc)) { 6068 DELAY((MCP_ONE_TIMEOUT * 10) * 1000); 6069 } else { 6070 DELAY((MCP_ONE_TIMEOUT) * 1000); 6071 } 6072 } 6073 6074 /* initialize shmem_base and waits for validity signature to appear */ 6075 static int bnx2x_init_shmem(struct bnx2x_softc *sc) 6076 { 6077 int cnt = 0; 6078 uint32_t val = 0; 6079 6080 do { 6081 sc->devinfo.shmem_base = 6082 sc->link_params.shmem_base = 6083 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 6084 6085 if (sc->devinfo.shmem_base) { 6086 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 6087 if (val & SHR_MEM_VALIDITY_MB) 6088 return 0; 6089 } 6090 6091 bnx2x_mcp_wait_one(sc); 6092 6093 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 6094 6095 PMD_DRV_LOG(NOTICE, sc, "BAD MCP validity signature"); 6096 6097 return -1; 6098 } 6099 6100 static int bnx2x_reset_mcp_comp(struct bnx2x_softc *sc, uint32_t magic_val) 6101 { 6102 int rc = bnx2x_init_shmem(sc); 6103 6104 /* Restore the `magic' bit value */ 6105 bnx2x_clp_reset_done(sc, magic_val); 6106 6107 return rc; 6108 } 6109 6110 static void bnx2x_pxp_prep(struct bnx2x_softc *sc) 6111 { 6112 REG_WR(sc, PXP2_REG_RD_START_INIT, 0); 6113 REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0); 6114 wmb(); 6115 } 6116 6117 /* 6118 * Reset the whole chip except for: 6119 * - PCIE core 6120 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit) 6121 * - IGU 6122 * - MISC (including AEU) 6123 * - GRC 6124 * - RBCN, RBCP 6125 */ 6126 static void bnx2x_process_kill_chip_reset(struct bnx2x_softc *sc, uint8_t global) 6127 { 6128 uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; 6129 uint32_t global_bits2, stay_reset2; 6130 6131 /* 6132 * Bits that have to be set in reset_mask2 if we want to reset 'global' 6133 * (per chip) blocks. 6134 */ 6135 global_bits2 = 6136 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | 6137 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; 6138 6139 /* 6140 * Don't reset the following blocks. 6141 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be 6142 * reset, as in 4 port device they might still be owned 6143 * by the MCP (there is only one leader per path). 
6144 */ 6145 not_reset_mask1 = 6146 MISC_REGISTERS_RESET_REG_1_RST_HC | 6147 MISC_REGISTERS_RESET_REG_1_RST_PXPV | 6148 MISC_REGISTERS_RESET_REG_1_RST_PXP; 6149 6150 not_reset_mask2 = 6151 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | 6152 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | 6153 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | 6154 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | 6155 MISC_REGISTERS_RESET_REG_2_RST_RBCN | 6156 MISC_REGISTERS_RESET_REG_2_RST_GRC | 6157 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | 6158 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | 6159 MISC_REGISTERS_RESET_REG_2_RST_ATC | 6160 MISC_REGISTERS_RESET_REG_2_PGLC | 6161 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | 6162 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | 6163 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | 6164 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | 6165 MISC_REGISTERS_RESET_REG_2_UMAC0 | MISC_REGISTERS_RESET_REG_2_UMAC1; 6166 6167 /* 6168 * Keep the following blocks in reset: 6169 * - all xxMACs are handled by the elink code. 6170 */ 6171 stay_reset2 = 6172 MISC_REGISTERS_RESET_REG_2_XMAC | 6173 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; 6174 6175 /* Full reset masks according to the chip */ 6176 reset_mask1 = 0xffffffff; 6177 6178 if (CHIP_IS_E1H(sc)) 6179 reset_mask2 = 0x1ffff; 6180 else if (CHIP_IS_E2(sc)) 6181 reset_mask2 = 0xfffff; 6182 else /* CHIP_IS_E3 */ 6183 reset_mask2 = 0x3ffffff; 6184 6185 /* Don't reset global blocks unless we need to */ 6186 if (!global) 6187 reset_mask2 &= ~global_bits2; 6188 6189 /* 6190 * In case of attention in the QM, we need to reset PXP 6191 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM 6192 * because otherwise QM reset would release 'close the gates' shortly 6193 * before resetting the PXP, then the PSWRQ would send a write 6194 * request to PGLUE. Then when PXP is reset, PGLUE would try to 6195 * read the payload data from PSWWR, but PSWWR would not 6196 * respond. The write queue in PGLUE would get stuck, dmae commands 6197 * would not return. Therefore it's important to reset the second 6198 * reset register (containing the 6199 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the 6200 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM 6201 * bit).
6202 */ 6203 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 6204 reset_mask2 & (~not_reset_mask2)); 6205 6206 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6207 reset_mask1 & (~not_reset_mask1)); 6208 6209 mb(); 6210 wmb(); 6211 6212 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 6213 reset_mask2 & (~stay_reset2)); 6214 6215 mb(); 6216 wmb(); 6217 6218 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); 6219 wmb(); 6220 } 6221 6222 static int bnx2x_process_kill(struct bnx2x_softc *sc, uint8_t global) 6223 { 6224 int cnt = 1000; 6225 uint32_t val = 0; 6226 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 6227 uint32_t tags_63_32 = 0; 6228 6229 /* Empty the Tetris buffer, wait for 1s */ 6230 do { 6231 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); 6232 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); 6233 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); 6234 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); 6235 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); 6236 if (CHIP_IS_E3(sc)) { 6237 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); 6238 } 6239 6240 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 6241 ((port_is_idle_0 & 0x1) == 0x1) && 6242 ((port_is_idle_1 & 0x1) == 0x1) && 6243 (pgl_exp_rom2 == 0xffffffff) && 6244 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) 6245 break; 6246 DELAY(1000); 6247 } while (cnt-- > 0); 6248 6249 if (cnt <= 0) { 6250 PMD_DRV_LOG(NOTICE, sc, 6251 "ERROR: Tetris buffer didn't get empty or there " 6252 "are still outstanding read requests after 1s! " 6253 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " 6254 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x", 6255 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, 6256 pgl_exp_rom2); 6257 return -1; 6258 } 6259 6260 mb(); 6261 6262 /* Close gates #2, #3 and #4 */ 6263 bnx2x_set_234_gates(sc, TRUE); 6264 6265 /* Poll for IGU VQs for 57712 and newer chips */ 6266 if (!CHIP_IS_E1x(sc) && bnx2x_er_poll_igu_vq(sc)) { 6267 return -1; 6268 } 6269 6270 /* clear "unprepared" bit */ 6271 REG_WR(sc, MISC_REG_UNPREPARED, 0); 6272 mb(); 6273 6274 /* Make sure all is written to the chip before the reset */ 6275 wmb(); 6276 6277 /* 6278 * Wait for 1ms to empty GLUE and PCI-E core queues, 6279 * PSWHST, GRC and PSWRD Tetris buffer. 6280 */ 6281 DELAY(1000); 6282 6283 /* Prepare to chip reset: */ 6284 /* MCP */ 6285 if (global) { 6286 bnx2x_reset_mcp_prep(sc, &val); 6287 } 6288 6289 /* PXP */ 6290 bnx2x_pxp_prep(sc); 6291 mb(); 6292 6293 /* reset the chip */ 6294 bnx2x_process_kill_chip_reset(sc, global); 6295 mb(); 6296 6297 /* Recover after reset: */ 6298 /* MCP */ 6299 if (global && bnx2x_reset_mcp_comp(sc, val)) { 6300 return -1; 6301 } 6302 6303 /* Open the gates #2, #3 and #4 */ 6304 bnx2x_set_234_gates(sc, FALSE); 6305 6306 return 0; 6307 } 6308 6309 static int bnx2x_leader_reset(struct bnx2x_softc *sc) 6310 { 6311 int rc = 0; 6312 uint8_t global = bnx2x_reset_is_global(sc); 6313 uint32_t load_code; 6314 6315 /* 6316 * If not going to reset MCP, load "fake" driver to reset HW while 6317 * driver is owner of the HW. 
6318 */ 6319 if (!global && !BNX2X_NOMCP(sc)) { 6320 load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 6321 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 6322 if (!load_code) { 6323 PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); 6324 rc = -1; 6325 goto exit_leader_reset; 6326 } 6327 6328 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 6329 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 6330 PMD_DRV_LOG(NOTICE, sc, 6331 "MCP unexpected response, aborting"); 6332 rc = -1; 6333 goto exit_leader_reset2; 6334 } 6335 6336 load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 6337 if (!load_code) { 6338 PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); 6339 rc = -1; 6340 goto exit_leader_reset2; 6341 } 6342 } 6343 6344 /* try to recover after the failure */ 6345 if (bnx2x_process_kill(sc, global)) { 6346 PMD_DRV_LOG(NOTICE, sc, "Something bad occurred on engine %d!", 6347 SC_PATH(sc)); 6348 rc = -1; 6349 goto exit_leader_reset2; 6350 } 6351 6352 /* 6353 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver 6354 * state. 6355 */ 6356 bnx2x_set_reset_done(sc); 6357 if (global) { 6358 bnx2x_clear_reset_global(sc); 6359 } 6360 6361 exit_leader_reset2: 6362 6363 /* unload "fake driver" if it was loaded */ 6364 if (!global &&!BNX2X_NOMCP(sc)) { 6365 bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 6366 bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 6367 } 6368 6369 exit_leader_reset: 6370 6371 sc->is_leader = 0; 6372 bnx2x_release_leader_lock(sc); 6373 6374 mb(); 6375 return rc; 6376 } 6377 6378 /* 6379 * prepare INIT transition, parameters configured: 6380 * - HC configuration 6381 * - Queue's CDU context 6382 */ 6383 static void 6384 bnx2x_pf_q_prep_init(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, 6385 struct ecore_queue_init_params *init_params) 6386 { 6387 uint8_t cos; 6388 int cxt_index, cxt_offset; 6389 6390 rte_bit_relaxed_set32(ECORE_Q_FLG_HC, &init_params->rx.flags); 6391 rte_bit_relaxed_set32(ECORE_Q_FLG_HC, &init_params->tx.flags); 6392 6393 rte_bit_relaxed_set32(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); 6394 rte_bit_relaxed_set32(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); 6395 6396 /* HC rate */ 6397 init_params->rx.hc_rate = 6398 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; 6399 init_params->tx.hc_rate = 6400 sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0; 6401 6402 /* FW SB ID */ 6403 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; 6404 6405 /* CQ index among the SB indices */ 6406 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 6407 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 6408 6409 /* set maximum number of COSs supported by this queue */ 6410 init_params->max_cos = sc->max_cos; 6411 6412 /* set the context pointers queue object */ 6413 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 6414 cxt_index = fp->index / ILT_PAGE_CIDS; 6415 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); 6416 init_params->cxts[cos] = 6417 &sc->context[cxt_index].vcxt[cxt_offset].eth; 6418 } 6419 } 6420 6421 /* set flags that are common for the Tx-only and not normal connections */ 6422 static unsigned long 6423 bnx2x_get_common_flags(struct bnx2x_softc *sc, uint8_t zero_stats) 6424 { 6425 uint32_t flags = 0; 6426 6427 /* PF driver will always initialize the Queue to an ACTIVE state */ 6428 rte_bit_relaxed_set32(ECORE_Q_FLG_ACTIVE, &flags); 6429 6430 /* 6431 * tx only connections collect statistics (on the same index as the 6432 * parent connection). 
The statistics are zeroed when the parent 6433 * connection is initialized. 6434 */ 6435 6436 rte_bit_relaxed_set32(ECORE_Q_FLG_STATS, &flags); 6437 if (zero_stats) { 6438 rte_bit_relaxed_set32(ECORE_Q_FLG_ZERO_STATS, &flags); 6439 } 6440 6441 /* 6442 * tx only connections can support tx-switching, though their 6443 * CoS-ness doesn't survive the loopback 6444 */ 6445 if (sc->flags & BNX2X_TX_SWITCHING) { 6446 rte_bit_relaxed_set32(ECORE_Q_FLG_TX_SWITCH, &flags); 6447 } 6448 6449 rte_bit_relaxed_set32(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); 6450 6451 return flags; 6452 } 6453 6454 static unsigned long bnx2x_get_q_flags(struct bnx2x_softc *sc, uint8_t leading) 6455 { 6456 uint32_t flags = 0; 6457 6458 if (IS_MF_SD(sc)) { 6459 rte_bit_relaxed_set32(ECORE_Q_FLG_OV, &flags); 6460 } 6461 6462 if (leading) { 6463 rte_bit_relaxed_set32(ECORE_Q_FLG_LEADING_RSS, &flags); 6464 rte_bit_relaxed_set32(ECORE_Q_FLG_MCAST, &flags); 6465 } 6466 6467 rte_bit_relaxed_set32(ECORE_Q_FLG_VLAN, &flags); 6468 6469 /* merge with common flags */ 6470 return flags | bnx2x_get_common_flags(sc, TRUE); 6471 } 6472 6473 static void 6474 bnx2x_pf_q_prep_general(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, 6475 struct ecore_general_setup_params *gen_init, uint8_t cos) 6476 { 6477 gen_init->stat_id = bnx2x_stats_id(fp); 6478 gen_init->spcl_id = fp->cl_id; 6479 gen_init->mtu = sc->mtu; 6480 gen_init->cos = cos; 6481 } 6482 6483 static void 6484 bnx2x_pf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, 6485 struct rxq_pause_params *pause, 6486 struct ecore_rxq_setup_params *rxq_init) 6487 { 6488 struct bnx2x_rx_queue *rxq; 6489 6490 rxq = sc->rx_queues[fp->index]; 6491 if (!rxq) { 6492 PMD_RX_LOG(ERR, "RX queue is NULL"); 6493 return; 6494 } 6495 /* pause */ 6496 pause->bd_th_lo = BD_TH_LO(sc); 6497 pause->bd_th_hi = BD_TH_HI(sc); 6498 6499 pause->rcq_th_lo = RCQ_TH_LO(sc); 6500 pause->rcq_th_hi = RCQ_TH_HI(sc); 6501 6502 /* validate rings have enough entries to cross high thresholds */ 6503 if (sc->dropless_fc && 6504 pause->bd_th_hi + FW_PREFETCH_CNT > sc->rx_ring_size) { 6505 PMD_DRV_LOG(WARNING, sc, "rx bd ring threshold limit"); 6506 } 6507 6508 if (sc->dropless_fc && 6509 pause->rcq_th_hi + FW_PREFETCH_CNT > USABLE_RCQ_ENTRIES(rxq)) { 6510 PMD_DRV_LOG(WARNING, sc, "rcq ring threshold limit"); 6511 } 6512 6513 pause->pri_map = 1; 6514 6515 /* rxq setup */ 6516 rxq_init->dscr_map = (rte_iova_t)rxq->rx_ring_phys_addr; 6517 rxq_init->rcq_map = (rte_iova_t)rxq->cq_ring_phys_addr; 6518 rxq_init->rcq_np_map = (rte_iova_t)(rxq->cq_ring_phys_addr + 6519 BNX2X_PAGE_SIZE); 6520 6521 /* 6522 * This should be a maximum number of data bytes that may be 6523 * placed on the BD (not including paddings). 
6524 */ 6525 rxq_init->buf_sz = (fp->rx_buf_size - IP_HEADER_ALIGNMENT_PADDING); 6526 6527 rxq_init->cl_qzone_id = fp->cl_qzone_id; 6528 rxq_init->rss_engine_id = SC_FUNC(sc); 6529 rxq_init->mcast_engine_id = SC_FUNC(sc); 6530 6531 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; 6532 rxq_init->fw_sb_id = fp->fw_sb_id; 6533 6534 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 6535 6536 /* 6537 * configure silent vlan removal 6538 * if multi function mode is afex, then mask default vlan 6539 */ 6540 if (IS_MF_AFEX(sc)) { 6541 rxq_init->silent_removal_value = 6542 sc->devinfo.mf_info.afex_def_vlan_tag; 6543 rxq_init->silent_removal_mask = EVL_VLID_MASK; 6544 } 6545 } 6546 6547 static void 6548 bnx2x_pf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, 6549 struct ecore_txq_setup_params *txq_init, uint8_t cos) 6550 { 6551 struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; 6552 6553 if (!txq) { 6554 PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); 6555 return; 6556 } 6557 txq_init->dscr_map = (rte_iova_t)txq->tx_ring_phys_addr; 6558 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 6559 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 6560 txq_init->fw_sb_id = fp->fw_sb_id; 6561 6562 /* 6563 * set the TSS leading client id for TX classfication to the 6564 * leading RSS client id 6565 */ 6566 txq_init->tss_leading_cl_id = BNX2X_FP(sc, 0, cl_id); 6567 } 6568 6569 /* 6570 * This function performs 2 steps in a queue state machine: 6571 * 1) RESET->INIT 6572 * 2) INIT->SETUP 6573 */ 6574 static int 6575 bnx2x_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, uint8_t leading) 6576 { 6577 struct ecore_queue_state_params q_params = { NULL }; 6578 struct ecore_queue_setup_params *setup_params = &q_params.params.setup; 6579 int rc; 6580 6581 PMD_DRV_LOG(DEBUG, sc, "setting up queue %d", fp->index); 6582 6583 bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 6584 6585 q_params.q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj; 6586 6587 /* we want to wait for completion in this context */ 6588 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 6589 6590 /* prepare the INIT parameters */ 6591 bnx2x_pf_q_prep_init(sc, fp, &q_params.params.init); 6592 6593 /* Set the command */ 6594 q_params.cmd = ECORE_Q_CMD_INIT; 6595 6596 /* Change the state to INIT */ 6597 rc = ecore_queue_state_change(sc, &q_params); 6598 if (rc) { 6599 PMD_DRV_LOG(NOTICE, sc, "Queue(%d) INIT failed", fp->index); 6600 return rc; 6601 } 6602 6603 PMD_DRV_LOG(DEBUG, sc, "init complete"); 6604 6605 /* now move the Queue to the SETUP state */ 6606 memset(setup_params, 0, sizeof(*setup_params)); 6607 6608 /* set Queue flags */ 6609 setup_params->flags = bnx2x_get_q_flags(sc, leading); 6610 6611 /* set general SETUP parameters */ 6612 bnx2x_pf_q_prep_general(sc, fp, &setup_params->gen_params, 6613 FIRST_TX_COS_INDEX); 6614 6615 bnx2x_pf_rx_q_prep(sc, fp, 6616 &setup_params->pause_params, 6617 &setup_params->rxq_params); 6618 6619 bnx2x_pf_tx_q_prep(sc, fp, &setup_params->txq_params, FIRST_TX_COS_INDEX); 6620 6621 /* Set the command */ 6622 q_params.cmd = ECORE_Q_CMD_SETUP; 6623 6624 /* change the state to SETUP */ 6625 rc = ecore_queue_state_change(sc, &q_params); 6626 if (rc) { 6627 PMD_DRV_LOG(NOTICE, sc, "Queue(%d) SETUP failed", fp->index); 6628 return rc; 6629 } 6630 6631 return rc; 6632 } 6633 6634 static int bnx2x_setup_leading(struct bnx2x_softc *sc) 6635 { 6636 if (IS_PF(sc)) 6637 return bnx2x_setup_queue(sc, &sc->fp[0], TRUE); 6638 else /* VF */ 6639 return 
bnx2x_vf_setup_queue(sc, &sc->fp[0], TRUE); 6640 } 6641 6642 static int 6643 bnx2x_config_rss_pf(struct bnx2x_softc *sc, struct ecore_rss_config_obj *rss_obj, 6644 uint8_t config_hash) 6645 { 6646 struct ecore_config_rss_params params = { NULL }; 6647 uint32_t i; 6648 6649 /* 6650 * Although RSS is meaningless when there is a single HW queue we 6651 * still need it enabled in order to have HW Rx hash generated. 6652 */ 6653 6654 params.rss_obj = rss_obj; 6655 6656 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &params.ramrod_flags); 6657 6658 rte_bit_relaxed_set32(ECORE_RSS_MODE_REGULAR, &params.rss_flags); 6659 6660 /* RSS configuration */ 6661 rte_bit_relaxed_set32(ECORE_RSS_IPV4, &params.rss_flags); 6662 rte_bit_relaxed_set32(ECORE_RSS_IPV4_TCP, &params.rss_flags); 6663 rte_bit_relaxed_set32(ECORE_RSS_IPV6, &params.rss_flags); 6664 rte_bit_relaxed_set32(ECORE_RSS_IPV6_TCP, &params.rss_flags); 6665 if (rss_obj->udp_rss_v4) { 6666 rte_bit_relaxed_set32(ECORE_RSS_IPV4_UDP, &params.rss_flags); 6667 } 6668 if (rss_obj->udp_rss_v6) { 6669 rte_bit_relaxed_set32(ECORE_RSS_IPV6_UDP, &params.rss_flags); 6670 } 6671 6672 /* Hash bits */ 6673 params.rss_result_mask = MULTI_MASK; 6674 6675 rte_memcpy(params.ind_table, rss_obj->ind_table, 6676 sizeof(params.ind_table)); 6677 6678 if (config_hash) { 6679 /* RSS keys */ 6680 for (i = 0; i < sizeof(params.rss_key) / 4; i++) { 6681 params.rss_key[i] = (uint32_t) rte_rand(); 6682 } 6683 6684 rte_bit_relaxed_set32(ECORE_RSS_SET_SRCH, &params.rss_flags); 6685 } 6686 6687 if (IS_PF(sc)) 6688 return ecore_config_rss(sc, &params); 6689 else 6690 return bnx2x_vf_config_rss(sc, &params); 6691 } 6692 6693 static int bnx2x_config_rss_eth(struct bnx2x_softc *sc, uint8_t config_hash) 6694 { 6695 return bnx2x_config_rss_pf(sc, &sc->rss_conf_obj, config_hash); 6696 } 6697 6698 static int bnx2x_init_rss_pf(struct bnx2x_softc *sc) 6699 { 6700 uint8_t num_eth_queues = BNX2X_NUM_ETH_QUEUES(sc); 6701 uint32_t i; 6702 6703 /* 6704 * Prepare the initial contents of the indirection table if 6705 * RSS is enabled 6706 */ 6707 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { 6708 sc->rss_conf_obj.ind_table[i] = 6709 (sc->fp->cl_id + (i % num_eth_queues)); 6710 } 6711 6712 if (sc->udp_rss) { 6713 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; 6714 } 6715 6716 /* 6717 * For 57711 SEARCHER configuration (rss_keys) is 6718 * per-port, so if explicit configuration is needed, do it only 6719 * for a PMF. 6720 * 6721 * For 57712 and newer it's a per-function configuration. 6722 */ 6723 return bnx2x_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)); 6724 } 6725 6726 static int 6727 bnx2x_set_mac_one(struct bnx2x_softc *sc, uint8_t * mac, 6728 struct ecore_vlan_mac_obj *obj, uint8_t set, int mac_type, 6729 uint32_t *ramrod_flags) 6730 { 6731 struct ecore_vlan_mac_ramrod_params ramrod_param; 6732 int rc; 6733 6734 memset(&ramrod_param, 0, sizeof(ramrod_param)); 6735 6736 /* fill in general parameters */ 6737 ramrod_param.vlan_mac_obj = obj; 6738 ramrod_param.ramrod_flags = *ramrod_flags; 6739 6740 /* fill a user request section if needed */ 6741 if (!rte_bit_relaxed_get32(RAMROD_CONT, ramrod_flags)) { 6742 rte_memcpy(ramrod_param.user_req.u.mac.mac, mac, 6743 ETH_ALEN); 6744 6745 rte_bit_relaxed_set32(mac_type, 6746 &ramrod_param.user_req.vlan_mac_flags); 6747 6748 /* Set the command: ADD or DEL */ 6749 ramrod_param.user_req.cmd = (set) ?
ECORE_VLAN_MAC_ADD : 6750 ECORE_VLAN_MAC_DEL; 6751 } 6752 6753 rc = ecore_config_vlan_mac(sc, &ramrod_param); 6754 6755 if (rc == ECORE_EXISTS) { 6756 PMD_DRV_LOG(INFO, sc, "Failed to schedule ADD operations (EEXIST)"); 6757 /* do not treat adding same MAC as error */ 6758 rc = 0; 6759 } else if (rc < 0) { 6760 PMD_DRV_LOG(ERR, sc, 6761 "%s MAC failed (%d)", (set ? "Set" : "Delete"), rc); 6762 } 6763 6764 return rc; 6765 } 6766 6767 static int bnx2x_set_eth_mac(struct bnx2x_softc *sc, uint8_t set) 6768 { 6769 uint32_t ramrod_flags = 0; 6770 6771 PMD_DRV_LOG(DEBUG, sc, "Adding Ethernet MAC"); 6772 6773 rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &ramrod_flags); 6774 6775 /* Eth MAC is set on RSS leading client (fp[0]) */ 6776 return bnx2x_set_mac_one(sc, sc->link_params.mac_addr, 6777 &sc->sp_objs->mac_obj, 6778 set, ECORE_ETH_MAC, &ramrod_flags); 6779 } 6780 6781 static int bnx2x_get_cur_phy_idx(struct bnx2x_softc *sc) 6782 { 6783 uint32_t sel_phy_idx = 0; 6784 6785 if (sc->link_params.num_phys <= 1) { 6786 return ELINK_INT_PHY; 6787 } 6788 6789 if (sc->link_vars.link_up) { 6790 sel_phy_idx = ELINK_EXT_PHY1; 6791 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ 6792 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && 6793 (sc->link_params.phy[ELINK_EXT_PHY2].supported & 6794 ELINK_SUPPORTED_FIBRE)) 6795 sel_phy_idx = ELINK_EXT_PHY2; 6796 } else { 6797 switch (elink_phy_selection(&sc->link_params)) { 6798 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 6799 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: 6800 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 6801 sel_phy_idx = ELINK_EXT_PHY1; 6802 break; 6803 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: 6804 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 6805 sel_phy_idx = ELINK_EXT_PHY2; 6806 break; 6807 } 6808 } 6809 6810 return sel_phy_idx; 6811 } 6812 6813 static int bnx2x_get_link_cfg_idx(struct bnx2x_softc *sc) 6814 { 6815 uint32_t sel_phy_idx = bnx2x_get_cur_phy_idx(sc); 6816 6817 /* 6818 * The selected activated PHY is always after swapping (in case PHY 6819 * swapping is enabled). 
So when swapping is enabled, we need to reverse 6820 * the configuration 6821 */ 6822 6823 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 6824 if (sel_phy_idx == ELINK_EXT_PHY1) 6825 sel_phy_idx = ELINK_EXT_PHY2; 6826 else if (sel_phy_idx == ELINK_EXT_PHY2) 6827 sel_phy_idx = ELINK_EXT_PHY1; 6828 } 6829 6830 return ELINK_LINK_CONFIG_IDX(sel_phy_idx); 6831 } 6832 6833 static void bnx2x_set_requested_fc(struct bnx2x_softc *sc) 6834 { 6835 /* 6836 * Initialize link parameters structure variables 6837 * It is recommended to turn off RX FC for jumbo frames 6838 * for better performance 6839 */ 6840 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { 6841 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; 6842 } else { 6843 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; 6844 } 6845 } 6846 6847 static void bnx2x_calc_fc_adv(struct bnx2x_softc *sc) 6848 { 6849 uint8_t cfg_idx = bnx2x_get_link_cfg_idx(sc); 6850 switch (sc->link_vars.ieee_fc & 6851 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 6852 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 6853 default: 6854 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 6855 ADVERTISED_Pause); 6856 break; 6857 6858 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 6859 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 6860 ADVERTISED_Pause); 6861 break; 6862 6863 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 6864 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 6865 break; 6866 } 6867 } 6868 6869 static uint16_t bnx2x_get_mf_speed(struct bnx2x_softc *sc) 6870 { 6871 uint16_t line_speed = sc->link_vars.line_speed; 6872 if (IS_MF(sc)) { 6873 uint16_t maxCfg = bnx2x_extract_max_cfg(sc, 6874 sc->devinfo. 6875 mf_info.mf_config[SC_VN 6876 (sc)]); 6877 6878 /* calculate the current MAX line speed limit for the MF devices */ 6879 if (IS_MF_SI(sc)) { 6880 line_speed = (line_speed * maxCfg) / 100; 6881 } else { /* SD mode */ 6882 uint16_t vn_max_rate = maxCfg * 100; 6883 6884 if (vn_max_rate < line_speed) { 6885 line_speed = vn_max_rate; 6886 } 6887 } 6888 } 6889 6890 return line_speed; 6891 } 6892 6893 static void 6894 bnx2x_fill_report_data(struct bnx2x_softc *sc, struct bnx2x_link_report_data *data) 6895 { 6896 uint16_t line_speed = bnx2x_get_mf_speed(sc); 6897 6898 memset(data, 0, sizeof(*data)); 6899 6900 /* fill the report data with the effective line speed */ 6901 data->line_speed = line_speed; 6902 6903 /* Link is down */ 6904 if (!sc->link_vars.link_up || (sc->flags & BNX2X_MF_FUNC_DIS)) { 6905 rte_bit_relaxed_set32(BNX2X_LINK_REPORT_LINK_DOWN, 6906 &data->link_report_flags); 6907 } 6908 6909 /* Full DUPLEX */ 6910 if (sc->link_vars.duplex == DUPLEX_FULL) { 6911 rte_bit_relaxed_set32(BNX2X_LINK_REPORT_FULL_DUPLEX, 6912 &data->link_report_flags); 6913 } 6914 6915 /* Rx Flow Control is ON */ 6916 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { 6917 rte_bit_relaxed_set32(BNX2X_LINK_REPORT_RX_FC_ON, 6918 &data->link_report_flags); 6919 } 6920 6921 /* Tx Flow Control is ON */ 6922 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 6923 rte_bit_relaxed_set32(BNX2X_LINK_REPORT_TX_FC_ON, 6924 &data->link_report_flags); 6925 } 6926 } 6927 6928 /* report link status to OS, should be called under phy_lock */ 6929 static void bnx2x_link_report_locked(struct bnx2x_softc *sc) 6930 { 6931 struct bnx2x_link_report_data cur_data; 6932 6933 /* reread mf_cfg */ 6934 if (IS_PF(sc)) { 6935 bnx2x_read_mf_cfg(sc); 6936 } 6937 6938 /* Read the current link report info */ 6939 bnx2x_fill_report_data(sc, &cur_data); 6940 
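	/*
	 * Two reports are considered identical when either every field matches
	 * or both of them carry the LINK_DOWN flag; while the link is down the
	 * remaining fields are not meaningful, so such a change is not reported.
	 */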
6941 /* Don't report link down or exactly the same link status twice */ 6942 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || 6943 (rte_bit_relaxed_get32(BNX2X_LINK_REPORT_LINK_DOWN, 6944 &sc->last_reported_link.link_report_flags) && 6945 rte_bit_relaxed_get32(BNX2X_LINK_REPORT_LINK_DOWN, 6946 &cur_data.link_report_flags))) { 6947 return; 6948 } 6949 6950 ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x", 6951 cur_data.link_report_flags, 6952 sc->last_reported_link.link_report_flags); 6953 6954 sc->link_cnt++; 6955 6956 ELINK_DEBUG_P1(sc, "link status change count = %x", sc->link_cnt); 6957 /* report new link params and remember the state for the next time */ 6958 rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); 6959 6960 if (rte_bit_relaxed_get32(BNX2X_LINK_REPORT_LINK_DOWN, 6961 &cur_data.link_report_flags)) { 6962 ELINK_DEBUG_P0(sc, "NIC Link is Down"); 6963 } else { 6964 __rte_unused const char *duplex; 6965 __rte_unused const char *flow; 6966 6967 if (rte_bit_relaxed_test_and_clear32 6968 (BNX2X_LINK_REPORT_FULL_DUPLEX, 6969 &cur_data.link_report_flags)) { 6970 duplex = "full"; 6971 ELINK_DEBUG_P0(sc, "link set to full duplex"); 6972 } else { 6973 duplex = "half"; 6974 ELINK_DEBUG_P0(sc, "link set to half duplex"); 6975 } 6976 6977 /* 6978 * Handle the FC at the end so that only these flags would be 6979 * possibly set. This way we may easily check if there is no FC 6980 * enabled. 6981 */ 6982 if (cur_data.link_report_flags) { 6983 if (rte_bit_relaxed_get32 6984 (BNX2X_LINK_REPORT_RX_FC_ON, 6985 &cur_data.link_report_flags) && 6986 rte_bit_relaxed_get32(BNX2X_LINK_REPORT_TX_FC_ON, 6987 &cur_data.link_report_flags)) { 6988 flow = "ON - receive & transmit"; 6989 } else if (rte_bit_relaxed_get32 6990 (BNX2X_LINK_REPORT_RX_FC_ON, 6991 &cur_data.link_report_flags) && 6992 !rte_bit_relaxed_get32 6993 (BNX2X_LINK_REPORT_TX_FC_ON, 6994 &cur_data.link_report_flags)) { 6995 flow = "ON - receive"; 6996 } else if (!rte_bit_relaxed_get32 6997 (BNX2X_LINK_REPORT_RX_FC_ON, 6998 &cur_data.link_report_flags) && 6999 rte_bit_relaxed_get32 7000 (BNX2X_LINK_REPORT_TX_FC_ON, 7001 &cur_data.link_report_flags)) { 7002 flow = "ON - transmit"; 7003 } else { 7004 flow = "none"; /* possible? 
*/ 7005 } 7006 } else { 7007 flow = "none"; 7008 } 7009 7010 PMD_DRV_LOG(INFO, sc, 7011 "NIC Link is Up, %d Mbps %s duplex, Flow control: %s", 7012 cur_data.line_speed, duplex, flow); 7013 } 7014 } 7015 7016 static void 7017 bnx2x_link_report(struct bnx2x_softc *sc) 7018 { 7019 bnx2x_acquire_phy_lock(sc); 7020 bnx2x_link_report_locked(sc); 7021 bnx2x_release_phy_lock(sc); 7022 } 7023 7024 void bnx2x_link_status_update(struct bnx2x_softc *sc) 7025 { 7026 if (sc->state != BNX2X_STATE_OPEN) { 7027 return; 7028 } 7029 7030 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { 7031 elink_link_status_update(&sc->link_params, &sc->link_vars); 7032 } else { 7033 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | 7034 ELINK_SUPPORTED_10baseT_Full | 7035 ELINK_SUPPORTED_100baseT_Half | 7036 ELINK_SUPPORTED_100baseT_Full | 7037 ELINK_SUPPORTED_1000baseT_Full | 7038 ELINK_SUPPORTED_2500baseX_Full | 7039 ELINK_SUPPORTED_10000baseT_Full | 7040 ELINK_SUPPORTED_TP | 7041 ELINK_SUPPORTED_FIBRE | 7042 ELINK_SUPPORTED_Autoneg | 7043 ELINK_SUPPORTED_Pause | 7044 ELINK_SUPPORTED_Asym_Pause); 7045 sc->port.advertising[0] = sc->port.supported[0]; 7046 7047 sc->link_params.sc = sc; 7048 sc->link_params.port = SC_PORT(sc); 7049 sc->link_params.req_duplex[0] = DUPLEX_FULL; 7050 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; 7051 sc->link_params.req_line_speed[0] = SPEED_10000; 7052 sc->link_params.speed_cap_mask[0] = 0x7f0000; 7053 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; 7054 7055 if (CHIP_REV_IS_FPGA(sc)) { 7056 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; 7057 sc->link_vars.line_speed = ELINK_SPEED_1000; 7058 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 7059 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); 7060 } else { 7061 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; 7062 sc->link_vars.line_speed = ELINK_SPEED_10000; 7063 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 7064 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 7065 } 7066 7067 sc->link_vars.link_up = 1; 7068 7069 sc->link_vars.duplex = DUPLEX_FULL; 7070 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; 7071 7072 if (IS_PF(sc)) { 7073 REG_WR(sc, 7074 NIG_REG_EGRESS_DRAIN0_MODE + 7075 sc->link_params.port * 4, 0); 7076 bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); 7077 bnx2x_link_report(sc); 7078 } 7079 } 7080 7081 if (IS_PF(sc)) { 7082 if (sc->link_vars.link_up) { 7083 bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); 7084 } else { 7085 bnx2x_stats_handle(sc, STATS_EVENT_STOP); 7086 } 7087 bnx2x_link_report(sc); 7088 } else { 7089 bnx2x_link_report_locked(sc); 7090 bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); 7091 } 7092 } 7093 7094 static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode) 7095 { 7096 int rc, cfg_idx = bnx2x_get_link_cfg_idx(sc); 7097 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; 7098 struct elink_params *lp = &sc->link_params; 7099 7100 bnx2x_set_requested_fc(sc); 7101 7102 bnx2x_acquire_phy_lock(sc); 7103 7104 if (load_mode == LOAD_DIAG) { 7105 lp->loopback_mode = ELINK_LOOPBACK_XGXS; 7106 /* Prefer doing PHY loopback at 10G speed, if possible */ 7107 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { 7108 if (lp->speed_cap_mask[cfg_idx] & 7109 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 7110 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; 7111 } else { 7112 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; 7113 } 7114 } 7115 } 7116 7117 if (load_mode == LOAD_LOOPBACK_EXT) { 7118 lp->loopback_mode = ELINK_LOOPBACK_EXT; 7119 } 7120 7121 rc = elink_phy_init(&sc->link_params, &sc->link_vars); 7122 7123 
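	/*
	 * elink_phy_init() runs with the PHY lock held. Any loopback speed
	 * override applied above for LOAD_DIAG affects only this call; the
	 * originally requested line speed is restored before returning.
	 */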
bnx2x_release_phy_lock(sc); 7124 7125 bnx2x_calc_fc_adv(sc); 7126 7127 if (sc->link_vars.link_up) { 7128 bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); 7129 bnx2x_link_report(sc); 7130 } 7131 7132 sc->link_params.req_line_speed[cfg_idx] = req_line_speed; 7133 return rc; 7134 } 7135 7136 /* update flags in shmem */ 7137 static void 7138 bnx2x_update_drv_flags(struct bnx2x_softc *sc, uint32_t flags, uint32_t set) 7139 { 7140 uint32_t drv_flags; 7141 7142 if (SHMEM2_HAS(sc, drv_flags)) { 7143 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 7144 drv_flags = SHMEM2_RD(sc, drv_flags); 7145 7146 if (set) { 7147 drv_flags |= flags; 7148 } else { 7149 drv_flags &= ~flags; 7150 } 7151 7152 SHMEM2_WR(sc, drv_flags, drv_flags); 7153 7154 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 7155 } 7156 } 7157 7158 /* periodic timer callout routine, only runs when the interface is up */ 7159 void bnx2x_periodic_callout(struct bnx2x_softc *sc) 7160 { 7161 if ((sc->state != BNX2X_STATE_OPEN) || 7162 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { 7163 PMD_DRV_LOG(DEBUG, sc, "periodic callout exit (state=0x%x)", 7164 sc->state); 7165 return; 7166 } 7167 if (!CHIP_REV_IS_SLOW(sc)) { 7168 /* 7169 * This barrier is needed to ensure the ordering between the writing 7170 * to the sc->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and 7171 * the reading here. 7172 */ 7173 mb(); 7174 if (sc->port.pmf) { 7175 bnx2x_acquire_phy_lock(sc); 7176 elink_period_func(&sc->link_params, &sc->link_vars); 7177 bnx2x_release_phy_lock(sc); 7178 } 7179 } 7180 #ifdef BNX2X_PULSE 7181 if (IS_PF(sc) && !BNX2X_NOMCP(sc)) { 7182 int mb_idx = SC_FW_MB_IDX(sc); 7183 uint32_t drv_pulse; 7184 uint32_t mcp_pulse; 7185 7186 ++sc->fw_drv_pulse_wr_seq; 7187 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 7188 7189 drv_pulse = sc->fw_drv_pulse_wr_seq; 7190 bnx2x_drv_pulse(sc); 7191 7192 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & 7193 MCP_PULSE_SEQ_MASK); 7194 7195 /* 7196 * The delta between driver pulse and mcp response should 7197 * be 1 (before mcp response) or 0 (after mcp response). 7198 */ 7199 if ((drv_pulse != mcp_pulse) && 7200 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 7201 /* someone lost a heartbeat... 
*/ 7202 PMD_DRV_LOG(ERR, sc, 7203 "drv_pulse (0x%x) != mcp_pulse (0x%x)", 7204 drv_pulse, mcp_pulse); 7205 } 7206 } 7207 #endif 7208 } 7209 7210 /* start the controller */ 7211 static __rte_noinline 7212 int bnx2x_nic_load(struct bnx2x_softc *sc) 7213 { 7214 uint32_t val; 7215 uint32_t load_code = 0; 7216 int i, rc = 0; 7217 7218 PMD_INIT_FUNC_TRACE(sc); 7219 7220 sc->state = BNX2X_STATE_OPENING_WAITING_LOAD; 7221 7222 if (IS_PF(sc)) { 7223 /* must be called before memory allocation and HW init */ 7224 bnx2x_ilt_set_info(sc); 7225 } 7226 7227 bnx2x_set_fp_rx_buf_size(sc); 7228 7229 if (IS_PF(sc)) { 7230 if (bnx2x_alloc_mem(sc) != 0) { 7231 sc->state = BNX2X_STATE_CLOSED; 7232 rc = -ENOMEM; 7233 goto bnx2x_nic_load_error0; 7234 } 7235 } 7236 7237 /* allocate the host hardware/software hsi structures */ 7238 if (bnx2x_alloc_hsi_mem(sc) != 0) { 7239 PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_hsi_mem was failed"); 7240 sc->state = BNX2X_STATE_CLOSED; 7241 rc = -ENOMEM; 7242 goto bnx2x_nic_load_error0; 7243 } 7244 7245 if (bnx2x_alloc_fw_stats_mem(sc) != 0) { 7246 sc->state = BNX2X_STATE_CLOSED; 7247 rc = -ENOMEM; 7248 goto bnx2x_nic_load_error0; 7249 } 7250 7251 if (IS_VF(sc)) { 7252 rc = bnx2x_vf_init(sc); 7253 if (rc) { 7254 sc->state = BNX2X_STATE_ERROR; 7255 goto bnx2x_nic_load_error0; 7256 } 7257 } 7258 7259 if (IS_PF(sc)) { 7260 /* set pf load just before approaching the MCP */ 7261 bnx2x_set_pf_load(sc); 7262 7263 /* if MCP exists send load request and analyze response */ 7264 if (!BNX2X_NOMCP(sc)) { 7265 /* attempt to load pf */ 7266 if (bnx2x_nic_load_request(sc, &load_code) != 0) { 7267 sc->state = BNX2X_STATE_CLOSED; 7268 rc = -ENXIO; 7269 goto bnx2x_nic_load_error1; 7270 } 7271 7272 /* what did the MCP say? */ 7273 if (bnx2x_nic_load_analyze_req(sc, load_code) != 0) { 7274 bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 7275 sc->state = BNX2X_STATE_CLOSED; 7276 rc = -ENXIO; 7277 goto bnx2x_nic_load_error2; 7278 } 7279 } else { 7280 PMD_DRV_LOG(INFO, sc, "Device has no MCP!"); 7281 load_code = bnx2x_nic_load_no_mcp(sc); 7282 } 7283 7284 /* mark PMF if applicable */ 7285 bnx2x_nic_load_pmf(sc, load_code); 7286 7287 /* Init Function state controlling object */ 7288 bnx2x_init_func_obj(sc); 7289 7290 /* Initialize HW */ 7291 if (bnx2x_init_hw(sc, load_code) != 0) { 7292 PMD_DRV_LOG(NOTICE, sc, "HW init failed"); 7293 bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 7294 sc->state = BNX2X_STATE_CLOSED; 7295 rc = -ENXIO; 7296 goto bnx2x_nic_load_error2; 7297 } 7298 } 7299 7300 bnx2x_nic_init(sc, load_code); 7301 7302 /* Init per-function objects */ 7303 if (IS_PF(sc)) { 7304 bnx2x_init_objs(sc); 7305 7306 /* set AFEX default VLAN tag to an invalid value */ 7307 sc->devinfo.mf_info.afex_def_vlan_tag = -1; 7308 7309 sc->state = BNX2X_STATE_OPENING_WAITING_PORT; 7310 rc = bnx2x_func_start(sc); 7311 if (rc) { 7312 PMD_DRV_LOG(NOTICE, sc, "Function start failed!"); 7313 bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 7314 sc->state = BNX2X_STATE_ERROR; 7315 goto bnx2x_nic_load_error3; 7316 } 7317 7318 /* send LOAD_DONE command to MCP */ 7319 if (!BNX2X_NOMCP(sc)) { 7320 load_code = 7321 bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 7322 if (!load_code) { 7323 PMD_DRV_LOG(NOTICE, sc, 7324 "MCP response failure, aborting"); 7325 sc->state = BNX2X_STATE_ERROR; 7326 rc = -ENXIO; 7327 goto bnx2x_nic_load_error3; 7328 } 7329 } 7330 } 7331 7332 rc = bnx2x_setup_leading(sc); 7333 if (rc) { 7334 PMD_DRV_LOG(NOTICE, sc, "Setup leading failed!"); 7335 sc->state = BNX2X_STATE_ERROR; 7336 goto 
bnx2x_nic_load_error3; 7337 } 7338 7339 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { 7340 if (IS_PF(sc)) 7341 rc = bnx2x_setup_queue(sc, &sc->fp[i], FALSE); 7342 else /* IS_VF(sc) */ 7343 rc = bnx2x_vf_setup_queue(sc, &sc->fp[i], FALSE); 7344 7345 if (rc) { 7346 PMD_DRV_LOG(NOTICE, sc, "Queue(%d) setup failed", i); 7347 sc->state = BNX2X_STATE_ERROR; 7348 goto bnx2x_nic_load_error3; 7349 } 7350 } 7351 7352 rc = bnx2x_init_rss_pf(sc); 7353 if (rc) { 7354 PMD_DRV_LOG(NOTICE, sc, "PF RSS init failed"); 7355 sc->state = BNX2X_STATE_ERROR; 7356 goto bnx2x_nic_load_error3; 7357 } 7358 7359 /* now when Clients are configured we are ready to work */ 7360 sc->state = BNX2X_STATE_OPEN; 7361 7362 /* Configure a ucast MAC */ 7363 if (IS_PF(sc)) { 7364 rc = bnx2x_set_eth_mac(sc, TRUE); 7365 } else { /* IS_VF(sc) */ 7366 rc = bnx2x_vf_set_mac(sc, TRUE); 7367 } 7368 7369 if (rc) { 7370 PMD_DRV_LOG(NOTICE, sc, "Setting Ethernet MAC failed"); 7371 sc->state = BNX2X_STATE_ERROR; 7372 goto bnx2x_nic_load_error3; 7373 } 7374 7375 if (sc->port.pmf) { 7376 rc = bnx2x_initial_phy_init(sc, LOAD_OPEN); 7377 if (rc) { 7378 sc->state = BNX2X_STATE_ERROR; 7379 goto bnx2x_nic_load_error3; 7380 } 7381 } 7382 7383 sc->link_params.feature_config_flags &= 7384 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; 7385 7386 /* start the Tx */ 7387 switch (LOAD_OPEN) { 7388 case LOAD_NORMAL: 7389 case LOAD_OPEN: 7390 break; 7391 7392 case LOAD_DIAG: 7393 case LOAD_LOOPBACK_EXT: 7394 sc->state = BNX2X_STATE_DIAG; 7395 break; 7396 7397 default: 7398 break; 7399 } 7400 7401 if (sc->port.pmf) { 7402 bnx2x_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); 7403 } else { 7404 bnx2x_link_status_update(sc); 7405 } 7406 7407 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 7408 /* mark driver is loaded in shmem2 */ 7409 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 7410 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 7411 (val | 7412 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | 7413 DRV_FLAGS_CAPABILITIES_LOADED_L2)); 7414 } 7415 7416 /* start fast path */ 7417 /* Initialize Rx filter */ 7418 bnx2x_set_rx_mode(sc); 7419 7420 /* wait for all pending SP commands to complete */ 7421 if (IS_PF(sc) && !bnx2x_wait_sp_comp(sc, ~0x0U)) { 7422 PMD_DRV_LOG(NOTICE, sc, "Timeout waiting for all SPs to complete!"); 7423 bnx2x_periodic_stop(sc); 7424 bnx2x_nic_unload(sc, UNLOAD_CLOSE, FALSE); 7425 return -ENXIO; 7426 } 7427 7428 PMD_DRV_LOG(DEBUG, sc, "NIC successfully loaded"); 7429 7430 return 0; 7431 7432 bnx2x_nic_load_error3: 7433 7434 if (IS_PF(sc)) { 7435 bnx2x_int_disable_sync(sc, 1); 7436 7437 /* clean out queued objects */ 7438 bnx2x_squeeze_objects(sc); 7439 } 7440 7441 bnx2x_nic_load_error2: 7442 7443 if (IS_PF(sc) && !BNX2X_NOMCP(sc)) { 7444 bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 7445 bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 7446 } 7447 7448 sc->port.pmf = 0; 7449 7450 bnx2x_nic_load_error1: 7451 7452 /* clear pf_load status, as it was already set */ 7453 if (IS_PF(sc)) { 7454 bnx2x_clear_pf_load(sc); 7455 } 7456 7457 bnx2x_nic_load_error0: 7458 7459 bnx2x_free_fw_stats_mem(sc); 7460 bnx2x_free_hsi_mem(sc); 7461 bnx2x_free_mem(sc); 7462 7463 return rc; 7464 } 7465 7466 /* 7467 * Handles controller initialization. 7468 */ 7469 int bnx2x_init(struct bnx2x_softc *sc) 7470 { 7471 int other_engine = SC_PATH(sc) ? 0 : 1; 7472 uint8_t other_load_status, load_status; 7473 uint8_t global = FALSE; 7474 int rc; 7475 7476 /* Check if the driver is still running and bail out if it is. 
*/ 7477 if (sc->state != BNX2X_STATE_CLOSED) { 7478 PMD_DRV_LOG(DEBUG, sc, "Init called while driver is running!"); 7479 rc = 0; 7480 goto bnx2x_init_done; 7481 } 7482 7483 bnx2x_set_power_state(sc, PCI_PM_D0); 7484 7485 /* 7486 * If parity occurred during the unload, then attentions and/or 7487 * RECOVERY_IN_PROGRESS may still be set. If so we want the first function 7488 * loaded on the current engine to complete the recovery. Parity recovery 7489 * is only relevant for PF driver. 7490 */ 7491 if (IS_PF(sc)) { 7492 other_load_status = bnx2x_get_load_status(sc, other_engine); 7493 load_status = bnx2x_get_load_status(sc, SC_PATH(sc)); 7494 7495 if (!bnx2x_reset_is_done(sc, SC_PATH(sc)) || 7496 bnx2x_chk_parity_attn(sc, &global, TRUE)) { 7497 do { 7498 /* 7499 * If there are attentions and they are in global blocks, set 7500 * the GLOBAL_RESET bit regardless whether it will be this 7501 * function that will complete the recovery or not. 7502 */ 7503 if (global) { 7504 bnx2x_set_reset_global(sc); 7505 } 7506 7507 /* 7508 * Only the first function on the current engine should try 7509 * to recover in open. In case of attentions in global blocks 7510 * only the first in the chip should try to recover. 7511 */ 7512 if ((!load_status 7513 && (!global ||!other_load_status)) 7514 && bnx2x_trylock_leader_lock(sc) 7515 && !bnx2x_leader_reset(sc)) { 7516 PMD_DRV_LOG(INFO, sc, 7517 "Recovered during init"); 7518 break; 7519 } 7520 7521 /* recovery has failed... */ 7522 bnx2x_set_power_state(sc, PCI_PM_D3hot); 7523 7524 sc->recovery_state = BNX2X_RECOVERY_FAILED; 7525 7526 PMD_DRV_LOG(NOTICE, sc, 7527 "Recovery flow hasn't properly " 7528 "completed yet, try again later. " 7529 "If you still see this message after a " 7530 "few retries then power cycle is required."); 7531 7532 rc = -ENXIO; 7533 goto bnx2x_init_done; 7534 } while (0); 7535 } 7536 } 7537 7538 sc->recovery_state = BNX2X_RECOVERY_DONE; 7539 7540 rc = bnx2x_nic_load(sc); 7541 7542 bnx2x_init_done: 7543 7544 if (rc) { 7545 PMD_DRV_LOG(NOTICE, sc, "Initialization failed, " 7546 "stack notified driver is NOT running!"); 7547 } 7548 7549 return rc; 7550 } 7551 7552 static void bnx2x_get_function_num(struct bnx2x_softc *sc) 7553 { 7554 uint32_t val = 0; 7555 7556 /* 7557 * Read the ME register to get the function number. The ME register 7558 * holds the relative-function number and absolute-function number. The 7559 * absolute-function number appears only in E2 and above. Before that 7560 * these bits always contained zero, therefore we cannot blindly use them. 
7561 */ 7562 7563 val = REG_RD(sc, BAR_ME_REGISTER); 7564 7565 sc->pfunc_rel = 7566 (uint8_t) ((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); 7567 sc->path_id = 7568 (uint8_t) ((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 7569 1; 7570 7571 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 7572 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id); 7573 } else { 7574 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id); 7575 } 7576 7577 PMD_DRV_LOG(DEBUG, sc, 7578 "Relative function %d, Absolute function %d, Path %d", 7579 sc->pfunc_rel, sc->pfunc_abs, sc->path_id); 7580 } 7581 7582 static uint32_t bnx2x_get_shmem_mf_cfg_base(struct bnx2x_softc *sc) 7583 { 7584 uint32_t shmem2_size; 7585 uint32_t offset; 7586 uint32_t mf_cfg_offset_value; 7587 7588 /* Non 57712 */ 7589 offset = (SHMEM_ADDR(sc, func_mb) + 7590 (MAX_FUNC_NUM * sizeof(struct drv_func_mb))); 7591 7592 /* 57712 plus */ 7593 if (sc->devinfo.shmem2_base != 0) { 7594 shmem2_size = SHMEM2_RD(sc, size); 7595 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) { 7596 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr); 7597 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) { 7598 offset = mf_cfg_offset_value; 7599 } 7600 } 7601 } 7602 7603 return offset; 7604 } 7605 7606 static uint32_t bnx2x_pcie_capability_read(struct bnx2x_softc *sc, int reg) 7607 { 7608 uint32_t ret; 7609 struct bnx2x_pci_cap *caps; 7610 7611 /* ensure PCIe capability is enabled */ 7612 caps = pci_find_cap(sc, PCIY_EXPRESS, BNX2X_PCI_CAP); 7613 if (NULL != caps) { 7614 PMD_DRV_LOG(DEBUG, sc, "Found PCIe capability: " 7615 "id=0x%04X type=0x%04X addr=0x%08X", 7616 caps->id, caps->type, caps->addr); 7617 pci_read(sc, (caps->addr + reg), &ret, 2); 7618 return ret; 7619 } 7620 7621 PMD_DRV_LOG(WARNING, sc, "PCIe capability NOT FOUND!!!"); 7622 7623 return 0; 7624 } 7625 7626 static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc) 7627 { 7628 return bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA) & 7629 PCIM_EXP_STA_TRANSACTION_PND; 7630 } 7631 7632 /* 7633 * Walk the PCI capabilities list for the device to find what features are 7634 * supported. These capabilities may be enabled/disabled by firmware so it's 7635 * best to walk the list rather than make assumptions.
7636 */ 7637 static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc) 7638 { 7639 PMD_INIT_FUNC_TRACE(sc); 7640 7641 struct bnx2x_pci_cap *caps; 7642 uint16_t link_status; 7643 int reg = 0; 7644 7645 /* check if PCI Power Management is enabled */ 7646 caps = pci_find_cap(sc, PCIY_PMG, BNX2X_PCI_CAP); 7647 if (NULL != caps) { 7648 PMD_DRV_LOG(DEBUG, sc, "Found PM capability: " 7649 "id=0x%04X type=0x%04X addr=0x%08X", 7650 caps->id, caps->type, caps->addr); 7651 7652 sc->devinfo.pcie_cap_flags |= BNX2X_PM_CAPABLE_FLAG; 7653 sc->devinfo.pcie_pm_cap_reg = caps->addr; 7654 } 7655 7656 link_status = bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA); 7657 7658 sc->devinfo.pcie_link_speed = (link_status & PCIM_LINK_STA_SPEED); 7659 sc->devinfo.pcie_link_width = 7660 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 7661 7662 PMD_DRV_LOG(DEBUG, sc, "PCIe link speed=%d width=%d", 7663 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); 7664 7665 sc->devinfo.pcie_cap_flags |= BNX2X_PCIE_CAPABLE_FLAG; 7666 7667 /* check if MSI capability is enabled */ 7668 caps = pci_find_cap(sc, PCIY_MSI, BNX2X_PCI_CAP); 7669 if (NULL != caps) { 7670 PMD_DRV_LOG(DEBUG, sc, "Found MSI capability at 0x%04x", reg); 7671 7672 sc->devinfo.pcie_cap_flags |= BNX2X_MSI_CAPABLE_FLAG; 7673 sc->devinfo.pcie_msi_cap_reg = caps->addr; 7674 } 7675 7676 /* check if MSI-X capability is enabled */ 7677 caps = pci_find_cap(sc, PCIY_MSIX, BNX2X_PCI_CAP); 7678 if (NULL != caps) { 7679 PMD_DRV_LOG(DEBUG, sc, "Found MSI-X capability at 0x%04x", reg); 7680 7681 sc->devinfo.pcie_cap_flags |= BNX2X_MSIX_CAPABLE_FLAG; 7682 sc->devinfo.pcie_msix_cap_reg = caps->addr; 7683 } 7684 } 7685 7686 static int bnx2x_get_shmem_mf_cfg_info_sd(struct bnx2x_softc *sc) 7687 { 7688 struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; 7689 uint32_t val; 7690 7691 /* get the outer vlan if we're in switch-dependent mode */ 7692 7693 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 7694 mf_info->ext_id = (uint16_t) val; 7695 7696 mf_info->multi_vnics_mode = 1; 7697 7698 if (!VALID_OVLAN(mf_info->ext_id)) { 7699 PMD_DRV_LOG(NOTICE, sc, "Invalid VLAN (%d)", mf_info->ext_id); 7700 return 1; 7701 } 7702 7703 /* get the capabilities */ 7704 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 7705 FUNC_MF_CFG_PROTOCOL_ISCSI) { 7706 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI; 7707 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) 7708 == FUNC_MF_CFG_PROTOCOL_FCOE) { 7709 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE; 7710 } else { 7711 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET; 7712 } 7713 7714 mf_info->vnics_per_port = 7715 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 
2 : 4; 7716 7717 return 0; 7718 } 7719 7720 static uint32_t bnx2x_get_shmem_ext_proto_support_flags(struct bnx2x_softc *sc) 7721 { 7722 uint32_t retval = 0; 7723 uint32_t val; 7724 7725 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 7726 7727 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { 7728 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { 7729 retval |= MF_PROTO_SUPPORT_ETHERNET; 7730 } 7731 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 7732 retval |= MF_PROTO_SUPPORT_ISCSI; 7733 } 7734 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 7735 retval |= MF_PROTO_SUPPORT_FCOE; 7736 } 7737 } 7738 7739 return retval; 7740 } 7741 7742 static int bnx2x_get_shmem_mf_cfg_info_si(struct bnx2x_softc *sc) 7743 { 7744 struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; 7745 uint32_t val; 7746 7747 /* 7748 * There is no outer vlan if we're in switch-independent mode. 7749 * If the mac is valid then assume multi-function. 7750 */ 7751 7752 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 7753 7754 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); 7755 7756 mf_info->mf_protos_supported = 7757 bnx2x_get_shmem_ext_proto_support_flags(sc); 7758 7759 mf_info->vnics_per_port = 7760 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 7761 7762 return 0; 7763 } 7764 7765 static int bnx2x_get_shmem_mf_cfg_info_niv(struct bnx2x_softc *sc) 7766 { 7767 struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; 7768 uint32_t e1hov_tag; 7769 uint32_t func_config; 7770 uint32_t niv_config; 7771 7772 mf_info->multi_vnics_mode = 1; 7773 7774 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 7775 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 7776 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); 7777 7778 mf_info->ext_id = 7779 (uint16_t) ((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> 7780 FUNC_MF_CFG_E1HOV_TAG_SHIFT); 7781 7782 mf_info->default_vlan = 7783 (uint16_t) ((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> 7784 FUNC_MF_CFG_AFEX_VLAN_SHIFT); 7785 7786 mf_info->niv_allowed_priorities = 7787 (uint8_t) ((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 7788 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); 7789 7790 mf_info->niv_default_cos = 7791 (uint8_t) ((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 7792 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); 7793 7794 mf_info->afex_vlan_mode = 7795 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 7796 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); 7797 7798 mf_info->niv_mba_enabled = 7799 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> 7800 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); 7801 7802 mf_info->mf_protos_supported = 7803 bnx2x_get_shmem_ext_proto_support_flags(sc); 7804 7805 mf_info->vnics_per_port = 7806 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 7807 7808 return 0; 7809 } 7810 7811 static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc) 7812 { 7813 struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; 7814 uint32_t mf_cfg1; 7815 uint32_t mf_cfg2; 7816 uint32_t ovlan1; 7817 uint32_t ovlan2; 7818 uint8_t i, j; 7819 7820 /* various MF mode sanity checks... 
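 *
 * Worked example (assuming a typical 8-function part where absolute
 * functions 0/2/4/6 share port 0 and 1/3/5/7 share port 1): in
 * switch-dependent (SD) mode every non-hidden function on the port must
 * carry a valid outer VLAN and no two of them may share one, so
 * ovlan(func 0) = 100 with ovlan(func 2) = 100 is rejected while
 * 100/101/102/103 passes; conversely, when multi-vnic mode is off every
 * non-hidden function must keep the default (invalid) ovlan. Any
 * violation makes this routine report an invalid MF configuration.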
*/ 7821 7822 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) { 7823 PMD_DRV_LOG(NOTICE, sc, 7824 "Enumerated function %d is marked as hidden", 7825 SC_PORT(sc)); 7826 return 1; 7827 } 7828 7829 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) { 7830 PMD_DRV_LOG(NOTICE, sc, "vnics_per_port=%d multi_vnics_mode=%d", 7831 mf_info->vnics_per_port, mf_info->multi_vnics_mode); 7832 return 1; 7833 } 7834 7835 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 7836 /* vnic id > 0 must have valid ovlan in switch-dependent mode */ 7837 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) { 7838 PMD_DRV_LOG(NOTICE, sc, "mf_mode=SD vnic_id=%d ovlan=%d", 7839 SC_VN(sc), OVLAN(sc)); 7840 return 1; 7841 } 7842 7843 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) { 7844 PMD_DRV_LOG(NOTICE, sc, 7845 "mf_mode=SD multi_vnics_mode=%d ovlan=%d", 7846 mf_info->multi_vnics_mode, OVLAN(sc)); 7847 return 1; 7848 } 7849 7850 /* 7851 * Verify all functions are either MF or SF mode. If MF, make sure 7852 * sure that all non-hidden functions have a valid ovlan. If SF, 7853 * make sure that all non-hidden functions have an invalid ovlan. 7854 */ 7855 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 7856 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 7857 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 7858 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 7859 (((mf_info->multi_vnics_mode) 7860 && !VALID_OVLAN(ovlan1)) 7861 || ((!mf_info->multi_vnics_mode) 7862 && VALID_OVLAN(ovlan1)))) { 7863 PMD_DRV_LOG(NOTICE, sc, 7864 "mf_mode=SD function %d MF config " 7865 "mismatch, multi_vnics_mode=%d ovlan=%d", 7866 i, mf_info->multi_vnics_mode, 7867 ovlan1); 7868 return 1; 7869 } 7870 } 7871 7872 /* Verify all funcs on the same port each have a different ovlan. */ 7873 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 7874 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 7875 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 7876 /* iterate from the next function on the port to the max func */ 7877 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { 7878 mf_cfg2 = 7879 MFCFG_RD(sc, func_mf_config[j].config); 7880 ovlan2 = 7881 MFCFG_RD(sc, func_mf_config[j].e1hov_tag); 7882 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) 7883 && VALID_OVLAN(ovlan1) 7884 && !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) 7885 && VALID_OVLAN(ovlan2) 7886 && (ovlan1 == ovlan2)) { 7887 PMD_DRV_LOG(NOTICE, sc, 7888 "mf_mode=SD functions %d and %d " 7889 "have the same ovlan (%d)", 7890 i, j, ovlan1); 7891 return 1; 7892 } 7893 } 7894 } 7895 } 7896 /* MULTI_FUNCTION_SD */ 7897 return 0; 7898 } 7899 7900 static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc) 7901 { 7902 struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; 7903 uint32_t val, mac_upper; 7904 uint8_t i, vnic; 7905 7906 /* initialize mf_info defaults */ 7907 mf_info->vnics_per_port = 1; 7908 mf_info->multi_vnics_mode = FALSE; 7909 mf_info->path_has_ovlan = FALSE; 7910 mf_info->mf_mode = SINGLE_FUNCTION; 7911 7912 if (!CHIP_IS_MF_CAP(sc)) { 7913 return 0; 7914 } 7915 7916 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { 7917 PMD_DRV_LOG(NOTICE, sc, "Invalid mf_cfg_base!"); 7918 return 1; 7919 } 7920 7921 /* get the MF mode (switch dependent / independent / single-function) */ 7922 7923 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 7924 7925 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) { 7926 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 7927 7928 mac_upper = 7929 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 7930 7931 /* check for legal upper mac bytes */ 7932 if (mac_upper != 
FUNC_MF_CFG_UPPERMAC_DEFAULT) { 7933 mf_info->mf_mode = MULTI_FUNCTION_SI; 7934 } else { 7935 PMD_DRV_LOG(NOTICE, sc, 7936 "Invalid config for Switch Independent mode"); 7937 } 7938 7939 break; 7940 7941 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 7942 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: 7943 7944 /* get outer vlan configuration */ 7945 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 7946 7947 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != 7948 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 7949 mf_info->mf_mode = MULTI_FUNCTION_SD; 7950 } else { 7951 PMD_DRV_LOG(NOTICE, sc, 7952 "Invalid config for Switch Dependent mode"); 7953 } 7954 7955 break; 7956 7957 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: 7958 7959 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ 7960 return 0; 7961 7962 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 7963 7964 /* 7965 * Mark MF mode as NIV if MCP version includes NPAR-SD support 7966 * and the MAC address is valid. 7967 */ 7968 mac_upper = 7969 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 7970 7971 if ((SHMEM2_HAS(sc, afex_driver_support)) && 7972 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { 7973 mf_info->mf_mode = MULTI_FUNCTION_AFEX; 7974 } else { 7975 PMD_DRV_LOG(NOTICE, sc, "Invalid config for AFEX mode"); 7976 } 7977 7978 break; 7979 7980 default: 7981 7982 PMD_DRV_LOG(NOTICE, sc, "Unknown MF mode (0x%08x)", 7983 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); 7984 7985 return 1; 7986 } 7987 7988 /* set path mf_mode (which could be different than function mf_mode) */ 7989 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 7990 mf_info->path_has_ovlan = TRUE; 7991 } else if (mf_info->mf_mode == SINGLE_FUNCTION) { 7992 /* 7993 * Decide on path multi vnics mode. If we're not in MF mode and in 7994 * 4-port mode, this is good enough to check vnic-0 of the other port 7995 * on the same path 7996 */ 7997 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 7998 uint8_t other_port = !(PORT_ID(sc) & 1); 7999 uint8_t abs_func_other_port = 8000 (SC_PATH(sc) + (2 * other_port)); 8001 8002 val = 8003 MFCFG_RD(sc, 8004 func_mf_config 8005 [abs_func_other_port].e1hov_tag); 8006 8007 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t) val); 8008 } 8009 } 8010 8011 if (mf_info->mf_mode == SINGLE_FUNCTION) { 8012 /* invalid MF config */ 8013 if (SC_VN(sc) >= 1) { 8014 PMD_DRV_LOG(NOTICE, sc, "VNIC ID >= 1 in SF mode"); 8015 return 1; 8016 } 8017 8018 return 0; 8019 } 8020 8021 /* get the MF configuration */ 8022 mf_info->mf_config[SC_VN(sc)] = 8023 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 8024 8025 switch (mf_info->mf_mode) { 8026 case MULTI_FUNCTION_SD: 8027 8028 bnx2x_get_shmem_mf_cfg_info_sd(sc); 8029 break; 8030 8031 case MULTI_FUNCTION_SI: 8032 8033 bnx2x_get_shmem_mf_cfg_info_si(sc); 8034 break; 8035 8036 case MULTI_FUNCTION_AFEX: 8037 8038 bnx2x_get_shmem_mf_cfg_info_niv(sc); 8039 break; 8040 8041 default: 8042 8043 PMD_DRV_LOG(NOTICE, sc, "Get MF config failed (mf_mode=0x%08x)", 8044 mf_info->mf_mode); 8045 return 1; 8046 } 8047 8048 /* get the congestion management parameters */ 8049 8050 vnic = 0; 8051 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 8052 /* get min/max bw */ 8053 val = MFCFG_RD(sc, func_mf_config[i].config); 8054 mf_info->min_bw[vnic] = 8055 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> 8056 FUNC_MF_CFG_MIN_BW_SHIFT); 8057 mf_info->max_bw[vnic] = 8058 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> 8059 FUNC_MF_CFG_MAX_BW_SHIFT); 8060 vnic++; 8061 } 8062 8063 return bnx2x_check_valid_mf_cfg(sc); 8064 } 8065 8066 static int bnx2x_get_shmem_info(struct 
bnx2x_softc *sc) 8067 { 8068 int port; 8069 uint32_t mac_hi, mac_lo, val; 8070 8071 PMD_INIT_FUNC_TRACE(sc); 8072 8073 port = SC_PORT(sc); 8074 mac_hi = mac_lo = 0; 8075 8076 sc->link_params.sc = sc; 8077 sc->link_params.port = port; 8078 8079 /* get the hardware config info */ 8080 sc->devinfo.hw_config = SHMEM_RD(sc, dev_info.shared_hw_config.config); 8081 sc->devinfo.hw_config2 = 8082 SHMEM_RD(sc, dev_info.shared_hw_config.config2); 8083 8084 sc->link_params.hw_led_mode = 8085 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> 8086 SHARED_HW_CFG_LED_MODE_SHIFT); 8087 8088 /* get the port feature config */ 8089 sc->port.config = 8090 SHMEM_RD(sc, dev_info.port_feature_config[port].config); 8091 8092 /* get the link params */ 8093 sc->link_params.speed_cap_mask[ELINK_INT_PHY] = 8094 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask) 8095 & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 8096 sc->link_params.speed_cap_mask[ELINK_EXT_PHY1] = 8097 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2) 8098 & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 8099 8100 /* get the lane config */ 8101 sc->link_params.lane_config = 8102 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); 8103 8104 /* get the link config */ 8105 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); 8106 sc->port.link_config[ELINK_INT_PHY] = val; 8107 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); 8108 sc->port.link_config[ELINK_EXT_PHY1] = 8109 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); 8110 8111 /* get the override preemphasis flag and enable it or turn it off */ 8112 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 8113 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { 8114 sc->link_params.feature_config_flags |= 8115 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 8116 } else { 8117 sc->link_params.feature_config_flags &= 8118 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 8119 } 8120 8121 val = sc->devinfo.bc_ver >> 8; 8122 if (val < BNX2X_BC_VER) { 8123 /* for now only warn later we might need to enforce this */ 8124 PMD_DRV_LOG(NOTICE, sc, "This driver needs bc_ver %X but found %X, please upgrade BC\n", 8125 BNX2X_BC_VER, val); 8126 } 8127 sc->link_params.feature_config_flags |= 8128 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? 8129 ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 8130 0; 8131 8132 sc->link_params.feature_config_flags |= 8133 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 8134 ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 8135 sc->link_params.feature_config_flags |= 8136 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ? 8137 ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0; 8138 sc->link_params.feature_config_flags |= 8139 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 
8140 ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; 8141 8142 /* get the initial value of the link params */ 8143 sc->link_params.multi_phy_config = 8144 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); 8145 8146 /* get external phy info */ 8147 sc->port.ext_phy_config = 8148 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); 8149 8150 /* get the multifunction configuration */ 8151 bnx2x_get_mf_cfg_info(sc); 8152 8153 /* get the mac address */ 8154 if (IS_MF(sc)) { 8155 mac_hi = 8156 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 8157 mac_lo = 8158 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); 8159 } else { 8160 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); 8161 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); 8162 } 8163 8164 if ((mac_lo == 0) && (mac_hi == 0)) { 8165 *sc->mac_addr_str = 0; 8166 PMD_DRV_LOG(NOTICE, sc, "No Ethernet address programmed!"); 8167 } else { 8168 sc->link_params.mac_addr[0] = (uint8_t) (mac_hi >> 8); 8169 sc->link_params.mac_addr[1] = (uint8_t) (mac_hi); 8170 sc->link_params.mac_addr[2] = (uint8_t) (mac_lo >> 24); 8171 sc->link_params.mac_addr[3] = (uint8_t) (mac_lo >> 16); 8172 sc->link_params.mac_addr[4] = (uint8_t) (mac_lo >> 8); 8173 sc->link_params.mac_addr[5] = (uint8_t) (mac_lo); 8174 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), 8175 "%02x:%02x:%02x:%02x:%02x:%02x", 8176 sc->link_params.mac_addr[0], 8177 sc->link_params.mac_addr[1], 8178 sc->link_params.mac_addr[2], 8179 sc->link_params.mac_addr[3], 8180 sc->link_params.mac_addr[4], 8181 sc->link_params.mac_addr[5]); 8182 PMD_DRV_LOG(DEBUG, sc, 8183 "Ethernet address: %s", sc->mac_addr_str); 8184 } 8185 8186 return 0; 8187 } 8188 8189 static void bnx2x_media_detect(struct bnx2x_softc *sc) 8190 { 8191 uint32_t phy_idx = bnx2x_get_cur_phy_idx(sc); 8192 switch (sc->link_params.phy[phy_idx].media_type) { 8193 case ELINK_ETH_PHY_SFPP_10G_FIBER: 8194 case ELINK_ETH_PHY_SFP_1G_FIBER: 8195 case ELINK_ETH_PHY_XFP_FIBER: 8196 case ELINK_ETH_PHY_KR: 8197 case ELINK_ETH_PHY_CX4: 8198 PMD_DRV_LOG(INFO, sc, "Found 10GBase-CX4 media."); 8199 sc->media = IFM_10G_CX4; 8200 break; 8201 case ELINK_ETH_PHY_DA_TWINAX: 8202 PMD_DRV_LOG(INFO, sc, "Found 10Gb Twinax media."); 8203 sc->media = IFM_10G_TWINAX; 8204 break; 8205 case ELINK_ETH_PHY_BASE_T: 8206 PMD_DRV_LOG(INFO, sc, "Found 10GBase-T media."); 8207 sc->media = IFM_10G_T; 8208 break; 8209 case ELINK_ETH_PHY_NOT_PRESENT: 8210 PMD_DRV_LOG(INFO, sc, "Media not present."); 8211 sc->media = 0; 8212 break; 8213 case ELINK_ETH_PHY_UNSPECIFIED: 8214 default: 8215 PMD_DRV_LOG(INFO, sc, "Unknown media!"); 8216 sc->media = 0; 8217 break; 8218 } 8219 } 8220 8221 #define GET_FIELD(value, fname) \ 8222 (((value) & (fname##_MASK)) >> (fname##_SHIFT)) 8223 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 8224 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 8225 8226 static int bnx2x_get_igu_cam_info(struct bnx2x_softc *sc) 8227 { 8228 int pfid = SC_FUNC(sc); 8229 int igu_sb_id; 8230 uint32_t val; 8231 uint8_t fid, igu_sb_cnt = 0; 8232 8233 sc->igu_base_sb = 0xff; 8234 8235 if (CHIP_INT_MODE_IS_BC(sc)) { 8236 int vn = SC_VN(sc); 8237 igu_sb_cnt = sc->igu_sb_cnt; 8238 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * 8239 FP_SB_MAX_E1x); 8240 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + 8241 (CHIP_IS_MODE_4_PORT(sc) ? 
pfid : vn)); 8242 return 0; 8243 } 8244 8245 /* IGU in normal mode - read CAM */ 8246 for (igu_sb_id = 0; 8247 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_id++) { 8248 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); 8249 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) { 8250 continue; 8251 } 8252 fid = IGU_FID(val); 8253 if (fid & IGU_FID_ENCODE_IS_PF) { 8254 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) { 8255 continue; 8256 } 8257 if (IGU_VEC(val) == 0) { 8258 /* default status block */ 8259 sc->igu_dsb_id = igu_sb_id; 8260 } else { 8261 if (sc->igu_base_sb == 0xff) { 8262 sc->igu_base_sb = igu_sb_id; 8263 } 8264 igu_sb_cnt++; 8265 } 8266 } 8267 } 8268 8269 /* 8270 * Due to new PF resource allocation by MFW T7.4 and above, it's optional 8271 * that number of CAM entries will not be equal to the value advertised in 8272 * PCI. Driver should use the minimal value of both as the actual status 8273 * block count 8274 */ 8275 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt); 8276 8277 if (igu_sb_cnt == 0) { 8278 PMD_DRV_LOG(ERR, sc, "CAM configuration error"); 8279 return -1; 8280 } 8281 8282 return 0; 8283 } 8284 8285 /* 8286 * Gather various information from the device config space, the device itself, 8287 * shmem, and the user input. 8288 */ 8289 static int bnx2x_get_device_info(struct bnx2x_softc *sc) 8290 { 8291 uint32_t val; 8292 int rc; 8293 8294 /* get the chip revision (chip metal comes from pci config space) */ 8295 sc->devinfo.chip_id = sc->link_params.chip_id = 8296 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | 8297 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | 8298 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | 8299 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); 8300 8301 /* force 57811 according to MISC register */ 8302 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 8303 if (CHIP_IS_57810(sc)) { 8304 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | 8305 (sc-> 8306 devinfo.chip_id & 0x0000ffff)); 8307 } else if (CHIP_IS_57810_MF(sc)) { 8308 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | 8309 (sc-> 8310 devinfo.chip_id & 0x0000ffff)); 8311 } 8312 sc->devinfo.chip_id |= 0x1; 8313 } 8314 8315 PMD_DRV_LOG(DEBUG, sc, 8316 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)", 8317 sc->devinfo.chip_id, 8318 ((sc->devinfo.chip_id >> 16) & 0xffff), 8319 ((sc->devinfo.chip_id >> 12) & 0xf), 8320 ((sc->devinfo.chip_id >> 4) & 0xff), 8321 ((sc->devinfo.chip_id >> 0) & 0xf)); 8322 8323 val = (REG_RD(sc, 0x2874) & 0x55); 8324 if ((sc->devinfo.chip_id & 0x1) || (CHIP_IS_E1H(sc) && (val == 0x55))) { 8325 sc->flags |= BNX2X_ONE_PORT_FLAG; 8326 PMD_DRV_LOG(DEBUG, sc, "single port device"); 8327 } 8328 8329 /* set the doorbell size */ 8330 sc->doorbell_size = (1 << BNX2X_DB_SHIFT); 8331 8332 /* determine whether the device is in 2 port or 4 port mode */ 8333 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1h */ 8334 if (CHIP_IS_E2E3(sc)) { 8335 /* 8336 * Read port4mode_en_ovwr[0]: 8337 * If 1, four port mode is in port4mode_en_ovwr[1]. 8338 * If 0, four port mode is in port4mode_en[0]. 8339 */ 8340 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); 8341 if (val & 1) { 8342 val = ((val >> 1) & 1); 8343 } else { 8344 val = REG_RD(sc, MISC_REG_PORT4MODE_EN); 8345 } 8346 8347 sc->devinfo.chip_port_mode = 8348 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; 8349 8350 PMD_DRV_LOG(DEBUG, sc, "Port mode = %s", (val) ? 
"4" : "2"); 8351 } 8352 8353 /* get the function and path info for the device */ 8354 bnx2x_get_function_num(sc); 8355 8356 /* get the shared memory base address */ 8357 sc->devinfo.shmem_base = 8358 sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 8359 sc->devinfo.shmem2_base = 8360 REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 : 8361 MISC_REG_GENERIC_CR_0)); 8362 8363 if (!sc->devinfo.shmem_base) { 8364 /* this should ONLY prevent upcoming shmem reads */ 8365 PMD_DRV_LOG(INFO, sc, "MCP not active"); 8366 sc->flags |= BNX2X_NO_MCP_FLAG; 8367 return 0; 8368 } 8369 8370 /* make sure the shared memory contents are valid */ 8371 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 8372 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != 8373 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { 8374 PMD_DRV_LOG(NOTICE, sc, "Invalid SHMEM validity signature: 0x%08x", 8375 val); 8376 return 0; 8377 } 8378 8379 /* get the bootcode version */ 8380 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev); 8381 snprintf(sc->devinfo.bc_ver_str, 8382 sizeof(sc->devinfo.bc_ver_str), 8383 "%d.%d.%d", 8384 ((sc->devinfo.bc_ver >> 24) & 0xff), 8385 ((sc->devinfo.bc_ver >> 16) & 0xff), 8386 ((sc->devinfo.bc_ver >> 8) & 0xff)); 8387 PMD_DRV_LOG(DEBUG, sc, "Bootcode version: %s", sc->devinfo.bc_ver_str); 8388 8389 /* get the bootcode shmem address */ 8390 sc->devinfo.mf_cfg_base = bnx2x_get_shmem_mf_cfg_base(sc); 8391 8392 /* clean indirect addresses as they're not used */ 8393 pci_write_long(sc, PCICFG_GRC_ADDRESS, 0); 8394 if (IS_PF(sc)) { 8395 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0); 8396 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0); 8397 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0); 8398 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0); 8399 if (CHIP_IS_E1x(sc)) { 8400 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0); 8401 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0); 8402 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); 8403 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); 8404 } 8405 } 8406 8407 /* get the nvram size */ 8408 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4); 8409 sc->devinfo.flash_size = 8410 (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); 8411 8412 bnx2x_set_power_state(sc, PCI_PM_D0); 8413 /* get various configuration parameters from shmem */ 8414 bnx2x_get_shmem_info(sc); 8415 8416 /* initialize IGU parameters */ 8417 if (CHIP_IS_E1x(sc)) { 8418 sc->devinfo.int_block = INT_BLOCK_HC; 8419 sc->igu_dsb_id = DEF_SB_IGU_ID; 8420 sc->igu_base_sb = 0; 8421 } else { 8422 sc->devinfo.int_block = INT_BLOCK_IGU; 8423 8424 /* do not allow device reset during IGU info preocessing */ 8425 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 8426 8427 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); 8428 8429 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 8430 int tout = 5000; 8431 8432 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); 8433 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val); 8434 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f); 8435 8436 while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) { 8437 tout--; 8438 DELAY(1000); 8439 } 8440 8441 if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) { 8442 PMD_DRV_LOG(NOTICE, sc, 8443 "FORCING IGU Normal Mode failed!!!"); 8444 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 8445 return -1; 8446 } 8447 } 8448 8449 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 8450 PMD_DRV_LOG(DEBUG, sc, "IGU Backward Compatible Mode"); 8451 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP; 8452 } else { 8453 PMD_DRV_LOG(DEBUG, sc, "IGU Normal Mode"); 8454 } 8455 8456 rc = 
bnx2x_get_igu_cam_info(sc); 8457 8458 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 8459 8460 if (rc) { 8461 return rc; 8462 } 8463 } 8464 8465 /* 8466 * Get base FW non-default (fast path) status block ID. This value is 8467 * used to initialize the fw_sb_id saved on the fp/queue structure to 8468 * determine the id used by the FW. 8469 */ 8470 if (CHIP_IS_E1x(sc)) { 8471 sc->base_fw_ndsb = 8472 ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc)); 8473 } else { 8474 /* 8475 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of 8476 * the same queue are indicated on the same IGU SB). So we prefer 8477 * FW and IGU SBs to be the same value. 8478 */ 8479 sc->base_fw_ndsb = sc->igu_base_sb; 8480 } 8481 8482 elink_phy_probe(&sc->link_params); 8483 8484 return 0; 8485 } 8486 8487 static void 8488 bnx2x_link_settings_supported(struct bnx2x_softc *sc, uint32_t switch_cfg) 8489 { 8490 uint32_t cfg_size = 0; 8491 uint32_t idx; 8492 uint8_t port = SC_PORT(sc); 8493 8494 /* aggregation of supported attributes of all external phys */ 8495 sc->port.supported[0] = 0; 8496 sc->port.supported[1] = 0; 8497 8498 switch (sc->link_params.num_phys) { 8499 case 1: 8500 sc->port.supported[0] = 8501 sc->link_params.phy[ELINK_INT_PHY].supported; 8502 cfg_size = 1; 8503 break; 8504 case 2: 8505 sc->port.supported[0] = 8506 sc->link_params.phy[ELINK_EXT_PHY1].supported; 8507 cfg_size = 1; 8508 break; 8509 case 3: 8510 if (sc->link_params.multi_phy_config & 8511 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 8512 sc->port.supported[1] = 8513 sc->link_params.phy[ELINK_EXT_PHY1].supported; 8514 sc->port.supported[0] = 8515 sc->link_params.phy[ELINK_EXT_PHY2].supported; 8516 } else { 8517 sc->port.supported[0] = 8518 sc->link_params.phy[ELINK_EXT_PHY1].supported; 8519 sc->port.supported[1] = 8520 sc->link_params.phy[ELINK_EXT_PHY2].supported; 8521 } 8522 cfg_size = 2; 8523 break; 8524 } 8525 8526 if (!(sc->port.supported[0] || sc->port.supported[1])) { 8527 PMD_DRV_LOG(ERR, sc, 8528 "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)", 8529 SHMEM_RD(sc, 8530 dev_info.port_hw_config 8531 [port].external_phy_config), 8532 SHMEM_RD(sc, 8533 dev_info.port_hw_config 8534 [port].external_phy_config2)); 8535 return; 8536 } 8537 8538 if (CHIP_IS_E3(sc)) 8539 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); 8540 else { 8541 switch (switch_cfg) { 8542 case ELINK_SWITCH_CFG_1G: 8543 sc->port.phy_addr = 8544 REG_RD(sc, 8545 NIG_REG_SERDES0_CTRL_PHY_ADDR + port * 0x10); 8546 break; 8547 case ELINK_SWITCH_CFG_10G: 8548 sc->port.phy_addr = 8549 REG_RD(sc, 8550 NIG_REG_XGXS0_CTRL_PHY_ADDR + port * 0x18); 8551 break; 8552 default: 8553 PMD_DRV_LOG(ERR, sc, 8554 "Invalid switch config in" 8555 "link_config=0x%08x", 8556 sc->port.link_config[0]); 8557 return; 8558 } 8559 } 8560 8561 PMD_DRV_LOG(INFO, sc, "PHY addr 0x%08x", sc->port.phy_addr); 8562 8563 /* mask what we support according to speed_cap_mask per configuration */ 8564 for (idx = 0; idx < cfg_size; idx++) { 8565 if (!(sc->link_params.speed_cap_mask[idx] & 8566 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { 8567 sc->port.supported[idx] &= 8568 ~ELINK_SUPPORTED_10baseT_Half; 8569 } 8570 8571 if (!(sc->link_params.speed_cap_mask[idx] & 8572 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { 8573 sc->port.supported[idx] &= 8574 ~ELINK_SUPPORTED_10baseT_Full; 8575 } 8576 8577 if (!(sc->link_params.speed_cap_mask[idx] & 8578 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { 8579 sc->port.supported[idx] &= 8580 ~ELINK_SUPPORTED_100baseT_Half; 8581 } 8582 8583 if 
(!(sc->link_params.speed_cap_mask[idx] & 8584 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { 8585 sc->port.supported[idx] &= 8586 ~ELINK_SUPPORTED_100baseT_Full; 8587 } 8588 8589 if (!(sc->link_params.speed_cap_mask[idx] & 8590 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { 8591 sc->port.supported[idx] &= 8592 ~ELINK_SUPPORTED_1000baseT_Full; 8593 } 8594 8595 if (!(sc->link_params.speed_cap_mask[idx] & 8596 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { 8597 sc->port.supported[idx] &= 8598 ~ELINK_SUPPORTED_2500baseX_Full; 8599 } 8600 8601 if (!(sc->link_params.speed_cap_mask[idx] & 8602 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 8603 sc->port.supported[idx] &= 8604 ~ELINK_SUPPORTED_10000baseT_Full; 8605 } 8606 8607 if (!(sc->link_params.speed_cap_mask[idx] & 8608 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { 8609 sc->port.supported[idx] &= 8610 ~ELINK_SUPPORTED_20000baseKR2_Full; 8611 } 8612 } 8613 8614 PMD_DRV_LOG(INFO, sc, "PHY supported 0=0x%08x 1=0x%08x", 8615 sc->port.supported[0], sc->port.supported[1]); 8616 } 8617 8618 static void bnx2x_link_settings_requested(struct bnx2x_softc *sc) 8619 { 8620 uint32_t link_config; 8621 uint32_t idx; 8622 uint32_t cfg_size = 0; 8623 8624 sc->port.advertising[0] = 0; 8625 sc->port.advertising[1] = 0; 8626 8627 switch (sc->link_params.num_phys) { 8628 case 1: 8629 case 2: 8630 cfg_size = 1; 8631 break; 8632 case 3: 8633 cfg_size = 2; 8634 break; 8635 } 8636 8637 for (idx = 0; idx < cfg_size; idx++) { 8638 sc->link_params.req_duplex[idx] = DUPLEX_FULL; 8639 link_config = sc->port.link_config[idx]; 8640 8641 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 8642 case PORT_FEATURE_LINK_SPEED_AUTO: 8643 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { 8644 sc->link_params.req_line_speed[idx] = 8645 ELINK_SPEED_AUTO_NEG; 8646 sc->port.advertising[idx] |= 8647 sc->port.supported[idx]; 8648 if (sc->link_params.phy[ELINK_EXT_PHY1].type == 8649 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) 8650 sc->port.advertising[idx] |= 8651 (ELINK_SUPPORTED_100baseT_Half | 8652 ELINK_SUPPORTED_100baseT_Full); 8653 } else { 8654 /* force 10G, no AN */ 8655 sc->link_params.req_line_speed[idx] = 8656 ELINK_SPEED_10000; 8657 sc->port.advertising[idx] |= 8658 (ADVERTISED_10000baseT_Full | 8659 ADVERTISED_FIBRE); 8660 continue; 8661 } 8662 break; 8663 8664 case PORT_FEATURE_LINK_SPEED_10M_FULL: 8665 if (sc-> 8666 port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) 8667 { 8668 sc->link_params.req_line_speed[idx] = 8669 ELINK_SPEED_10; 8670 sc->port.advertising[idx] |= 8671 (ADVERTISED_10baseT_Full | ADVERTISED_TP); 8672 } else { 8673 PMD_DRV_LOG(ERR, sc, 8674 "Invalid NVRAM config link_config=0x%08x " 8675 "speed_cap_mask=0x%08x", 8676 link_config, 8677 sc-> 8678 link_params.speed_cap_mask[idx]); 8679 return; 8680 } 8681 break; 8682 8683 case PORT_FEATURE_LINK_SPEED_10M_HALF: 8684 if (sc-> 8685 port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) 8686 { 8687 sc->link_params.req_line_speed[idx] = 8688 ELINK_SPEED_10; 8689 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 8690 sc->port.advertising[idx] |= 8691 (ADVERTISED_10baseT_Half | ADVERTISED_TP); 8692 } else { 8693 PMD_DRV_LOG(ERR, sc, 8694 "Invalid NVRAM config link_config=0x%08x " 8695 "speed_cap_mask=0x%08x", 8696 link_config, 8697 sc-> 8698 link_params.speed_cap_mask[idx]); 8699 return; 8700 } 8701 break; 8702 8703 case PORT_FEATURE_LINK_SPEED_100M_FULL: 8704 if (sc-> 8705 port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) 8706 { 8707 sc->link_params.req_line_speed[idx] = 8708 ELINK_SPEED_100; 8709 sc->port.advertising[idx] |= 8710 
(ADVERTISED_100baseT_Full | ADVERTISED_TP); 8711 } else { 8712 PMD_DRV_LOG(ERR, sc, 8713 "Invalid NVRAM config link_config=0x%08x " 8714 "speed_cap_mask=0x%08x", 8715 link_config, 8716 sc-> 8717 link_params.speed_cap_mask[idx]); 8718 return; 8719 } 8720 break; 8721 8722 case PORT_FEATURE_LINK_SPEED_100M_HALF: 8723 if (sc-> 8724 port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) 8725 { 8726 sc->link_params.req_line_speed[idx] = 8727 ELINK_SPEED_100; 8728 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 8729 sc->port.advertising[idx] |= 8730 (ADVERTISED_100baseT_Half | ADVERTISED_TP); 8731 } else { 8732 PMD_DRV_LOG(ERR, sc, 8733 "Invalid NVRAM config link_config=0x%08x " 8734 "speed_cap_mask=0x%08x", 8735 link_config, 8736 sc-> 8737 link_params.speed_cap_mask[idx]); 8738 return; 8739 } 8740 break; 8741 8742 case PORT_FEATURE_LINK_SPEED_1G: 8743 if (sc->port.supported[idx] & 8744 ELINK_SUPPORTED_1000baseT_Full) { 8745 sc->link_params.req_line_speed[idx] = 8746 ELINK_SPEED_1000; 8747 sc->port.advertising[idx] |= 8748 (ADVERTISED_1000baseT_Full | ADVERTISED_TP); 8749 } else { 8750 PMD_DRV_LOG(ERR, sc, 8751 "Invalid NVRAM config link_config=0x%08x " 8752 "speed_cap_mask=0x%08x", 8753 link_config, 8754 sc-> 8755 link_params.speed_cap_mask[idx]); 8756 return; 8757 } 8758 break; 8759 8760 case PORT_FEATURE_LINK_SPEED_2_5G: 8761 if (sc->port.supported[idx] & 8762 ELINK_SUPPORTED_2500baseX_Full) { 8763 sc->link_params.req_line_speed[idx] = 8764 ELINK_SPEED_2500; 8765 sc->port.advertising[idx] |= 8766 (ADVERTISED_2500baseX_Full | ADVERTISED_TP); 8767 } else { 8768 PMD_DRV_LOG(ERR, sc, 8769 "Invalid NVRAM config link_config=0x%08x " 8770 "speed_cap_mask=0x%08x", 8771 link_config, 8772 sc-> 8773 link_params.speed_cap_mask[idx]); 8774 return; 8775 } 8776 break; 8777 8778 case PORT_FEATURE_LINK_SPEED_10G_CX4: 8779 if (sc->port.supported[idx] & 8780 ELINK_SUPPORTED_10000baseT_Full) { 8781 sc->link_params.req_line_speed[idx] = 8782 ELINK_SPEED_10000; 8783 sc->port.advertising[idx] |= 8784 (ADVERTISED_10000baseT_Full | 8785 ADVERTISED_FIBRE); 8786 } else { 8787 PMD_DRV_LOG(ERR, sc, 8788 "Invalid NVRAM config link_config=0x%08x " 8789 "speed_cap_mask=0x%08x", 8790 link_config, 8791 sc-> 8792 link_params.speed_cap_mask[idx]); 8793 return; 8794 } 8795 break; 8796 8797 case PORT_FEATURE_LINK_SPEED_20G: 8798 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; 8799 break; 8800 8801 default: 8802 PMD_DRV_LOG(ERR, sc, 8803 "Invalid NVRAM config link_config=0x%08x " 8804 "speed_cap_mask=0x%08x", link_config, 8805 sc->link_params.speed_cap_mask[idx]); 8806 sc->link_params.req_line_speed[idx] = 8807 ELINK_SPEED_AUTO_NEG; 8808 sc->port.advertising[idx] = sc->port.supported[idx]; 8809 break; 8810 } 8811 8812 sc->link_params.req_flow_ctrl[idx] = 8813 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); 8814 8815 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { 8816 if (! 
8817 (sc-> 8818 port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { 8819 sc->link_params.req_flow_ctrl[idx] = 8820 ELINK_FLOW_CTRL_NONE; 8821 } else { 8822 bnx2x_set_requested_fc(sc); 8823 } 8824 } 8825 } 8826 } 8827 8828 static void bnx2x_get_phy_info(struct bnx2x_softc *sc) 8829 { 8830 uint8_t port = SC_PORT(sc); 8831 uint32_t eee_mode; 8832 8833 PMD_INIT_FUNC_TRACE(sc); 8834 8835 /* shmem data already read in bnx2x_get_shmem_info() */ 8836 8837 bnx2x_link_settings_supported(sc, sc->link_params.switch_cfg); 8838 bnx2x_link_settings_requested(sc); 8839 8840 /* configure link feature according to nvram value */ 8841 eee_mode = 8842 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) 8843 & PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 8844 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 8845 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { 8846 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI | 8847 ELINK_EEE_MODE_ENABLE_LPI | 8848 ELINK_EEE_MODE_OUTPUT_TIME); 8849 } else { 8850 sc->link_params.eee_mode = 0; 8851 } 8852 8853 /* get the media type */ 8854 bnx2x_media_detect(sc); 8855 } 8856 8857 static void bnx2x_set_modes_bitmap(struct bnx2x_softc *sc) 8858 { 8859 uint32_t flags = MODE_ASIC | MODE_PORT2; 8860 8861 if (CHIP_IS_E2(sc)) { 8862 flags |= MODE_E2; 8863 } else if (CHIP_IS_E3(sc)) { 8864 flags |= MODE_E3; 8865 if (CHIP_REV(sc) == CHIP_REV_Ax) { 8866 flags |= MODE_E3_A0; 8867 } else { /*if (CHIP_REV(sc) == CHIP_REV_Bx) */ 8868 8869 flags |= MODE_E3_B0 | MODE_COS3; 8870 } 8871 } 8872 8873 if (IS_MF(sc)) { 8874 flags |= MODE_MF; 8875 switch (sc->devinfo.mf_info.mf_mode) { 8876 case MULTI_FUNCTION_SD: 8877 flags |= MODE_MF_SD; 8878 break; 8879 case MULTI_FUNCTION_SI: 8880 flags |= MODE_MF_SI; 8881 break; 8882 case MULTI_FUNCTION_AFEX: 8883 flags |= MODE_MF_AFEX; 8884 break; 8885 } 8886 } else { 8887 flags |= MODE_SF; 8888 } 8889 8890 #if defined(__LITTLE_ENDIAN) 8891 flags |= MODE_LITTLE_ENDIAN; 8892 #else /* __BIG_ENDIAN */ 8893 flags |= MODE_BIG_ENDIAN; 8894 #endif 8895 8896 INIT_MODE_FLAGS(sc) = flags; 8897 } 8898 8899 int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc) 8900 { 8901 struct bnx2x_fastpath *fp; 8902 char buf[32]; 8903 uint32_t i; 8904 8905 if (IS_PF(sc)) { 8906 /************************/ 8907 /* DEFAULT STATUS BLOCK */ 8908 /************************/ 8909 8910 if (bnx2x_dma_alloc(sc, sizeof(struct host_sp_status_block), 8911 &sc->def_sb_dma, "def_sb", 8912 RTE_CACHE_LINE_SIZE) != 0) { 8913 return -1; 8914 } 8915 8916 sc->def_sb = 8917 (struct host_sp_status_block *)sc->def_sb_dma.vaddr; 8918 /***************/ 8919 /* EVENT QUEUE */ 8920 /***************/ 8921 8922 if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE, 8923 &sc->eq_dma, "ev_queue", 8924 RTE_CACHE_LINE_SIZE) != 0) { 8925 sc->def_sb = NULL; 8926 return -1; 8927 } 8928 8929 sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr; 8930 8931 /*************/ 8932 /* SLOW PATH */ 8933 /*************/ 8934 8935 if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_slowpath), 8936 &sc->sp_dma, "sp", 8937 RTE_CACHE_LINE_SIZE) != 0) { 8938 sc->eq = NULL; 8939 sc->def_sb = NULL; 8940 return -1; 8941 } 8942 8943 sc->sp = (struct bnx2x_slowpath *)sc->sp_dma.vaddr; 8944 8945 /*******************/ 8946 /* SLOW PATH QUEUE */ 8947 /*******************/ 8948 8949 if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE, 8950 &sc->spq_dma, "sp_queue", 8951 RTE_CACHE_LINE_SIZE) != 0) { 8952 sc->sp = NULL; 8953 sc->eq = NULL; 8954 sc->def_sb = NULL; 8955 return -1; 8956 } 8957 8958 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; 8959 8960 /***************************/ 
8961 /* FW DECOMPRESSION BUFFER */ 8962 /***************************/ 8963 8964 if (bnx2x_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, 8965 "fw_buf", RTE_CACHE_LINE_SIZE) != 0) { 8966 sc->spq = NULL; 8967 sc->sp = NULL; 8968 sc->eq = NULL; 8969 sc->def_sb = NULL; 8970 return -1; 8971 } 8972 8973 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; 8974 } 8975 8976 /*************/ 8977 /* FASTPATHS */ 8978 /*************/ 8979 8980 /* allocate DMA memory for each fastpath structure */ 8981 for (i = 0; i < sc->num_queues; i++) { 8982 fp = &sc->fp[i]; 8983 fp->sc = sc; 8984 fp->index = i; 8985 8986 /*******************/ 8987 /* FP STATUS BLOCK */ 8988 /*******************/ 8989 8990 snprintf(buf, sizeof(buf), "fp_%d_sb", i); 8991 if (bnx2x_dma_alloc(sc, sizeof(union bnx2x_host_hc_status_block), 8992 &fp->sb_dma, buf, RTE_CACHE_LINE_SIZE) != 0) { 8993 PMD_DRV_LOG(NOTICE, sc, "Failed to alloc %s", buf); 8994 return -1; 8995 } else { 8996 if (CHIP_IS_E2E3(sc)) { 8997 fp->status_block.e2_sb = 8998 (struct host_hc_status_block_e2 *) 8999 fp->sb_dma.vaddr; 9000 } else { 9001 fp->status_block.e1x_sb = 9002 (struct host_hc_status_block_e1x *) 9003 fp->sb_dma.vaddr; 9004 } 9005 } 9006 } 9007 9008 return 0; 9009 } 9010 9011 void bnx2x_free_hsi_mem(struct bnx2x_softc *sc) 9012 { 9013 struct bnx2x_fastpath *fp; 9014 int i; 9015 9016 for (i = 0; i < sc->num_queues; i++) { 9017 fp = &sc->fp[i]; 9018 9019 /*******************/ 9020 /* FP STATUS BLOCK */ 9021 /*******************/ 9022 9023 memset(&fp->status_block, 0, sizeof(fp->status_block)); 9024 bnx2x_dma_free(&fp->sb_dma); 9025 } 9026 9027 if (IS_PF(sc)) { 9028 /***************************/ 9029 /* FW DECOMPRESSION BUFFER */ 9030 /***************************/ 9031 9032 bnx2x_dma_free(&sc->gz_buf_dma); 9033 sc->gz_buf = NULL; 9034 9035 /*******************/ 9036 /* SLOW PATH QUEUE */ 9037 /*******************/ 9038 9039 bnx2x_dma_free(&sc->spq_dma); 9040 sc->spq = NULL; 9041 9042 /*************/ 9043 /* SLOW PATH */ 9044 /*************/ 9045 9046 bnx2x_dma_free(&sc->sp_dma); 9047 sc->sp = NULL; 9048 9049 /***************/ 9050 /* EVENT QUEUE */ 9051 /***************/ 9052 9053 bnx2x_dma_free(&sc->eq_dma); 9054 sc->eq = NULL; 9055 9056 /************************/ 9057 /* DEFAULT STATUS BLOCK */ 9058 /************************/ 9059 9060 bnx2x_dma_free(&sc->def_sb_dma); 9061 sc->def_sb = NULL; 9062 } 9063 } 9064 9065 /* 9066 * Previous driver DMAE transaction may have occurred when pre-boot stage 9067 * ended and boot began. This would invalidate the addresses of the 9068 * transaction, resulting in was-error bit set in the PCI causing all 9069 * hw-to-host PCIe transactions to timeout. 
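 * (The condition is latched as the WAS_ERROR attention in
 * PGLUE_B_REG_PGLUE_B_INT_STS and cleared through a per-PF bit in
 * PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, which is what the code below does.)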
If this happened we want to clear 9070 * the interrupt which detected this from the pglueb and the was-done bit 9071 */ 9072 static void bnx2x_prev_interrupted_dmae(struct bnx2x_softc *sc) 9073 { 9074 uint32_t val; 9075 9076 if (!CHIP_IS_E1x(sc)) { 9077 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); 9078 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { 9079 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 9080 1 << SC_FUNC(sc)); 9081 } 9082 } 9083 } 9084 9085 static int bnx2x_prev_mcp_done(struct bnx2x_softc *sc) 9086 { 9087 uint32_t rc = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 9088 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 9089 if (!rc) { 9090 PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); 9091 return -1; 9092 } 9093 9094 return 0; 9095 } 9096 9097 static struct bnx2x_prev_list_node *bnx2x_prev_path_get_entry(struct bnx2x_softc *sc) 9098 { 9099 struct bnx2x_prev_list_node *tmp; 9100 9101 LIST_FOREACH(tmp, &bnx2x_prev_list, node) { 9102 if ((sc->pcie_bus == tmp->bus) && 9103 (sc->pcie_device == tmp->slot) && 9104 (SC_PATH(sc) == tmp->path)) { 9105 return tmp; 9106 } 9107 } 9108 9109 return NULL; 9110 } 9111 9112 static uint8_t bnx2x_prev_is_path_marked(struct bnx2x_softc *sc) 9113 { 9114 struct bnx2x_prev_list_node *tmp; 9115 int rc = FALSE; 9116 9117 rte_spinlock_lock(&bnx2x_prev_mtx); 9118 9119 tmp = bnx2x_prev_path_get_entry(sc); 9120 if (tmp) { 9121 if (tmp->aer) { 9122 PMD_DRV_LOG(DEBUG, sc, 9123 "Path %d/%d/%d was marked by AER", 9124 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 9125 } else { 9126 rc = TRUE; 9127 PMD_DRV_LOG(DEBUG, sc, 9128 "Path %d/%d/%d was already cleaned from previous drivers", 9129 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 9130 } 9131 } 9132 9133 rte_spinlock_unlock(&bnx2x_prev_mtx); 9134 9135 return rc; 9136 } 9137 9138 static int bnx2x_prev_mark_path(struct bnx2x_softc *sc, uint8_t after_undi) 9139 { 9140 struct bnx2x_prev_list_node *tmp; 9141 9142 rte_spinlock_lock(&bnx2x_prev_mtx); 9143 9144 /* Check whether the entry for this path already exists */ 9145 tmp = bnx2x_prev_path_get_entry(sc); 9146 if (tmp) { 9147 if (!tmp->aer) { 9148 PMD_DRV_LOG(DEBUG, sc, 9149 "Re-marking AER in path %d/%d/%d", 9150 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 9151 } else { 9152 PMD_DRV_LOG(DEBUG, sc, 9153 "Removing AER indication from path %d/%d/%d", 9154 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 9155 tmp->aer = 0; 9156 } 9157 9158 rte_spinlock_unlock(&bnx2x_prev_mtx); 9159 return 0; 9160 } 9161 9162 rte_spinlock_unlock(&bnx2x_prev_mtx); 9163 9164 /* Create an entry for this path and add it */ 9165 tmp = rte_malloc("", sizeof(struct bnx2x_prev_list_node), 9166 RTE_CACHE_LINE_SIZE); 9167 if (!tmp) { 9168 PMD_DRV_LOG(NOTICE, sc, "Failed to allocate 'bnx2x_prev_list_node'"); 9169 return -1; 9170 } 9171 9172 tmp->bus = sc->pcie_bus; 9173 tmp->slot = sc->pcie_device; 9174 tmp->path = SC_PATH(sc); 9175 tmp->aer = 0; 9176 tmp->undi = after_undi ? 
(1 << SC_PORT(sc)) : 0; 9177 9178 rte_spinlock_lock(&bnx2x_prev_mtx); 9179 9180 LIST_INSERT_HEAD(&bnx2x_prev_list, tmp, node); 9181 9182 rte_spinlock_unlock(&bnx2x_prev_mtx); 9183 9184 return 0; 9185 } 9186 9187 static int bnx2x_do_flr(struct bnx2x_softc *sc) 9188 { 9189 int i; 9190 9191 /* only E2 and onwards support FLR */ 9192 if (CHIP_IS_E1x(sc)) { 9193 PMD_DRV_LOG(WARNING, sc, "FLR not supported in E1H"); 9194 return -1; 9195 } 9196 9197 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 9198 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 9199 PMD_DRV_LOG(WARNING, sc, 9200 "FLR not supported by BC_VER: 0x%08x", 9201 sc->devinfo.bc_ver); 9202 return -1; 9203 } 9204 9205 /* Wait for Transaction Pending bit clean */ 9206 for (i = 0; i < 4; i++) { 9207 if (i) { 9208 DELAY(((1 << (i - 1)) * 100) * 1000); 9209 } 9210 9211 if (!bnx2x_is_pcie_pending(sc)) { 9212 goto clear; 9213 } 9214 } 9215 9216 PMD_DRV_LOG(NOTICE, sc, "PCIE transaction is not cleared, " 9217 "proceeding with reset anyway"); 9218 9219 clear: 9220 bnx2x_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); 9221 9222 return 0; 9223 } 9224 9225 struct bnx2x_mac_vals { 9226 uint32_t xmac_addr; 9227 uint32_t xmac_val; 9228 uint32_t emac_addr; 9229 uint32_t emac_val; 9230 uint32_t umac_addr; 9231 uint32_t umac_val; 9232 uint32_t bmac_addr; 9233 uint32_t bmac_val[2]; 9234 }; 9235 9236 static void 9237 bnx2x_prev_unload_close_mac(struct bnx2x_softc *sc, struct bnx2x_mac_vals *vals) 9238 { 9239 uint32_t val, base_addr, offset, mask, reset_reg; 9240 uint8_t mac_stopped = FALSE; 9241 uint8_t port = SC_PORT(sc); 9242 uint32_t wb_data[2]; 9243 9244 /* reset addresses as they also mark which values were changed */ 9245 vals->bmac_addr = 0; 9246 vals->umac_addr = 0; 9247 vals->xmac_addr = 0; 9248 vals->emac_addr = 0; 9249 9250 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); 9251 9252 if (!CHIP_IS_E3(sc)) { 9253 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 9254 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 9255 if ((mask & reset_reg) && val) { 9256 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM 9257 : NIG_REG_INGRESS_BMAC0_MEM; 9258 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL 9259 : BIGMAC_REGISTER_BMAC_CONTROL; 9260 9261 /* 9262 * use rd/wr since we cannot use dmae. This is safe 9263 * since MCP won't access the bus due to the request 9264 * to unload, and no function on the path can be 9265 * loaded at this time. 9266 */ 9267 wb_data[0] = REG_RD(sc, base_addr + offset); 9268 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); 9269 vals->bmac_addr = base_addr + offset; 9270 vals->bmac_val[0] = wb_data[0]; 9271 vals->bmac_val[1] = wb_data[1]; 9272 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; 9273 REG_WR(sc, vals->bmac_addr, wb_data[0]); 9274 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); 9275 } 9276 9277 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc) * 4; 9278 vals->emac_val = REG_RD(sc, vals->emac_addr); 9279 REG_WR(sc, vals->emac_addr, 0); 9280 mac_stopped = TRUE; 9281 } else { 9282 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 9283 base_addr = SC_PORT(sc) ? 
GRCBASE_XMAC1 : GRCBASE_XMAC0; 9284 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); 9285 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, 9286 val & ~(1 << 1)); 9287 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, 9288 val | (1 << 1)); 9289 vals->xmac_addr = base_addr + XMAC_REG_CTRL; 9290 vals->xmac_val = REG_RD(sc, vals->xmac_addr); 9291 REG_WR(sc, vals->xmac_addr, 0); 9292 mac_stopped = TRUE; 9293 } 9294 9295 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 9296 if (mask & reset_reg) { 9297 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 9298 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; 9299 vals->umac_val = REG_RD(sc, vals->umac_addr); 9300 REG_WR(sc, vals->umac_addr, 0); 9301 mac_stopped = TRUE; 9302 } 9303 } 9304 9305 if (mac_stopped) { 9306 DELAY(20000); 9307 } 9308 } 9309 9310 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 9311 #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) 9312 #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 9313 #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 9314 9315 static void 9316 bnx2x_prev_unload_undi_inc(struct bnx2x_softc *sc, uint8_t port, uint8_t inc) 9317 { 9318 uint16_t rcq, bd; 9319 uint32_t tmp_reg = REG_RD(sc, BNX2X_PREV_UNDI_PROD_ADDR(port)); 9320 9321 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; 9322 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; 9323 9324 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); 9325 REG_WR(sc, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg); 9326 } 9327 9328 static int bnx2x_prev_unload_common(struct bnx2x_softc *sc) 9329 { 9330 uint32_t reset_reg, tmp_reg = 0, rc; 9331 uint8_t prev_undi = FALSE; 9332 struct bnx2x_mac_vals mac_vals; 9333 uint32_t timer_count = 1000; 9334 uint32_t prev_brb; 9335 9336 /* 9337 * It is possible a previous function received 'common' answer, 9338 * but hasn't loaded yet, therefore creating a scenario of 9339 * multiple functions receiving 'common' on the same path. 9340 */ 9341 memset(&mac_vals, 0, sizeof(mac_vals)); 9342 9343 if (bnx2x_prev_is_path_marked(sc)) { 9344 return bnx2x_prev_mcp_done(sc); 9345 } 9346 9347 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); 9348 9349 /* Reset should be performed after BRB is emptied */ 9350 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 9351 /* Close the MAC Rx to prevent BRB from filling up */ 9352 bnx2x_prev_unload_close_mac(sc, &mac_vals); 9353 9354 /* close LLH filters towards the BRB */ 9355 elink_set_rx_filter(&sc->link_params, 0); 9356 9357 /* 9358 * Check if the UNDI driver was previously loaded. 
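 * (Detection is indirect: the leftover doorbell CID offset in
 * DORQ_REG_NORM_CID_OFST serves as the hint that UNDI ran.)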
9359 * UNDI driver initializes CID offset for normal bell to 0x7 9360 */ 9361 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 9362 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); 9363 if (tmp_reg == 0x7) { 9364 PMD_DRV_LOG(DEBUG, sc, "UNDI previously loaded"); 9365 prev_undi = TRUE; 9366 /* clear the UNDI indication */ 9367 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); 9368 /* clear possible idle check errors */ 9369 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); 9370 } 9371 } 9372 9373 /* wait until BRB is empty */ 9374 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 9375 while (timer_count) { 9376 prev_brb = tmp_reg; 9377 9378 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 9379 if (!tmp_reg) { 9380 break; 9381 } 9382 9383 PMD_DRV_LOG(DEBUG, sc, "BRB still has 0x%08x", tmp_reg); 9384 9385 /* reset timer as long as BRB actually gets emptied */ 9386 if (prev_brb > tmp_reg) { 9387 timer_count = 1000; 9388 } else { 9389 timer_count--; 9390 } 9391 9392 /* If UNDI resides in memory, manually increment it */ 9393 if (prev_undi) { 9394 bnx2x_prev_unload_undi_inc(sc, SC_PORT(sc), 1); 9395 } 9396 9397 DELAY(10); 9398 } 9399 9400 if (!timer_count) { 9401 PMD_DRV_LOG(NOTICE, sc, "Failed to empty BRB"); 9402 } 9403 } 9404 9405 /* No packets are in the pipeline, path is ready for reset */ 9406 bnx2x_reset_common(sc); 9407 9408 if (mac_vals.xmac_addr) { 9409 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); 9410 } 9411 if (mac_vals.umac_addr) { 9412 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); 9413 } 9414 if (mac_vals.emac_addr) { 9415 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); 9416 } 9417 if (mac_vals.bmac_addr) { 9418 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); 9419 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); 9420 } 9421 9422 rc = bnx2x_prev_mark_path(sc, prev_undi); 9423 if (rc) { 9424 bnx2x_prev_mcp_done(sc); 9425 return rc; 9426 } 9427 9428 return bnx2x_prev_mcp_done(sc); 9429 } 9430 9431 static int bnx2x_prev_unload_uncommon(struct bnx2x_softc *sc) 9432 { 9433 int rc; 9434 9435 /* Test if previous unload process was already finished for this path */ 9436 if (bnx2x_prev_is_path_marked(sc)) { 9437 return bnx2x_prev_mcp_done(sc); 9438 } 9439 9440 /* 9441 * If function has FLR capabilities, and existing FW version matches 9442 * the one required, then FLR will be sufficient to clean any residue 9443 * left by previous driver 9444 */ 9445 rc = bnx2x_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); 9446 if (!rc) { 9447 /* fw version is good */ 9448 rc = bnx2x_do_flr(sc); 9449 } 9450 9451 if (!rc) { 9452 /* FLR was performed */ 9453 return 0; 9454 } 9455 9456 PMD_DRV_LOG(INFO, sc, "Could not FLR"); 9457 9458 /* Close the MCP request, return failure */ 9459 rc = bnx2x_prev_mcp_done(sc); 9460 if (!rc) { 9461 rc = BNX2X_PREV_WAIT_NEEDED; 9462 } 9463 9464 return rc; 9465 } 9466 9467 static int bnx2x_prev_unload(struct bnx2x_softc *sc) 9468 { 9469 int time_counter = 10; 9470 uint32_t fw, hw_lock_reg, hw_lock_val; 9471 uint32_t rc = 0; 9472 9473 PMD_INIT_FUNC_TRACE(sc); 9474 9475 /* 9476 * Clear HW from errors which may have resulted from an interrupted 9477 * DMAE transaction. 9478 */ 9479 bnx2x_prev_interrupted_dmae(sc); 9480 9481 /* Release previously held locks */ 9482 hw_lock_reg = (SC_FUNC(sc) <= 5) ? 
9483 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : 9484 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); 9485 9486 hw_lock_val = (REG_RD(sc, hw_lock_reg)); 9487 if (hw_lock_val) { 9488 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 9489 PMD_DRV_LOG(DEBUG, sc, "Releasing previously held NVRAM lock\n"); 9490 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, 9491 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); 9492 } 9493 PMD_DRV_LOG(DEBUG, sc, "Releasing previously held HW lock\n"); 9494 REG_WR(sc, hw_lock_reg, 0xffffffff); 9495 } 9496 9497 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { 9498 PMD_DRV_LOG(DEBUG, sc, "Releasing previously held ALR\n"); 9499 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); 9500 } 9501 9502 do { 9503 /* Lock MCP using an unload request */ 9504 fw = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 9505 if (!fw) { 9506 PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); 9507 rc = -1; 9508 break; 9509 } 9510 9511 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { 9512 rc = bnx2x_prev_unload_common(sc); 9513 break; 9514 } 9515 9516 /* non-common reply from MCP might require looping */ 9517 rc = bnx2x_prev_unload_uncommon(sc); 9518 if (rc != BNX2X_PREV_WAIT_NEEDED) { 9519 break; 9520 } 9521 9522 DELAY(20000); 9523 } while (--time_counter); 9524 9525 if (!time_counter || rc) { 9526 PMD_DRV_LOG(NOTICE, sc, "Failed to unload previous driver!"); 9527 rc = -1; 9528 } 9529 9530 return rc; 9531 } 9532 9533 static void 9534 bnx2x_dcbx_set_state(struct bnx2x_softc *sc, uint8_t dcb_on, uint32_t dcbx_enabled) 9535 { 9536 if (!CHIP_IS_E1x(sc)) { 9537 sc->dcb_state = dcb_on; 9538 sc->dcbx_enabled = dcbx_enabled; 9539 } else { 9540 sc->dcb_state = FALSE; 9541 sc->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID; 9542 } 9543 PMD_DRV_LOG(DEBUG, sc, 9544 "DCB state [%s:%s]", 9545 dcb_on ? "ON" : "OFF", 9546 (dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ? "user-mode" : 9547 (dcbx_enabled == 9548 BNX2X_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" 9549 : (dcbx_enabled == 9550 BNX2X_DCBX_ENABLED_ON_NEG_ON) ? 
9551 "on-chip with negotiation" : "invalid"); 9552 } 9553 9554 static int bnx2x_set_qm_cid_count(struct bnx2x_softc *sc) 9555 { 9556 int cid_count = BNX2X_L2_MAX_CID(sc); 9557 9558 if (CNIC_SUPPORT(sc)) { 9559 cid_count += CNIC_CID_MAX; 9560 } 9561 9562 return roundup(cid_count, QM_CID_ROUND); 9563 } 9564 9565 static void bnx2x_init_multi_cos(struct bnx2x_softc *sc) 9566 { 9567 int pri, cos; 9568 9569 uint32_t pri_map = 0; 9570 9571 for (pri = 0; pri < BNX2X_MAX_PRIORITY; pri++) { 9572 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); 9573 if (cos < sc->max_cos) { 9574 sc->prio_to_cos[pri] = cos; 9575 } else { 9576 PMD_DRV_LOG(WARNING, sc, 9577 "Invalid COS %d for priority %d " 9578 "(max COS is %d), setting to 0", cos, pri, 9579 (sc->max_cos - 1)); 9580 sc->prio_to_cos[pri] = 0; 9581 } 9582 } 9583 } 9584 9585 static int bnx2x_pci_get_caps(struct bnx2x_softc *sc) 9586 { 9587 struct { 9588 uint8_t id; 9589 uint8_t next; 9590 } pci_cap; 9591 uint16_t status; 9592 struct bnx2x_pci_cap *cap; 9593 9594 cap = sc->pci_caps = rte_zmalloc("caps", sizeof(struct bnx2x_pci_cap), 9595 RTE_CACHE_LINE_SIZE); 9596 if (!cap) { 9597 PMD_DRV_LOG(NOTICE, sc, "Failed to allocate memory"); 9598 return -ENOMEM; 9599 } 9600 9601 #ifndef RTE_EXEC_ENV_FREEBSD 9602 pci_read(sc, PCI_STATUS, &status, 2); 9603 if (!(status & PCI_STATUS_CAP_LIST)) { 9604 #else 9605 pci_read(sc, PCIR_STATUS, &status, 2); 9606 if (!(status & PCIM_STATUS_CAPPRESENT)) { 9607 #endif 9608 PMD_DRV_LOG(NOTICE, sc, "PCIe capability reading failed"); 9609 return -1; 9610 } 9611 9612 #ifndef RTE_EXEC_ENV_FREEBSD 9613 pci_read(sc, PCI_CAPABILITY_LIST, &pci_cap.next, 1); 9614 #else 9615 pci_read(sc, PCIR_CAP_PTR, &pci_cap.next, 1); 9616 #endif 9617 while (pci_cap.next) { 9618 cap->addr = pci_cap.next & ~3; 9619 pci_read(sc, pci_cap.next & ~3, &pci_cap, 2); 9620 if (pci_cap.id == 0xff) 9621 break; 9622 cap->id = pci_cap.id; 9623 cap->type = BNX2X_PCI_CAP; 9624 cap->next = rte_zmalloc("pci_cap", 9625 sizeof(struct bnx2x_pci_cap), 9626 RTE_CACHE_LINE_SIZE); 9627 if (!cap->next) { 9628 PMD_DRV_LOG(NOTICE, sc, "Failed to allocate memory"); 9629 return -ENOMEM; 9630 } 9631 cap = cap->next; 9632 } 9633 9634 return 0; 9635 } 9636 9637 static void bnx2x_init_rte(struct bnx2x_softc *sc) 9638 { 9639 if (IS_VF(sc)) { 9640 sc->max_tx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF, 9641 sc->igu_sb_cnt); 9642 sc->max_rx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF, 9643 sc->igu_sb_cnt); 9644 } else { 9645 sc->max_rx_queues = BNX2X_MAX_RSS_COUNT(sc); 9646 sc->max_tx_queues = sc->max_rx_queues; 9647 } 9648 } 9649 9650 #define FW_HEADER_LEN 104 9651 #define FW_NAME_57711 "/lib/firmware/bnx2x/bnx2x-e1h-7.13.11.0.fw" 9652 #define FW_NAME_57810 "/lib/firmware/bnx2x/bnx2x-e2-7.13.11.0.fw" 9653 9654 void bnx2x_load_firmware(struct bnx2x_softc *sc) 9655 { 9656 const char *fwname; 9657 int f; 9658 struct stat st; 9659 9660 fwname = sc->devinfo.device_id == CHIP_NUM_57711 9661 ? 
FW_NAME_57711 : FW_NAME_57810; 9662 f = open(fwname, O_RDONLY); 9663 if (f < 0) { 9664 PMD_DRV_LOG(NOTICE, sc, "Can't open firmware file"); 9665 return; 9666 } 9667 9668 if (fstat(f, &st) < 0) { 9669 PMD_DRV_LOG(NOTICE, sc, "Can't stat firmware file"); 9670 close(f); 9671 return; 9672 } 9673 9674 sc->firmware = rte_zmalloc("bnx2x_fw", st.st_size, RTE_CACHE_LINE_SIZE); 9675 if (!sc->firmware) { 9676 PMD_DRV_LOG(NOTICE, sc, "Can't allocate memory for firmware"); 9677 close(f); 9678 return; 9679 } 9680 9681 if (read(f, sc->firmware, st.st_size) != st.st_size) { 9682 PMD_DRV_LOG(NOTICE, sc, "Can't read firmware data"); 9683 close(f); 9684 return; 9685 } 9686 close(f); 9687 9688 sc->fw_len = st.st_size; 9689 if (sc->fw_len < FW_HEADER_LEN) { 9690 PMD_DRV_LOG(NOTICE, sc, 9691 "Invalid fw size: %" PRIu64, sc->fw_len); 9692 return; 9693 } 9694 PMD_DRV_LOG(DEBUG, sc, "fw_len = %" PRIu64, sc->fw_len); 9695 } 9696 9697 static void 9698 bnx2x_data_to_init_ops(uint8_t * data, struct raw_op *dst, uint32_t len) 9699 { 9700 uint32_t *src = (uint32_t *) data; 9701 uint32_t i, j, tmp; 9702 9703 for (i = 0, j = 0; i < len / 8; ++i, j += 2) { 9704 tmp = rte_be_to_cpu_32(src[j]); 9705 dst[i].op = (tmp >> 24) & 0xFF; 9706 dst[i].offset = tmp & 0xFFFFFF; 9707 dst[i].raw_data = rte_be_to_cpu_32(src[j + 1]); 9708 } 9709 } 9710 9711 static void 9712 bnx2x_data_to_init_offsets(uint8_t * data, uint16_t * dst, uint32_t len) 9713 { 9714 uint16_t *src = (uint16_t *) data; 9715 uint32_t i; 9716 9717 for (i = 0; i < len / 2; ++i) 9718 dst[i] = rte_be_to_cpu_16(src[i]); 9719 } 9720 9721 static void bnx2x_data_to_init_data(uint8_t * data, uint32_t * dst, uint32_t len) 9722 { 9723 uint32_t *src = (uint32_t *) data; 9724 uint32_t i; 9725 9726 for (i = 0; i < len / 4; ++i) 9727 dst[i] = rte_be_to_cpu_32(src[i]); 9728 } 9729 9730 static void bnx2x_data_to_iro_array(uint8_t * data, struct iro *dst, uint32_t len) 9731 { 9732 uint32_t *src = (uint32_t *) data; 9733 uint32_t i, j, tmp; 9734 9735 for (i = 0, j = 0; i < len / sizeof(struct iro); ++i, ++j) { 9736 dst[i].base = rte_be_to_cpu_32(src[j++]); 9737 tmp = rte_be_to_cpu_32(src[j]); 9738 dst[i].m1 = (tmp >> 16) & 0xFFFF; 9739 dst[i].m2 = tmp & 0xFFFF; 9740 ++j; 9741 tmp = rte_be_to_cpu_32(src[j]); 9742 dst[i].m3 = (tmp >> 16) & 0xFFFF; 9743 dst[i].size = tmp & 0xFFFF; 9744 } 9745 } 9746 9747 /* 9748 * Device attach function. 9749 * 9750 * Allocates device resources, performs secondary chip identification, and 9751 * initializes driver instance variables. This function is called from driver 9752 * load after a successful probe. 9753 * 9754 * Returns: 9755 * 0 = Success, !0 = Failure 9756 */ 9757 int bnx2x_attach(struct bnx2x_softc *sc) 9758 { 9759 int rc; 9760 9761 PMD_DRV_LOG(DEBUG, sc, "Starting attach..."); 9762 9763 rc = bnx2x_pci_get_caps(sc); 9764 if (rc) { 9765 PMD_DRV_LOG(NOTICE, sc, "PCIe caps reading failed"); 9766 return rc; 9767 } 9768 9769 sc->state = BNX2X_STATE_CLOSED; 9770 9771 pci_write_long(sc, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); 9772 9773 sc->igu_base_addr = IS_VF(sc) ?
PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; 9774 9775 /* get PCI capabilites */ 9776 bnx2x_probe_pci_caps(sc); 9777 9778 if (sc->devinfo.pcie_msix_cap_reg != 0) { 9779 uint32_t val; 9780 pci_read(sc, 9781 (sc->devinfo.pcie_msix_cap_reg + PCIR_MSIX_CTRL), &val, 9782 2); 9783 sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE) + 1; 9784 } else { 9785 sc->igu_sb_cnt = 1; 9786 } 9787 9788 /* Init RTE stuff */ 9789 bnx2x_init_rte(sc); 9790 9791 if (IS_PF(sc)) { 9792 /* Enable internal target-read (in case we are probed after PF 9793 * FLR). Must be done prior to any BAR read access. Only for 9794 * 57712 and up 9795 */ 9796 if (!CHIP_IS_E1x(sc)) { 9797 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 9798 1); 9799 DELAY(200000); 9800 } 9801 9802 /* get device info and set params */ 9803 if (bnx2x_get_device_info(sc) != 0) { 9804 PMD_DRV_LOG(NOTICE, sc, "getting device info"); 9805 return -ENXIO; 9806 } 9807 9808 /* get phy settings from shmem and 'and' against admin settings */ 9809 bnx2x_get_phy_info(sc); 9810 } else { 9811 /* Left mac of VF unfilled, PF should set it for VF */ 9812 memset(sc->link_params.mac_addr, 0, RTE_ETHER_ADDR_LEN); 9813 } 9814 9815 sc->wol = 0; 9816 9817 /* set the default MTU (changed via ifconfig) */ 9818 sc->mtu = RTE_ETHER_MTU; 9819 9820 bnx2x_set_modes_bitmap(sc); 9821 9822 /* need to reset chip if UNDI was active */ 9823 if (IS_PF(sc) && !BNX2X_NOMCP(sc)) { 9824 /* init fw_seq */ 9825 sc->fw_seq = 9826 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 9827 DRV_MSG_SEQ_NUMBER_MASK); 9828 PMD_DRV_LOG(DEBUG, sc, "prev unload fw_seq 0x%04x", 9829 sc->fw_seq); 9830 bnx2x_prev_unload(sc); 9831 } 9832 9833 bnx2x_dcbx_set_state(sc, FALSE, BNX2X_DCBX_ENABLED_OFF); 9834 9835 /* calculate qm_cid_count */ 9836 sc->qm_cid_count = bnx2x_set_qm_cid_count(sc); 9837 9838 sc->max_cos = 1; 9839 bnx2x_init_multi_cos(sc); 9840 9841 return 0; 9842 } 9843 9844 static void 9845 bnx2x_igu_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t segment, 9846 uint16_t index, uint8_t op, uint8_t update) 9847 { 9848 uint32_t igu_addr = sc->igu_base_addr; 9849 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8; 9850 bnx2x_igu_ack_sb_gen(sc, segment, index, op, update, igu_addr); 9851 } 9852 9853 static void 9854 bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t storm, 9855 uint16_t index, uint8_t op, uint8_t update) 9856 { 9857 if (unlikely(sc->devinfo.int_block == INT_BLOCK_HC)) 9858 bnx2x_hc_ack_sb(sc, igu_sb_id, storm, index, op, update); 9859 else { 9860 uint8_t segment; 9861 if (CHIP_INT_MODE_IS_BC(sc)) { 9862 segment = storm; 9863 } else if (igu_sb_id != sc->igu_dsb_id) { 9864 segment = IGU_SEG_ACCESS_DEF; 9865 } else if (storm == ATTENTION_ID) { 9866 segment = IGU_SEG_ACCESS_ATTN; 9867 } else { 9868 segment = IGU_SEG_ACCESS_DEF; 9869 } 9870 bnx2x_igu_ack_sb(sc, igu_sb_id, segment, index, op, update); 9871 } 9872 } 9873 9874 static void 9875 bnx2x_igu_clear_sb_gen(struct bnx2x_softc *sc, uint8_t func, uint8_t idu_sb_id, 9876 uint8_t is_pf) 9877 { 9878 uint32_t data, ctl, cnt = 100; 9879 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 9880 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 9881 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + 9882 (idu_sb_id / 32) * 4; 9883 uint32_t sb_bit = 1 << (idu_sb_id % 32); 9884 uint32_t func_encode = func | 9885 (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 9886 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 9887 9888 /* Not supported in BC mode */ 9889 if (CHIP_INT_MODE_IS_BC(sc)) { 9890 return; 9891 } 9892 9893 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << 9894 IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 9895 IGU_REGULAR_CLEANUP_SET | IGU_REGULAR_BCLEANUP); 9896 9897 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | 9898 (func_encode << IGU_CTRL_REG_FID_SHIFT) | 9899 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); 9900 9901 REG_WR(sc, igu_addr_data, data); 9902 9903 mb(); 9904 9905 PMD_DRV_LOG(DEBUG, sc, "write 0x%08x to IGU(via GRC) addr 0x%x", 9906 ctl, igu_addr_ctl); 9907 REG_WR(sc, igu_addr_ctl, ctl); 9908 9909 mb(); 9910 9911 /* wait for clean up to finish */ 9912 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { 9913 DELAY(20000); 9914 } 9915 9916 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { 9917 PMD_DRV_LOG(DEBUG, sc, 9918 "Unable to finish IGU cleanup: " 9919 "idu_sb_id %d offset %d bit %d (cnt %d)", 9920 idu_sb_id, idu_sb_id / 32, idu_sb_id % 32, cnt); 9921 } 9922 } 9923 9924 static void bnx2x_igu_clear_sb(struct bnx2x_softc *sc, uint8_t idu_sb_id) 9925 { 9926 bnx2x_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); 9927 } 9928 9929 /*******************/ 9930 /* ECORE CALLBACKS */ 9931 /*******************/ 9932 9933 static void bnx2x_reset_common(struct bnx2x_softc *sc) 9934 { 9935 uint32_t val = 0x1400; 9936 9937 PMD_INIT_FUNC_TRACE(sc); 9938 9939 /* reset_common */ 9940 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 9941 0xd3ffff7f); 9942 9943 if (CHIP_IS_E3(sc)) { 9944 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 9945 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 9946 } 9947 9948 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); 9949 } 9950 9951 static void bnx2x_common_init_phy(struct bnx2x_softc *sc) 9952 { 9953 uint32_t shmem_base[2]; 9954 uint32_t shmem2_base[2]; 9955 9956 /* Avoid common init in case MFW supports LFA */ 9957 if (SHMEM2_RD(sc, size) > 9958 (uint32_t) offsetof(struct shmem2_region, 9959 lfa_host_addr[SC_PORT(sc)])) { 9960 return; 9961 } 9962 9963 shmem_base[0] = sc->devinfo.shmem_base; 9964 shmem2_base[0] = sc->devinfo.shmem2_base; 9965 9966 if (!CHIP_IS_E1x(sc)) { 9967 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); 9968 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); 9969 } 9970 9971 bnx2x_acquire_phy_lock(sc); 9972 elink_common_init_phy(sc, shmem_base, shmem2_base, 9973 sc->devinfo.chip_id, 0); 9974 bnx2x_release_phy_lock(sc); 9975 } 9976 9977 static void bnx2x_pf_disable(struct bnx2x_softc *sc) 9978 { 9979 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 9980 9981 val &= ~IGU_PF_CONF_FUNC_EN; 9982 9983 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 9984 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 9985 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); 9986 } 9987 9988 static void bnx2x_init_pxp(struct bnx2x_softc *sc) 9989 { 9990 uint16_t devctl; 9991 int r_order, w_order; 9992 9993 devctl = bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL); 9994 9995 w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); 9996 r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); 9997 9998 ecore_init_pxp_arb(sc, r_order, w_order); 9999 } 10000 10001 static uint32_t bnx2x_get_pretend_reg(struct bnx2x_softc *sc) 10002 { 10003 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; 10004 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); 10005 return base + (SC_ABS_FUNC(sc)) * stride; 10006 } 10007 10008 /* 
10009 * Called only on E1H or E2. 10010 * When pretending to be PF, the pretend value is the function number 0..7. 10011 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID 10012 * combination. 10013 */ 10014 static int bnx2x_pretend_func(struct bnx2x_softc *sc, uint16_t pretend_func_val) 10015 { 10016 uint32_t pretend_reg; 10017 10018 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) 10019 return -1; 10020 10021 /* get my own pretend register */ 10022 pretend_reg = bnx2x_get_pretend_reg(sc); 10023 REG_WR(sc, pretend_reg, pretend_func_val); 10024 REG_RD(sc, pretend_reg); 10025 return 0; 10026 } 10027 10028 static void bnx2x_setup_fan_failure_detection(struct bnx2x_softc *sc) 10029 { 10030 int is_required; 10031 uint32_t val; 10032 int port; 10033 10034 is_required = 0; 10035 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & 10036 SHARED_HW_CFG_FAN_FAILURE_MASK); 10037 10038 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { 10039 is_required = 1; 10040 } 10041 /* 10042 * The fan failure mechanism is usually related to the PHY type since 10043 * the power consumption of the board is affected by the PHY. Currently, 10044 * fan is required for most designs with SFX7101, BNX2X8727 and BNX2X8481. 10045 */ 10046 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { 10047 for (port = PORT_0; port < PORT_MAX; port++) { 10048 is_required |= elink_fan_failure_det_req(sc, 10049 sc-> 10050 devinfo.shmem_base, 10051 sc-> 10052 devinfo.shmem2_base, 10053 port); 10054 } 10055 } 10056 10057 if (is_required == 0) { 10058 return; 10059 } 10060 10061 /* Fan failure is indicated by SPIO 5 */ 10062 bnx2x_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 10063 10064 /* set to active low mode */ 10065 val = REG_RD(sc, MISC_REG_SPIO_INT); 10066 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 10067 REG_WR(sc, MISC_REG_SPIO_INT, val); 10068 10069 /* enable interrupt to signal the IGU */ 10070 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 10071 val |= MISC_SPIO_SPIO5; 10072 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); 10073 } 10074 10075 static void bnx2x_enable_blocks_attention(struct bnx2x_softc *sc) 10076 { 10077 uint32_t val; 10078 10079 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 10080 if (!CHIP_IS_E1x(sc)) { 10081 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); 10082 } else { 10083 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); 10084 } 10085 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 10086 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 10087 /* 10088 * mask read length error interrupts in brb for parser 10089 * (parsing unit and 'checksum and crc' unit) 10090 * these errors are legal (PU reads fixed length and CAC can cause 10091 * read length error on truncated packets) 10092 */ 10093 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); 10094 REG_WR(sc, QM_REG_QM_INT_MASK, 0); 10095 REG_WR(sc, TM_REG_TM_INT_MASK, 0); 10096 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); 10097 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); 10098 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); 10099 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ 10100 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ 10101 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); 10102 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); 10103 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); 10104 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ 10105 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ 10106 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 10107 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); 10108 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); 10109 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); 10110 /* REG_WR(sc, 
CSEM_REG_CSEM_INT_MASK_0, 0); */ 10111 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ 10112 10113 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 10114 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 10115 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); 10116 if (!CHIP_IS_E1x(sc)) { 10117 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 10118 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); 10119 } 10120 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); 10121 10122 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); 10123 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); 10124 REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); 10125 /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ 10126 10127 if (!CHIP_IS_E1x(sc)) { 10128 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 10129 REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 10130 } 10131 10132 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); 10133 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); 10134 /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ 10135 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 10136 } 10137 10138 /** 10139 * bnx2x_init_hw_common - initialize the HW at the COMMON phase. 10140 * 10141 * @sc: driver handle 10142 */ 10143 static int bnx2x_init_hw_common(struct bnx2x_softc *sc) 10144 { 10145 uint8_t abs_func_id; 10146 uint32_t val; 10147 10148 PMD_DRV_LOG(DEBUG, sc, 10149 "starting common init for func %d", SC_ABS_FUNC(sc)); 10150 10151 /* 10152 * take the RESET lock to protect undi_unload flow from accessing 10153 * registers while we are resetting the chip 10154 */ 10155 bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 10156 10157 bnx2x_reset_common(sc); 10158 10159 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); 10160 10161 val = 0xfffc; 10162 if (CHIP_IS_E3(sc)) { 10163 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 10164 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 10165 } 10166 10167 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); 10168 10169 bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 10170 10171 ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); 10172 10173 if (!CHIP_IS_E1x(sc)) { 10174 /* 10175 * 4-port mode or 2-port mode we need to turn off master-enable for 10176 * everyone. After that we turn it back on for self. 
So, we disregard 10177 * multi-function, and always disable all functions on the given path, 10178 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 10179 */ 10180 for (abs_func_id = SC_PATH(sc); 10181 abs_func_id < (E2_FUNC_MAX * 2); abs_func_id += 2) { 10182 if (abs_func_id == SC_ABS_FUNC(sc)) { 10183 REG_WR(sc, 10184 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 10185 1); 10186 continue; 10187 } 10188 10189 bnx2x_pretend_func(sc, abs_func_id); 10190 10191 /* clear pf enable */ 10192 bnx2x_pf_disable(sc); 10193 10194 bnx2x_pretend_func(sc, SC_ABS_FUNC(sc)); 10195 } 10196 } 10197 10198 ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); 10199 10200 ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); 10201 bnx2x_init_pxp(sc); 10202 10203 #ifdef __BIG_ENDIAN 10204 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); 10205 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); 10206 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); 10207 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); 10208 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); 10209 /* make sure this value is 0 */ 10210 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); 10211 10212 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); 10213 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); 10214 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); 10215 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); 10216 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 10217 #endif 10218 10219 ecore_ilt_init_page_size(sc, INITOP_SET); 10220 10221 if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { 10222 REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 10223 } 10224 10225 /* let the HW do it's magic... */ 10226 DELAY(100000); 10227 10228 /* finish PXP init */ 10229 10230 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); 10231 if (val != 1) { 10232 PMD_DRV_LOG(NOTICE, sc, "PXP2 CFG failed"); 10233 return -1; 10234 } 10235 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); 10236 if (val != 1) { 10237 PMD_DRV_LOG(NOTICE, sc, "PXP2 RD_INIT failed"); 10238 return -1; 10239 } 10240 10241 /* 10242 * Timer bug workaround for E2 only. We need to set the entire ILT to have 10243 * entries with value "0" and valid bit on. This needs to be done by the 10244 * first PF that is loaded in a path (i.e. common phase) 10245 */ 10246 if (!CHIP_IS_E1x(sc)) { 10247 /* 10248 * In E2 there is a bug in the timers block that can cause function 6 / 7 10249 * (i.e. vnic3) to start even if it is marked as "scan-off". 10250 * This occurs when a different function (func2,3) is being marked 10251 * as "scan-off". Real-life scenario for example: if a driver is being 10252 * load-unloaded while func6,7 are down. This will cause the timer to access 10253 * the ilt, translate to a logical address and send a request to read/write. 10254 * Since the ilt for the function that is down is not valid, this will cause 10255 * a translation error which is unrecoverable. 10256 * The Workaround is intended to make sure that when this happens nothing 10257 * fatal will occur. The workaround: 10258 * 1. First PF driver which loads on a path will: 10259 * a. After taking the chip out of reset, by using pretend, 10260 * it will write "0" to the following registers of 10261 * the other vnics. 10262 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 10263 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); 10264 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); 10265 * And for itself it will write '1' to 10266 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable 10267 * dmae-operations (writing to pram for example.) 10268 * note: can be done for only function 6,7 but cleaner this 10269 * way. 10270 * b. Write zero+valid to the entire ILT. 
10271 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of 10272 * VNIC3 (of that port). The range allocated will be the 10273 * entire ILT. This is needed to prevent ILT range error. 10274 * 2. Any PF driver load flow: 10275 * a. ILT update with the physical addresses of the allocated 10276 * logical pages. 10277 * b. Wait 20msec. - note that this timeout is needed to make 10278 * sure there are no requests in one of the PXP internal 10279 * queues with "old" ILT addresses. 10280 * c. PF enable in the PGLC. 10281 * d. Clear the was_error of the PF in the PGLC. (could have 10282 * occurred while driver was down) 10283 * e. PF enable in the CFC (WEAK + STRONG) 10284 * f. Timers scan enable 10285 * 3. PF driver unload flow: 10286 * a. Clear the Timers scan_en. 10287 * b. Polling for scan_on=0 for that PF. 10288 * c. Clear the PF enable bit in the PXP. 10289 * d. Clear the PF enable in the CFC (WEAK + STRONG) 10290 * e. Write zero+valid to all ILT entries (The valid bit must 10291 * stay set) 10292 * f. If this is VNIC 3 of a port then also init 10293 * first_timers_ilt_entry to zero and last_timers_ilt_entry 10294 * to the last enrty in the ILT. 10295 * 10296 * Notes: 10297 * Currently the PF error in the PGLC is non recoverable. 10298 * In the future the there will be a recovery routine for this error. 10299 * Currently attention is masked. 10300 * Having an MCP lock on the load/unload process does not guarantee that 10301 * there is no Timer disable during Func6/7 enable. This is because the 10302 * Timers scan is currently being cleared by the MCP on FLR. 10303 * Step 2.d can be done only for PF6/7 and the driver can also check if 10304 * there is error before clearing it. But the flow above is simpler and 10305 * more general. 10306 * All ILT entries are written by zero+valid and not just PF6/7 10307 * ILT entries since in the future the ILT entries allocation for 10308 * PF-s might be dynamic. 10309 */ 10310 struct ilt_client_info ilt_cli; 10311 struct ecore_ilt ilt; 10312 10313 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 10314 memset(&ilt, 0, sizeof(struct ecore_ilt)); 10315 10316 /* initialize dummy TM client */ 10317 ilt_cli.start = 0; 10318 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 10319 ilt_cli.client_num = ILT_CLIENT_TM; 10320 10321 /* 10322 * Step 1: set zeroes to all ilt page entries with valid bit on 10323 * Step 2: set the timers first/last ilt entry to point 10324 * to the entire range to prevent ILT range error for 3rd/4th 10325 * vnic (this code assumes existence of the vnic) 10326 * 10327 * both steps performed by call to ecore_ilt_client_init_op() 10328 * with dummy TM client 10329 * 10330 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT 10331 * and his brother are split registers 10332 */ 10333 10334 bnx2x_pretend_func(sc, (SC_PATH(sc) + 6)); 10335 ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR); 10336 bnx2x_pretend_func(sc, SC_ABS_FUNC(sc)); 10337 10338 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN); 10339 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN); 10340 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); 10341 } 10342 10343 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0); 10344 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); 10345 10346 if (!CHIP_IS_E1x(sc)) { 10347 int factor = 0; 10348 10349 ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); 10350 ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); 10351 10352 /* let the HW do it's magic... 
*/ 10353 do { 10354 DELAY(200000); 10355 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); 10356 } while (factor-- && (val != 1)); 10357 10358 if (val != 1) { 10359 PMD_DRV_LOG(NOTICE, sc, "ATC_INIT failed"); 10360 return -1; 10361 } 10362 } 10363 10364 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); 10365 10366 /* clean the DMAE memory */ 10367 sc->dmae_ready = 1; 10368 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); 10369 10370 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); 10371 10372 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); 10373 10374 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); 10375 10376 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); 10377 10378 bnx2x_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); 10379 bnx2x_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); 10380 bnx2x_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); 10381 bnx2x_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); 10382 10383 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); 10384 10385 /* QM queues pointers table */ 10386 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); 10387 10388 /* soft reset pulse */ 10389 REG_WR(sc, QM_REG_SOFT_RESET, 1); 10390 REG_WR(sc, QM_REG_SOFT_RESET, 0); 10391 10392 if (CNIC_SUPPORT(sc)) 10393 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); 10394 10395 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); 10396 10397 if (!CHIP_REV_IS_SLOW(sc)) { 10398 /* enable hw interrupt from doorbell Q */ 10399 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 10400 } 10401 10402 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 10403 10404 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 10405 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); 10406 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); 10407 10408 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { 10409 if (IS_MF_AFEX(sc)) { 10410 /* 10411 * configure that AFEX and VLAN headers must be 10412 * received in AFEX mode 10413 */ 10414 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); 10415 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); 10416 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 10417 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 10418 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); 10419 } else { 10420 /* 10421 * Bit-map indicating which L2 hdrs may appear 10422 * after the basic Ethernet header 10423 */ 10424 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 10425 sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); 10426 } 10427 } 10428 10429 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); 10430 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); 10431 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); 10432 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); 10433 10434 if (!CHIP_IS_E1x(sc)) { 10435 /* reset VFC memories */ 10436 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 10437 VFC_MEMORIES_RST_REG_CAM_RST | 10438 VFC_MEMORIES_RST_REG_RAM_RST); 10439 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 10440 VFC_MEMORIES_RST_REG_CAM_RST | 10441 VFC_MEMORIES_RST_REG_RAM_RST); 10442 10443 DELAY(20000); 10444 } 10445 10446 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); 10447 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); 10448 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); 10449 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); 10450 10451 /* sync semi rtc */ 10452 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x80000000); 10453 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x80000000); 10454 10455 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); 10456 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); 10457 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); 10458 10459 if (!CHIP_IS_E1x(sc)) { 10460 if (IS_MF_AFEX(sc)) { 10461 /* 10462 * configure that AFEX and VLAN headers must be 10463 * sent in AFEX mode 10464 */ 10465 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); 10466 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); 10467 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 10468 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 10469 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); 10470 } else { 10471 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 10472 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); 10473 } 10474 } 10475 10476 REG_WR(sc, SRC_REG_SOFT_RST, 1); 10477 10478 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); 10479 10480 if (CNIC_SUPPORT(sc)) { 10481 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); 10482 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 10483 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); 10484 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); 10485 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); 10486 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 10487 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); 10488 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 10489 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); 10490 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); 10491 } 10492 REG_WR(sc, SRC_REG_SOFT_RST, 0); 10493 10494 if (sizeof(union cdu_context) != 1024) { 10495 /* we currently assume that a context is 1024 bytes */ 10496 PMD_DRV_LOG(NOTICE, sc, 10497 "please adjust the size of cdu_context(%ld)", 10498 (long)sizeof(union cdu_context)); 10499 } 10500 10501 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); 10502 val = (4 << 24) + (0 << 12) + 1024; 10503 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); 10504 10505 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); 10506 10507 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); 10508 /* enable context validation interrupt from CFC */ 10509 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 10510 10511 /* set the thresholds to prevent CFC/CDU race */ 10512 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); 10513 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); 10514 10515 if (!CHIP_IS_E1x(sc) && BNX2X_NOMCP(sc)) { 10516 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); 10517 } 10518 10519 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); 10520 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); 10521 10522 /* Reset PCIE errors for debug */ 10523 REG_WR(sc, 0x2814, 0xffffffff); 10524 REG_WR(sc, 0x3820, 0xffffffff); 10525 10526 if 
(!CHIP_IS_E1x(sc)) { 10527 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 10528 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 10529 PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); 10530 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, 10531 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | 10532 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | 10533 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); 10534 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, 10535 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | 10536 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | 10537 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); 10538 } 10539 10540 ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON); 10541 10542 /* in E3 this done in per-port section */ 10543 if (!CHIP_IS_E3(sc)) 10544 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc)); 10545 10546 if (CHIP_IS_E1H(sc)) { 10547 /* not applicable for E2 (and above ...) */ 10548 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc)); 10549 } 10550 10551 if (CHIP_REV_IS_SLOW(sc)) { 10552 DELAY(200000); 10553 } 10554 10555 /* finish CFC init */ 10556 val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); 10557 if (val != 1) { 10558 PMD_DRV_LOG(NOTICE, sc, "CFC LL_INIT failed"); 10559 return -1; 10560 } 10561 val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); 10562 if (val != 1) { 10563 PMD_DRV_LOG(NOTICE, sc, "CFC AC_INIT failed"); 10564 return -1; 10565 } 10566 val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10); 10567 if (val != 1) { 10568 PMD_DRV_LOG(NOTICE, sc, "CFC CAM_INIT failed"); 10569 return -1; 10570 } 10571 REG_WR(sc, CFC_REG_DEBUG0, 0); 10572 10573 bnx2x_setup_fan_failure_detection(sc); 10574 10575 /* clear PXP2 attentions */ 10576 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 10577 10578 bnx2x_enable_blocks_attention(sc); 10579 10580 if (!CHIP_REV_IS_SLOW(sc)) { 10581 ecore_enable_blocks_parity(sc); 10582 } 10583 10584 if (!BNX2X_NOMCP(sc)) { 10585 if (CHIP_IS_E1x(sc)) { 10586 bnx2x_common_init_phy(sc); 10587 } 10588 } 10589 10590 return 0; 10591 } 10592 10593 /** 10594 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. 10595 * 10596 * @sc: driver handle 10597 */ 10598 static int bnx2x_init_hw_common_chip(struct bnx2x_softc *sc) 10599 { 10600 int rc = bnx2x_init_hw_common(sc); 10601 10602 if (rc) { 10603 return rc; 10604 } 10605 10606 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 10607 if (!BNX2X_NOMCP(sc)) { 10608 bnx2x_common_init_phy(sc); 10609 } 10610 10611 return 0; 10612 } 10613 10614 static int bnx2x_init_hw_port(struct bnx2x_softc *sc) 10615 { 10616 int port = SC_PORT(sc); 10617 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 10618 uint32_t low, high; 10619 uint32_t val; 10620 10621 PMD_DRV_LOG(DEBUG, sc, "starting port init for port %d", port); 10622 10623 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0); 10624 10625 ecore_init_block(sc, BLOCK_MISC, init_phase); 10626 ecore_init_block(sc, BLOCK_PXP, init_phase); 10627 ecore_init_block(sc, BLOCK_PXP2, init_phase); 10628 10629 /* 10630 * Timers bug workaround: disables the pf_master bit in pglue at 10631 * common phase, we need to enable it here before any dmae access are 10632 * attempted. 
Therefore we manually added the enable-master to the 10633 * port phase (it also happens in the function phase) 10634 */ 10635 if (!CHIP_IS_E1x(sc)) { 10636 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 10637 } 10638 10639 ecore_init_block(sc, BLOCK_ATC, init_phase); 10640 ecore_init_block(sc, BLOCK_DMAE, init_phase); 10641 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 10642 ecore_init_block(sc, BLOCK_QM, init_phase); 10643 10644 ecore_init_block(sc, BLOCK_TCM, init_phase); 10645 ecore_init_block(sc, BLOCK_UCM, init_phase); 10646 ecore_init_block(sc, BLOCK_CCM, init_phase); 10647 ecore_init_block(sc, BLOCK_XCM, init_phase); 10648 10649 /* QM cid (connection) count */ 10650 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); 10651 10652 if (CNIC_SUPPORT(sc)) { 10653 ecore_init_block(sc, BLOCK_TM, init_phase); 10654 REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port * 4, 20); 10655 REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port * 4, 31); 10656 } 10657 10658 ecore_init_block(sc, BLOCK_DORQ, init_phase); 10659 10660 ecore_init_block(sc, BLOCK_BRB1, init_phase); 10661 10662 if (CHIP_IS_E1H(sc)) { 10663 if (IS_MF(sc)) { 10664 low = (BNX2X_ONE_PORT(sc) ? 160 : 246); 10665 } else if (sc->mtu > 4096) { 10666 if (BNX2X_ONE_PORT(sc)) { 10667 low = 160; 10668 } else { 10669 val = sc->mtu; 10670 /* (24*1024 + val*4)/256 */ 10671 low = (96 + (val / 64) + ((val % 64) ? 1 : 0)); 10672 } 10673 } else { 10674 low = (BNX2X_ONE_PORT(sc) ? 80 : 160); 10675 } 10676 high = (low + 56); /* 14*1024/256 */ 10677 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port * 4, low); 10678 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port * 4, high); 10679 } 10680 10681 if (CHIP_IS_MODE_4_PORT(sc)) { 10682 REG_WR(sc, SC_PORT(sc) ? 10683 BRB1_REG_MAC_GUARANTIED_1 : 10684 BRB1_REG_MAC_GUARANTIED_0, 40); 10685 } 10686 10687 ecore_init_block(sc, BLOCK_PRS, init_phase); 10688 if (CHIP_IS_E3B0(sc)) { 10689 if (IS_MF_AFEX(sc)) { 10690 /* configure headers for AFEX mode */ 10691 if (SC_PORT(sc)) { 10692 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC_PORT_1, 10693 0xE); 10694 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0_PORT_1, 10695 0x6); 10696 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS_PORT_1, 0xA); 10697 } else { 10698 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC_PORT_0, 10699 0xE); 10700 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 10701 0x6); 10702 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 10703 } 10704 } else { 10705 /* Ovlan exists only if we are in multi-function + 10706 * switch-dependent mode, in switch-independent there 10707 * is no ovlan headers 10708 */ 10709 REG_WR(sc, SC_PORT(sc) ? 10710 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 10711 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 10712 (sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6)); 10713 } 10714 } 10715 10716 ecore_init_block(sc, BLOCK_TSDM, init_phase); 10717 ecore_init_block(sc, BLOCK_CSDM, init_phase); 10718 ecore_init_block(sc, BLOCK_USDM, init_phase); 10719 ecore_init_block(sc, BLOCK_XSDM, init_phase); 10720 10721 ecore_init_block(sc, BLOCK_TSEM, init_phase); 10722 ecore_init_block(sc, BLOCK_USEM, init_phase); 10723 ecore_init_block(sc, BLOCK_CSEM, init_phase); 10724 ecore_init_block(sc, BLOCK_XSEM, init_phase); 10725 10726 ecore_init_block(sc, BLOCK_UPB, init_phase); 10727 ecore_init_block(sc, BLOCK_XPB, init_phase); 10728 10729 ecore_init_block(sc, BLOCK_PBF, init_phase); 10730 10731 if (CHIP_IS_E1x(sc)) { 10732 /* configure PBF to work without PAUSE mtu 9000 */ 10733 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 0); 10734 10735 /* update threshold */ 10736 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, (9040 / 16)); 10737 /* update init credit */ 10738 REG_WR(sc, PBF_REG_P0_INIT_CRD + port * 4, 10739 (9040 / 16) + 553 - 22); 10740 10741 /* probe changes */ 10742 REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 1); 10743 DELAY(50); 10744 REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 0); 10745 } 10746 10747 if (CNIC_SUPPORT(sc)) { 10748 ecore_init_block(sc, BLOCK_SRC, init_phase); 10749 } 10750 10751 ecore_init_block(sc, BLOCK_CDU, init_phase); 10752 ecore_init_block(sc, BLOCK_CFC, init_phase); 10753 ecore_init_block(sc, BLOCK_HC, init_phase); 10754 ecore_init_block(sc, BLOCK_IGU, init_phase); 10755 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 10756 /* init aeu_mask_attn_func_0/1: 10757 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 10758 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 10759 * bits 4-7 are used for "per vn group attention" */ 10760 val = IS_MF(sc) ? 0xF7 : 0x7; 10761 val |= 0x10; 10762 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4, val); 10763 10764 ecore_init_block(sc, BLOCK_NIG, init_phase); 10765 10766 if (!CHIP_IS_E1x(sc)) { 10767 /* Bit-map indicating which L2 hdrs may appear after the 10768 * basic Ethernet header 10769 */ 10770 if (IS_MF_AFEX(sc)) { 10771 REG_WR(sc, SC_PORT(sc) ? 10772 NIG_REG_P1_HDRS_AFTER_BASIC : 10773 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 10774 } else { 10775 REG_WR(sc, SC_PORT(sc) ? 10776 NIG_REG_P1_HDRS_AFTER_BASIC : 10777 NIG_REG_P0_HDRS_AFTER_BASIC, 10778 IS_MF_SD(sc) ? 7 : 6); 10779 } 10780 10781 if (CHIP_IS_E3(sc)) { 10782 REG_WR(sc, SC_PORT(sc) ? 10783 NIG_REG_LLH1_MF_MODE : 10784 NIG_REG_LLH_MF_MODE, IS_MF(sc)); 10785 } 10786 } 10787 if (!CHIP_IS_E3(sc)) { 10788 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1); 10789 } 10790 10791 /* 0x2 disable mf_ov, 0x1 enable */ 10792 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port * 4, 10793 (IS_MF_SD(sc) ? 0x1 : 0x2)); 10794 10795 if (!CHIP_IS_E1x(sc)) { 10796 val = 0; 10797 switch (sc->devinfo.mf_info.mf_mode) { 10798 case MULTI_FUNCTION_SD: 10799 val = 1; 10800 break; 10801 case MULTI_FUNCTION_SI: 10802 case MULTI_FUNCTION_AFEX: 10803 val = 2; 10804 break; 10805 } 10806 10807 REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE : 10808 NIG_REG_LLH0_CLS_TYPE), val); 10809 } 10810 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port * 4, 0); 10811 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port * 4, 0); 10812 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port * 4, 1); 10813 10814 /* If SPIO5 is set to generate interrupts, enable it for this port */ 10815 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 10816 if (val & MISC_SPIO_SPIO5) { 10817 uint32_t reg_addr = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 10818 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 10819 val = REG_RD(sc, reg_addr); 10820 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 10821 REG_WR(sc, reg_addr, val); 10822 } 10823 10824 return 0; 10825 } 10826 10827 static uint32_t 10828 bnx2x_flr_clnup_reg_poll(struct bnx2x_softc *sc, uint32_t reg, 10829 uint32_t expected, uint32_t poll_count) 10830 { 10831 uint32_t cur_cnt = poll_count; 10832 uint32_t val; 10833 10834 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { 10835 DELAY(FLR_WAIT_INTERVAL); 10836 } 10837 10838 return val; 10839 } 10840 10841 static int 10842 bnx2x_flr_clnup_poll_hw_counter(struct bnx2x_softc *sc, uint32_t reg, 10843 __rte_unused const char *msg, uint32_t poll_cnt) 10844 { 10845 uint32_t val = bnx2x_flr_clnup_reg_poll(sc, reg, 0, poll_cnt); 10846 10847 if (val != 0) { 10848 PMD_DRV_LOG(NOTICE, sc, "%s usage count=%d", msg, val); 10849 return -1; 10850 } 10851 10852 return 0; 10853 } 10854 10855 /* Common routines with VF FLR cleanup */ 10856 static uint32_t bnx2x_flr_clnup_poll_count(struct bnx2x_softc *sc) 10857 { 10858 /* adjust polling timeout */ 10859 if (CHIP_REV_IS_EMUL(sc)) { 10860 return FLR_POLL_CNT * 2000; 10861 } 10862 10863 if (CHIP_REV_IS_FPGA(sc)) { 10864 return FLR_POLL_CNT * 120; 10865 } 10866 10867 return FLR_POLL_CNT; 10868 } 10869 10870 static int bnx2x_poll_hw_usage_counters(struct bnx2x_softc *sc, uint32_t poll_cnt) 10871 { 10872 /* wait for CFC PF usage-counter to zero (includes all the VFs) */ 10873 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10874 CFC_REG_NUM_LCIDS_INSIDE_PF, 10875 "CFC PF usage counter timed out", 10876 poll_cnt)) { 10877 return -1; 10878 } 10879 10880 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ 10881 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10882 DORQ_REG_PF_USAGE_CNT, 10883 "DQ PF usage counter timed out", 10884 poll_cnt)) { 10885 return -1; 10886 } 10887 10888 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ 10889 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10890 QM_REG_PF_USG_CNT_0 + 4 * SC_FUNC(sc), 10891 "QM PF usage counter timed out", 10892 poll_cnt)) { 10893 return -1; 10894 } 10895 10896 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ 10897 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10898 TM_REG_LIN0_VNIC_UC + 4 * SC_PORT(sc), 10899 "Timers VNIC usage counter timed out", 10900 poll_cnt)) { 10901 return -1; 10902 } 10903 10904 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10905 TM_REG_LIN0_NUM_SCANS + 10906 4 * SC_PORT(sc), 10907 "Timers NUM_SCANS usage counter timed out", 10908 poll_cnt)) { 10909 return -1; 10910 } 10911 10912 /* Wait DMAE PF usage counter to zero */ 10913 if (bnx2x_flr_clnup_poll_hw_counter(sc, 10914 dmae_reg_go_c[INIT_DMAE_C(sc)], 10915 "DMAE dommand register timed out", 10916 poll_cnt)) { 10917 return -1; 10918 } 10919 10920 return 0; 10921 } 10922 10923 #define OP_GEN_PARAM(param) \ 10924 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) 10925 #define OP_GEN_TYPE(type) \ 10926 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) 10927 #define OP_GEN_AGG_VECT(index) \ 10928 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) 10929 10930 static int 10931 bnx2x_send_final_clnup(struct bnx2x_softc *sc, uint8_t clnup_func, 10932 uint32_t poll_cnt) 10933 { 10934 uint32_t op_gen_command = 0; 10935 uint32_t comp_addr = (BAR_CSTRORM_INTMEM + 10936 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); 10937 int ret = 0; 10938 10939 if (REG_RD(sc, comp_addr)) { 10940 PMD_DRV_LOG(NOTICE, sc, 10941 
"Cleanup complete was not 0 before sending"); 10942 return -1; 10943 } 10944 10945 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); 10946 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); 10947 op_gen_command |= OP_GEN_AGG_VECT(clnup_func); 10948 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; 10949 10950 REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); 10951 10952 if (bnx2x_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { 10953 PMD_DRV_LOG(NOTICE, sc, "FW final cleanup did not succeed"); 10954 PMD_DRV_LOG(DEBUG, sc, "At timeout completion address contained %x", 10955 (REG_RD(sc, comp_addr))); 10956 rte_panic("FLR cleanup failed"); 10957 return -1; 10958 } 10959 10960 /* Zero completion for nxt FLR */ 10961 REG_WR(sc, comp_addr, 0); 10962 10963 return ret; 10964 } 10965 10966 static void 10967 bnx2x_pbf_pN_buf_flushed(struct bnx2x_softc *sc, struct pbf_pN_buf_regs *regs, 10968 uint32_t poll_count) 10969 { 10970 uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; 10971 uint32_t cur_cnt = poll_count; 10972 10973 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); 10974 crd = crd_start = REG_RD(sc, regs->crd); 10975 init_crd = REG_RD(sc, regs->init_crd); 10976 10977 while ((crd != init_crd) && 10978 ((uint32_t) ((int32_t) crd_freed - (int32_t) crd_freed_start) < 10979 (init_crd - crd_start))) { 10980 if (cur_cnt--) { 10981 DELAY(FLR_WAIT_INTERVAL); 10982 crd = REG_RD(sc, regs->crd); 10983 crd_freed = REG_RD(sc, regs->crd_freed); 10984 } else { 10985 break; 10986 } 10987 } 10988 } 10989 10990 static void 10991 bnx2x_pbf_pN_cmd_flushed(struct bnx2x_softc *sc, struct pbf_pN_cmd_regs *regs, 10992 uint32_t poll_count) 10993 { 10994 uint32_t occup, to_free, freed, freed_start; 10995 uint32_t cur_cnt = poll_count; 10996 10997 occup = to_free = REG_RD(sc, regs->lines_occup); 10998 freed = freed_start = REG_RD(sc, regs->lines_freed); 10999 11000 while (occup && 11001 ((uint32_t) ((int32_t) freed - (int32_t) freed_start) < 11002 to_free)) { 11003 if (cur_cnt--) { 11004 DELAY(FLR_WAIT_INTERVAL); 11005 occup = REG_RD(sc, regs->lines_occup); 11006 freed = REG_RD(sc, regs->lines_freed); 11007 } else { 11008 break; 11009 } 11010 } 11011 } 11012 11013 static void bnx2x_tx_hw_flushed(struct bnx2x_softc *sc, uint32_t poll_count) 11014 { 11015 struct pbf_pN_cmd_regs cmd_regs[] = { 11016 {0, (CHIP_IS_E3B0(sc)) ? 11017 PBF_REG_TQ_OCCUPANCY_Q0 : PBF_REG_P0_TQ_OCCUPANCY, 11018 (CHIP_IS_E3B0(sc)) ? 11019 PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT}, 11020 {1, (CHIP_IS_E3B0(sc)) ? 11021 PBF_REG_TQ_OCCUPANCY_Q1 : PBF_REG_P1_TQ_OCCUPANCY, 11022 (CHIP_IS_E3B0(sc)) ? 11023 PBF_REG_TQ_LINES_FREED_CNT_Q1 : PBF_REG_P1_TQ_LINES_FREED_CNT}, 11024 {4, (CHIP_IS_E3B0(sc)) ? 11025 PBF_REG_TQ_OCCUPANCY_LB_Q : PBF_REG_P4_TQ_OCCUPANCY, 11026 (CHIP_IS_E3B0(sc)) ? 11027 PBF_REG_TQ_LINES_FREED_CNT_LB_Q : 11028 PBF_REG_P4_TQ_LINES_FREED_CNT} 11029 }; 11030 11031 struct pbf_pN_buf_regs buf_regs[] = { 11032 {0, (CHIP_IS_E3B0(sc)) ? 11033 PBF_REG_INIT_CRD_Q0 : PBF_REG_P0_INIT_CRD, 11034 (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q0 : PBF_REG_P0_CREDIT, 11035 (CHIP_IS_E3B0(sc)) ? 11036 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : 11037 PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, 11038 {1, (CHIP_IS_E3B0(sc)) ? 11039 PBF_REG_INIT_CRD_Q1 : PBF_REG_P1_INIT_CRD, 11040 (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q1 : PBF_REG_P1_CREDIT, 11041 (CHIP_IS_E3B0(sc)) ? 11042 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : 11043 PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, 11044 {4, (CHIP_IS_E3B0(sc)) ? 
11045 PBF_REG_INIT_CRD_LB_Q : PBF_REG_P4_INIT_CRD, 11046 (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_LB_Q : PBF_REG_P4_CREDIT, 11047 (CHIP_IS_E3B0(sc)) ? 11048 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : 11049 PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, 11050 }; 11051 11052 uint32_t i; 11053 11054 /* Verify the command queues are flushed P0, P1, P4 */ 11055 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) { 11056 bnx2x_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count); 11057 } 11058 11059 /* Verify the transmission buffers are flushed P0, P1, P4 */ 11060 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) { 11061 bnx2x_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count); 11062 } 11063 } 11064 11065 static void bnx2x_hw_enable_status(struct bnx2x_softc *sc) 11066 { 11067 __rte_unused uint32_t val; 11068 11069 val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF); 11070 PMD_DRV_LOG(DEBUG, sc, "CFC_REG_WEAK_ENABLE_PF is 0x%x", val); 11071 11072 val = REG_RD(sc, PBF_REG_DISABLE_PF); 11073 PMD_DRV_LOG(DEBUG, sc, "PBF_REG_DISABLE_PF is 0x%x", val); 11074 11075 val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN); 11076 PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSI_EN is 0x%x", val); 11077 11078 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN); 11079 PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSIX_EN is 0x%x", val); 11080 11081 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK); 11082 PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x", val); 11083 11084 val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); 11085 PMD_DRV_LOG(DEBUG, sc, 11086 "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x", val); 11087 11088 val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); 11089 PMD_DRV_LOG(DEBUG, sc, 11090 "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x", val); 11091 11092 val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 11093 PMD_DRV_LOG(DEBUG, sc, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x", 11094 val); 11095 } 11096 11097 /** 11098 * bnx2x_pf_flr_clnup 11099 * a. re-enable target read on the PF 11100 * b. poll cfc per function usgae counter 11101 * c. poll the qm perfunction usage counter 11102 * d. poll the tm per function usage counter 11103 * e. poll the tm per function scan-done indication 11104 * f. clear the dmae channel associated wit hthe PF 11105 * g. zero the igu 'trailing edge' and 'leading edge' regs (attentions) 11106 * h. 
call the common flr cleanup code with -1 (pf indication) 11107 */ 11108 static int bnx2x_pf_flr_clnup(struct bnx2x_softc *sc) 11109 { 11110 uint32_t poll_cnt = bnx2x_flr_clnup_poll_count(sc); 11111 11112 /* Re-enable PF target read access */ 11113 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 11114 11115 /* Poll HW usage counters */ 11116 if (bnx2x_poll_hw_usage_counters(sc, poll_cnt)) { 11117 return -1; 11118 } 11119 11120 /* Zero the igu 'trailing edge' and 'leading edge' */ 11121 11122 /* Send the FW cleanup command */ 11123 if (bnx2x_send_final_clnup(sc, (uint8_t) SC_FUNC(sc), poll_cnt)) { 11124 return -1; 11125 } 11126 11127 /* ATC cleanup */ 11128 11129 /* Verify TX hw is flushed */ 11130 bnx2x_tx_hw_flushed(sc, poll_cnt); 11131 11132 /* Wait 100ms (not adjusted according to platform) */ 11133 DELAY(100000); 11134 11135 /* Verify no pending pci transactions */ 11136 if (bnx2x_is_pcie_pending(sc)) { 11137 PMD_DRV_LOG(NOTICE, sc, "PCIE Transactions still pending"); 11138 } 11139 11140 /* Debug */ 11141 bnx2x_hw_enable_status(sc); 11142 11143 /* 11144 * Master enable - Due to WB DMAE writes performed before this 11145 * register is re-initialized as part of the regular function init 11146 */ 11147 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 11148 11149 return 0; 11150 } 11151 11152 static int bnx2x_init_hw_func(struct bnx2x_softc *sc) 11153 { 11154 int port = SC_PORT(sc); 11155 int func = SC_FUNC(sc); 11156 int init_phase = PHASE_PF0 + func; 11157 struct ecore_ilt *ilt = sc->ilt; 11158 uint16_t cdu_ilt_start; 11159 uint32_t addr, val; 11160 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; 11161 int main_mem_width, rc; 11162 uint32_t i; 11163 11164 PMD_DRV_LOG(DEBUG, sc, "starting func init for func %d", func); 11165 11166 /* FLR cleanup */ 11167 if (!CHIP_IS_E1x(sc)) { 11168 rc = bnx2x_pf_flr_clnup(sc); 11169 if (rc) { 11170 PMD_DRV_LOG(NOTICE, sc, "FLR cleanup failed!"); 11171 return rc; 11172 } 11173 } 11174 11175 /* set MSI reconfigure capability */ 11176 if (sc->devinfo.int_block == INT_BLOCK_HC) { 11177 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 11178 val = REG_RD(sc, addr); 11179 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 11180 REG_WR(sc, addr, val); 11181 } 11182 11183 ecore_init_block(sc, BLOCK_PXP, init_phase); 11184 ecore_init_block(sc, BLOCK_PXP2, init_phase); 11185 11186 ilt = sc->ilt; 11187 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 11188 11189 for (i = 0; i < L2_ILT_LINES(sc); i++) { 11190 ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; 11191 ilt->lines[cdu_ilt_start + i].page_mapping = 11192 (rte_iova_t)sc->context[i].vcxt_dma.paddr; 11193 ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; 11194 } 11195 ecore_ilt_init_op(sc, INITOP_SET); 11196 11197 REG_WR(sc, PRS_REG_NIC_MODE, 1); 11198 11199 if (!CHIP_IS_E1x(sc)) { 11200 uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; 11201 11202 /* Turn on a single ISR mode in IGU if driver is going to use 11203 * INT#x or MSI 11204 */ 11205 if ((sc->interrupt_mode != INTR_MODE_MSIX) 11206 || (sc->interrupt_mode != INTR_MODE_SINGLE_MSIX)) { 11207 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 11208 } 11209 11210 /* 11211 * Timers workaround bug: function init part. 
11212 * Need to wait 20msec after initializing ILT, 11213 * needed to make sure there are no requests in 11214 * one of the PXP internal queues with "old" ILT addresses 11215 */ 11216 DELAY(20000); 11217 11218 /* 11219 * Master enable - Due to WB DMAE writes performed before this 11220 * register is re-initialized as part of the regular function 11221 * init 11222 */ 11223 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 11224 /* Enable the function in IGU */ 11225 REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); 11226 } 11227 11228 sc->dmae_ready = 1; 11229 11230 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 11231 11232 if (!CHIP_IS_E1x(sc)) 11233 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); 11234 11235 ecore_init_block(sc, BLOCK_ATC, init_phase); 11236 ecore_init_block(sc, BLOCK_DMAE, init_phase); 11237 ecore_init_block(sc, BLOCK_NIG, init_phase); 11238 ecore_init_block(sc, BLOCK_SRC, init_phase); 11239 ecore_init_block(sc, BLOCK_MISC, init_phase); 11240 ecore_init_block(sc, BLOCK_TCM, init_phase); 11241 ecore_init_block(sc, BLOCK_UCM, init_phase); 11242 ecore_init_block(sc, BLOCK_CCM, init_phase); 11243 ecore_init_block(sc, BLOCK_XCM, init_phase); 11244 ecore_init_block(sc, BLOCK_TSEM, init_phase); 11245 ecore_init_block(sc, BLOCK_USEM, init_phase); 11246 ecore_init_block(sc, BLOCK_CSEM, init_phase); 11247 ecore_init_block(sc, BLOCK_XSEM, init_phase); 11248 11249 if (!CHIP_IS_E1x(sc)) 11250 REG_WR(sc, QM_REG_PF_EN, 1); 11251 11252 if (!CHIP_IS_E1x(sc)) { 11253 REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 11254 REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 11255 REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 11256 REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 11257 } 11258 ecore_init_block(sc, BLOCK_QM, init_phase); 11259 11260 ecore_init_block(sc, BLOCK_TM, init_phase); 11261 ecore_init_block(sc, BLOCK_DORQ, init_phase); 11262 11263 ecore_init_block(sc, BLOCK_BRB1, init_phase); 11264 ecore_init_block(sc, BLOCK_PRS, init_phase); 11265 ecore_init_block(sc, BLOCK_TSDM, init_phase); 11266 ecore_init_block(sc, BLOCK_CSDM, init_phase); 11267 ecore_init_block(sc, BLOCK_USDM, init_phase); 11268 ecore_init_block(sc, BLOCK_XSDM, init_phase); 11269 ecore_init_block(sc, BLOCK_UPB, init_phase); 11270 ecore_init_block(sc, BLOCK_XPB, init_phase); 11271 ecore_init_block(sc, BLOCK_PBF, init_phase); 11272 if (!CHIP_IS_E1x(sc)) 11273 REG_WR(sc, PBF_REG_DISABLE_PF, 0); 11274 11275 ecore_init_block(sc, BLOCK_CDU, init_phase); 11276 11277 ecore_init_block(sc, BLOCK_CFC, init_phase); 11278 11279 if (!CHIP_IS_E1x(sc)) 11280 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); 11281 11282 if (IS_MF(sc)) { 11283 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1); 11284 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, OVLAN(sc)); 11285 } 11286 11287 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 11288 11289 /* HC init per function */ 11290 if (sc->devinfo.int_block == INT_BLOCK_HC) { 11291 if (CHIP_IS_E1H(sc)) { 11292 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0); 11293 11294 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0); 11295 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0); 11296 } 11297 ecore_init_block(sc, BLOCK_HC, init_phase); 11298 11299 } else { 11300 uint32_t num_segs, sb_idx, prod_offset; 11301 11302 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0); 11303 11304 if (!CHIP_IS_E1x(sc)) { 11305 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 11306 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 11307 } 11308 11309 
ecore_init_block(sc, BLOCK_IGU, init_phase); 11310 11311 if (!CHIP_IS_E1x(sc)) { 11312 int dsb_idx = 0; 11313 /** 11314 * Producer memory: 11315 * E2 mode: address 0-135 match to the mapping memory; 11316 * 136 - PF0 default prod; 137 - PF1 default prod; 11317 * 138 - PF2 default prod; 139 - PF3 default prod; 11318 * 140 - PF0 attn prod; 141 - PF1 attn prod; 11319 * 142 - PF2 attn prod; 143 - PF3 attn prod; 11320 * 144-147 reserved. 11321 * 11322 * E1.5 mode - In backward compatible mode; 11323 * for non default SB; each even line in the memory 11324 * holds the U producer and each odd line hold 11325 * the C producer. The first 128 producers are for 11326 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 11327 * producers are for the DSB for each PF. 11328 * Each PF has five segments: (the order inside each 11329 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 11330 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 11331 * 144-147 attn prods; 11332 */ 11333 /* non-default-status-blocks */ 11334 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 11335 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 11336 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { 11337 prod_offset = (sc->igu_base_sb + sb_idx) * 11338 num_segs; 11339 11340 for (i = 0; i < num_segs; i++) { 11341 addr = IGU_REG_PROD_CONS_MEMORY + 11342 (prod_offset + i) * 4; 11343 REG_WR(sc, addr, 0); 11344 } 11345 /* send consumer update with value 0 */ 11346 bnx2x_ack_sb(sc, sc->igu_base_sb + sb_idx, 11347 USTORM_ID, 0, IGU_INT_NOP, 1); 11348 bnx2x_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); 11349 } 11350 11351 /* default-status-blocks */ 11352 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 11353 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; 11354 11355 if (CHIP_IS_MODE_4_PORT(sc)) 11356 dsb_idx = SC_FUNC(sc); 11357 else 11358 dsb_idx = SC_VN(sc); 11359 11360 prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? 11361 IGU_BC_BASE_DSB_PROD + dsb_idx : 11362 IGU_NORM_BASE_DSB_PROD + dsb_idx); 11363 11364 /* 11365 * igu prods come in chunks of E1HVN_MAX (4) - 11366 * does not matters what is the current chip mode 11367 */ 11368 for (i = 0; i < (num_segs * E1HVN_MAX); i += E1HVN_MAX) { 11369 addr = IGU_REG_PROD_CONS_MEMORY + 11370 (prod_offset + i) * 4; 11371 REG_WR(sc, addr, 0); 11372 } 11373 /* send consumer update with 0 */ 11374 if (CHIP_INT_MODE_IS_BC(sc)) { 11375 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11376 USTORM_ID, 0, IGU_INT_NOP, 1); 11377 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11378 CSTORM_ID, 0, IGU_INT_NOP, 1); 11379 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11380 XSTORM_ID, 0, IGU_INT_NOP, 1); 11381 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11382 TSTORM_ID, 0, IGU_INT_NOP, 1); 11383 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11384 ATTENTION_ID, 0, IGU_INT_NOP, 1); 11385 } else { 11386 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11387 USTORM_ID, 0, IGU_INT_NOP, 1); 11388 bnx2x_ack_sb(sc, sc->igu_dsb_id, 11389 ATTENTION_ID, 0, IGU_INT_NOP, 1); 11390 } 11391 bnx2x_igu_clear_sb(sc, sc->igu_dsb_id); 11392 11393 /* !!! 
these should become driver const once 11394 rf-tool supports split-68 const */ 11395 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 11396 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 11397 REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); 11398 REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); 11399 REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); 11400 REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); 11401 } 11402 } 11403 11404 /* Reset PCIE errors for debug */ 11405 REG_WR(sc, 0x2114, 0xffffffff); 11406 REG_WR(sc, 0x2120, 0xffffffff); 11407 11408 if (CHIP_IS_E1x(sc)) { 11409 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords */ 11410 main_mem_base = HC_REG_MAIN_MEMORY + 11411 SC_PORT(sc) * (main_mem_size * 4); 11412 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; 11413 main_mem_width = 8; 11414 11415 val = REG_RD(sc, main_mem_prty_clr); 11416 if (val) { 11417 PMD_DRV_LOG(DEBUG, sc, 11418 "Parity errors in HC block during function init (0x%x)!", 11419 val); 11420 } 11421 11422 /* Clear "false" parity errors in MSI-X table */ 11423 for (i = main_mem_base; 11424 i < main_mem_base + main_mem_size * 4; 11425 i += main_mem_width) { 11426 bnx2x_read_dmae(sc, i, main_mem_width / 4); 11427 bnx2x_write_dmae(sc, BNX2X_SP_MAPPING(sc, wb_data), 11428 i, main_mem_width / 4); 11429 } 11430 /* Clear HC parity attention */ 11431 REG_RD(sc, main_mem_prty_clr); 11432 } 11433 11434 /* Enable STORMs SP logging */ 11435 REG_WR8(sc, BAR_USTRORM_INTMEM + 11436 USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 11437 REG_WR8(sc, BAR_TSTRORM_INTMEM + 11438 TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 11439 REG_WR8(sc, BAR_CSTRORM_INTMEM + 11440 CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 11441 REG_WR8(sc, BAR_XSTRORM_INTMEM + 11442 XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 11443 11444 elink_phy_probe(&sc->link_params); 11445 11446 return 0; 11447 } 11448 11449 static void bnx2x_link_reset(struct bnx2x_softc *sc) 11450 { 11451 if (!BNX2X_NOMCP(sc)) { 11452 bnx2x_acquire_phy_lock(sc); 11453 elink_lfa_reset(&sc->link_params, &sc->link_vars); 11454 bnx2x_release_phy_lock(sc); 11455 } else { 11456 if (!CHIP_REV_IS_SLOW(sc)) { 11457 PMD_DRV_LOG(WARNING, sc, 11458 "Bootcode is missing - cannot reset link"); 11459 } 11460 } 11461 } 11462 11463 static void bnx2x_reset_port(struct bnx2x_softc *sc) 11464 { 11465 int port = SC_PORT(sc); 11466 uint32_t val; 11467 11468 /* reset physical Link */ 11469 bnx2x_link_reset(sc); 11470 11471 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0); 11472 11473 /* Do not rcv packets to BRB */ 11474 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port * 4, 0x0); 11475 /* Do not direct rcv packets that are not for MCP to the BRB */ 11476 REG_WR(sc, (port ? 
NIG_REG_LLH1_BRB1_NOT_MCP : 11477 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 11478 11479 /* Configure AEU */ 11480 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4, 0); 11481 11482 DELAY(100000); 11483 11484 /* Check for BRB port occupancy */ 11485 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port * 4); 11486 if (val) { 11487 PMD_DRV_LOG(DEBUG, sc, 11488 "BRB1 is not empty, %d blocks are occupied", val); 11489 } 11490 } 11491 11492 static void bnx2x_ilt_wr(struct bnx2x_softc *sc, uint32_t index, rte_iova_t addr) 11493 { 11494 int reg; 11495 uint32_t wb_write[2]; 11496 11497 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index * 8; 11498 11499 wb_write[0] = ONCHIP_ADDR1(addr); 11500 wb_write[1] = ONCHIP_ADDR2(addr); 11501 REG_WR_DMAE(sc, reg, wb_write, 2); 11502 } 11503 11504 static void bnx2x_clear_func_ilt(struct bnx2x_softc *sc, uint32_t func) 11505 { 11506 uint32_t i, base = FUNC_ILT_BASE(func); 11507 for (i = base; i < base + ILT_PER_FUNC; i++) { 11508 bnx2x_ilt_wr(sc, i, 0); 11509 } 11510 } 11511 11512 static void bnx2x_reset_func(struct bnx2x_softc *sc) 11513 { 11514 struct bnx2x_fastpath *fp; 11515 int port = SC_PORT(sc); 11516 int func = SC_FUNC(sc); 11517 int i; 11518 11519 /* Disable the function in the FW */ 11520 REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 11521 REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 11522 REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 11523 REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 11524 11525 /* FP SBs */ 11526 FOR_EACH_ETH_QUEUE(sc, i) { 11527 fp = &sc->fp[i]; 11528 REG_WR8(sc, BAR_CSTRORM_INTMEM + 11529 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 11530 SB_DISABLED); 11531 } 11532 11533 /* SP SB */ 11534 REG_WR8(sc, BAR_CSTRORM_INTMEM + 11535 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), SB_DISABLED); 11536 11537 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { 11538 REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 11539 0); 11540 } 11541 11542 /* Configure IGU */ 11543 if (sc->devinfo.int_block == INT_BLOCK_HC) { 11544 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0); 11545 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0); 11546 } else { 11547 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 11548 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 11549 } 11550 11551 if (CNIC_LOADED(sc)) { 11552 /* Disable Timer scan */ 11553 REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port * 4, 0); 11554 /* 11555 * Wait for at least 10ms and up to 2 second for the timers 11556 * scan to complete 11557 */ 11558 for (i = 0; i < 200; i++) { 11559 DELAY(10000); 11560 if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port * 4)) 11561 break; 11562 } 11563 } 11564 11565 /* Clear ILT */ 11566 bnx2x_clear_func_ilt(sc, func); 11567 11568 /* 11569 * Timers workaround bug for E2: if this is vnic-3, 11570 * we need to set the entire ilt range for this timers. 
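	 * (Added clarification: this mirrors the common-phase timers workaround
	 * documented in bnx2x_init_hw_common() above - the dummy TM client below
	 * points the first/last timers ILT entries of VNIC3 at the whole ILT
	 * range, so the still-running timers block cannot hit an invalid ILT
	 * translation while this function is torn down.)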
11571 */ 11572 if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) { 11573 struct ilt_client_info ilt_cli; 11574 /* use dummy TM client */ 11575 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 11576 ilt_cli.start = 0; 11577 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 11578 ilt_cli.client_num = ILT_CLIENT_TM; 11579 11580 ecore_ilt_boundary_init_op(sc, &ilt_cli, 0, INITOP_CLEAR); 11581 } 11582 11583 /* this assumes that reset_port() called before reset_func() */ 11584 if (!CHIP_IS_E1x(sc)) { 11585 bnx2x_pf_disable(sc); 11586 } 11587 11588 sc->dmae_ready = 0; 11589 } 11590 11591 static void bnx2x_release_firmware(struct bnx2x_softc *sc) 11592 { 11593 rte_free(sc->init_ops); 11594 rte_free(sc->init_ops_offsets); 11595 rte_free(sc->init_data); 11596 rte_free(sc->iro_array); 11597 } 11598 11599 static int bnx2x_init_firmware(struct bnx2x_softc *sc) 11600 { 11601 uint32_t len, i; 11602 uint8_t *p = sc->firmware; 11603 uint32_t off[24]; 11604 11605 for (i = 0; i < 24; ++i) 11606 off[i] = rte_be_to_cpu_32(*((uint32_t *) sc->firmware + i)); 11607 11608 len = off[0]; 11609 sc->init_ops = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); 11610 if (!sc->init_ops) 11611 goto alloc_failed; 11612 bnx2x_data_to_init_ops(p + off[1], sc->init_ops, len); 11613 11614 len = off[2]; 11615 sc->init_ops_offsets = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); 11616 if (!sc->init_ops_offsets) 11617 goto alloc_failed; 11618 bnx2x_data_to_init_offsets(p + off[3], sc->init_ops_offsets, len); 11619 11620 len = off[4]; 11621 sc->init_data = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); 11622 if (!sc->init_data) 11623 goto alloc_failed; 11624 bnx2x_data_to_init_data(p + off[5], sc->init_data, len); 11625 11626 sc->tsem_int_table_data = p + off[7]; 11627 sc->tsem_pram_data = p + off[9]; 11628 sc->usem_int_table_data = p + off[11]; 11629 sc->usem_pram_data = p + off[13]; 11630 sc->csem_int_table_data = p + off[15]; 11631 sc->csem_pram_data = p + off[17]; 11632 sc->xsem_int_table_data = p + off[19]; 11633 sc->xsem_pram_data = p + off[21]; 11634 11635 len = off[22]; 11636 sc->iro_array = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); 11637 if (!sc->iro_array) 11638 goto alloc_failed; 11639 bnx2x_data_to_iro_array(p + off[23], sc->iro_array, len); 11640 11641 return 0; 11642 11643 alloc_failed: 11644 bnx2x_release_firmware(sc); 11645 return -1; 11646 } 11647 11648 static int cut_gzip_prefix(const uint8_t * zbuf, int len) 11649 { 11650 #define MIN_PREFIX_SIZE (10) 11651 11652 int n = MIN_PREFIX_SIZE; 11653 uint16_t xlen; 11654 11655 if (!(zbuf[0] == 0x1f && zbuf[1] == 0x8b && zbuf[2] == Z_DEFLATED) || 11656 len <= MIN_PREFIX_SIZE) { 11657 return -1; 11658 } 11659 11660 /* optional extra fields are present */ 11661 if (zbuf[3] & 0x4) { 11662 xlen = zbuf[13]; 11663 xlen <<= 8; 11664 xlen += zbuf[12]; 11665 11666 n += xlen; 11667 } 11668 /* file name is present */ 11669 if (zbuf[3] & 0x8) { 11670 while ((zbuf[n++] != 0) && (n < len)) ; 11671 } 11672 11673 return n; 11674 } 11675 11676 static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t * zbuf, int len) 11677 { 11678 int ret; 11679 int data_begin = cut_gzip_prefix(zbuf, len); 11680 11681 PMD_DRV_LOG(DEBUG, sc, "ecore_gunzip %d", len); 11682 11683 if (data_begin <= 0) { 11684 PMD_DRV_LOG(NOTICE, sc, "bad gzip prefix"); 11685 return -1; 11686 } 11687 11688 memset(&zlib_stream, 0, sizeof(zlib_stream)); 11689 zlib_stream.next_in = zbuf + data_begin; 11690 zlib_stream.avail_in = len - data_begin; 11691 zlib_stream.next_out = sc->gz_buf; 11692 zlib_stream.avail_out = FW_BUF_SIZE; 11693 11694 ret = 
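	/*
	 * cut_gzip_prefix() above has already skipped the gzip member header,
	 * so zlib is handed a bare DEFLATE stream: the negative window-bits
	 * argument below tells inflateInit2() not to expect a gzip/zlib
	 * header or trailer.
	 */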
inflateInit2(&zlib_stream, -MAX_WBITS); 11695 if (ret != Z_OK) { 11696 PMD_DRV_LOG(NOTICE, sc, "zlib inflateInit2 error"); 11697 return ret; 11698 } 11699 11700 ret = inflate(&zlib_stream, Z_FINISH); 11701 if ((ret != Z_STREAM_END) && (ret != Z_OK)) { 11702 PMD_DRV_LOG(NOTICE, sc, "zlib inflate error: %d %s", ret, 11703 zlib_stream.msg); 11704 } 11705 11706 sc->gz_outlen = zlib_stream.total_out; 11707 if (sc->gz_outlen & 0x3) { 11708 PMD_DRV_LOG(NOTICE, sc, "firmware is not aligned. gz_outlen == %d", 11709 sc->gz_outlen); 11710 } 11711 sc->gz_outlen >>= 2; 11712 11713 inflateEnd(&zlib_stream); 11714 11715 if (ret == Z_STREAM_END) 11716 return 0; 11717 11718 return ret; 11719 } 11720 11721 static void 11722 ecore_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr, 11723 uint32_t addr, uint32_t len) 11724 { 11725 bnx2x_write_dmae_phys_len(sc, phys_addr, addr, len); 11726 } 11727 11728 void 11729 ecore_storm_memset_struct(struct bnx2x_softc *sc, uint32_t addr, size_t size, 11730 uint32_t * data) 11731 { 11732 uint8_t i; 11733 for (i = 0; i < size / 4; i++) { 11734 REG_WR(sc, addr + (i * 4), data[i]); 11735 } 11736 } 11737 11738 static const char *get_ext_phy_type(uint32_t ext_phy_type) 11739 { 11740 uint32_t phy_type_idx = ext_phy_type >> 8; 11741 static const char *types[] = 11742 { "DIRECT", "BNX2X-8071", "BNX2X-8072", "BNX2X-8073", 11743 "BNX2X-8705", "BNX2X-8706", "BNX2X-8726", "BNX2X-8481", "SFX-7101", 11744 "BNX2X-8727", 11745 "BNX2X-8727-NOC", "BNX2X-84823", "NOT_CONN", "FAILURE" 11746 }; 11747 11748 if (phy_type_idx < 12) 11749 return types[phy_type_idx]; 11750 else if (PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN == ext_phy_type) 11751 return types[12]; 11752 else 11753 return types[13]; 11754 } 11755 11756 static const char *get_state(uint32_t state) 11757 { 11758 uint32_t state_idx = state >> 12; 11759 static const char *states[] = { "CLOSED", "OPENING_WAIT4_LOAD", 11760 "OPENING_WAIT4_PORT", "OPEN", "CLOSING_WAIT4_HALT", 11761 "CLOSING_WAIT4_DELETE", "CLOSING_WAIT4_UNLOAD", 11762 "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", 11763 "UNKNOWN", "DISABLED", "DIAG", "ERROR", "UNDEFINED" 11764 }; 11765 11766 if (state_idx <= 0xF) 11767 return states[state_idx]; 11768 else 11769 return states[0x10]; 11770 } 11771 11772 static const char *get_recovery_state(uint32_t state) 11773 { 11774 static const char *states[] = { "NONE", "DONE", "INIT", 11775 "WAIT", "FAILED", "NIC_LOADING" 11776 }; 11777 return states[state]; 11778 } 11779 11780 static const char *get_rx_mode(uint32_t mode) 11781 { 11782 static const char *modes[] = { "NONE", "NORMAL", "ALLMULTI", 11783 "PROMISC", "MAX_MULTICAST", "ERROR" 11784 }; 11785 11786 if (mode < 0x4) 11787 return modes[mode]; 11788 else if (BNX2X_MAX_MULTICAST == mode) 11789 return modes[4]; 11790 else 11791 return modes[5]; 11792 } 11793 11794 #define BNX2X_INFO_STR_MAX 256 11795 static const char *get_bnx2x_flags(uint32_t flags) 11796 { 11797 int i; 11798 static const char *flag[] = { "ONE_PORT ", "NO_ISCSI ", 11799 "NO_FCOE ", "NO_WOL ", "USING_DAC ", "USING_MSIX ", 11800 "USING_MSI ", "DISABLE_MSI ", "UNKNOWN ", "NO_MCP ", 11801 "SAFC_TX_FLAG ", "MF_FUNC_DIS ", "TX_SWITCHING " 11802 }; 11803 static char flag_str[BNX2X_INFO_STR_MAX]; 11804 memset(flag_str, 0, BNX2X_INFO_STR_MAX); 11805 11806 for (i = 0; i < 5; i++) 11807 if (flags & (1 << i)) { 11808 strlcat(flag_str, flag[i], sizeof(flag_str)); 11809 flags ^= (1 << i); 11810 } 11811 if (flags) { 11812 static char unknown[BNX2X_INFO_STR_MAX]; 11813 snprintf(unknown, 32, "Unknown flag mask 
%x", flags); 11814 strlcat(flag_str, unknown, sizeof(flag_str)); 11815 } 11816 return flag_str; 11817 } 11818 11819 /* Prints useful adapter info. */ 11820 void bnx2x_print_adapter_info(struct bnx2x_softc *sc) 11821 { 11822 int i = 0; 11823 11824 PMD_DRV_LOG(INFO, sc, "========================================"); 11825 /* DPDK and Driver versions */ 11826 PMD_DRV_LOG(INFO, sc, "%12s : %s", "DPDK", 11827 rte_version()); 11828 PMD_DRV_LOG(INFO, sc, "%12s : %s", "Driver", 11829 bnx2x_pmd_version()); 11830 /* Firmware versions. */ 11831 PMD_DRV_LOG(INFO, sc, "%12s : %d.%d.%d", 11832 "Firmware", 11833 BNX2X_5710_FW_MAJOR_VERSION, 11834 BNX2X_5710_FW_MINOR_VERSION, 11835 BNX2X_5710_FW_REVISION_VERSION); 11836 PMD_DRV_LOG(INFO, sc, "%12s : %s", 11837 "Bootcode", sc->devinfo.bc_ver_str); 11838 /* Hardware chip info. */ 11839 PMD_DRV_LOG(INFO, sc, "%12s : %#08x", "ASIC", sc->devinfo.chip_id); 11840 PMD_DRV_LOG(INFO, sc, "%12s : %c%d", "Rev", (CHIP_REV(sc) >> 12) + 'A', 11841 (CHIP_METAL(sc) >> 4)); 11842 /* Bus PCIe info. */ 11843 PMD_DRV_LOG(INFO, sc, "%12s : 0x%x", "Vendor Id", 11844 sc->devinfo.vendor_id); 11845 PMD_DRV_LOG(INFO, sc, "%12s : 0x%x", "Device Id", 11846 sc->devinfo.device_id); 11847 PMD_DRV_LOG(INFO, sc, "%12s : width x%d, ", "Bus PCIe", 11848 sc->devinfo.pcie_link_width); 11849 switch (sc->devinfo.pcie_link_speed) { 11850 case 1: 11851 PMD_DRV_LOG(INFO, sc, "%23s", "2.5 Gbps"); 11852 break; 11853 case 2: 11854 PMD_DRV_LOG(INFO, sc, "%21s", "5 Gbps"); 11855 break; 11856 case 4: 11857 PMD_DRV_LOG(INFO, sc, "%21s", "8 Gbps"); 11858 break; 11859 default: 11860 PMD_DRV_LOG(INFO, sc, "%33s", "Unknown link speed"); 11861 } 11862 /* Device features. */ 11863 PMD_DRV_LOG(INFO, sc, "%12s : ", "Flags"); 11864 /* Miscellaneous flags. */ 11865 if (sc->devinfo.pcie_cap_flags & BNX2X_MSI_CAPABLE_FLAG) { 11866 PMD_DRV_LOG(INFO, sc, "%18s", "MSI"); 11867 i++; 11868 } 11869 if (sc->devinfo.pcie_cap_flags & BNX2X_MSIX_CAPABLE_FLAG) { 11870 if (i > 0) 11871 PMD_DRV_LOG(INFO, sc, "|"); 11872 PMD_DRV_LOG(INFO, sc, "%20s", "MSI-X"); 11873 i++; 11874 } 11875 PMD_DRV_LOG(INFO, sc, "%12s : %s", "OVLAN", (OVLAN(sc) ? "YES" : "NO")); 11876 PMD_DRV_LOG(INFO, sc, "%12s : %s", "MF", (IS_MF(sc) ? "YES" : "NO")); 11877 PMD_DRV_LOG(INFO, sc, "========================================"); 11878 } 11879 11880 /* Prints useful device info. */ 11881 void bnx2x_print_device_info(struct bnx2x_softc *sc) 11882 { 11883 __rte_unused uint32_t ext_phy_type; 11884 uint32_t offset, reg_val; 11885 11886 PMD_INIT_FUNC_TRACE(sc); 11887 offset = offsetof(struct shmem_region, 11888 dev_info.port_hw_config[0].external_phy_config); 11889 reg_val = REG_RD(sc, sc->devinfo.shmem_base + offset); 11890 if (sc->link_vars.phy_flags & PHY_XGXS_FLAG) 11891 ext_phy_type = ELINK_XGXS_EXT_PHY_TYPE(reg_val); 11892 else 11893 ext_phy_type = ELINK_SERDES_EXT_PHY_TYPE(reg_val); 11894 11895 /* Device features. */ 11896 PMD_DRV_LOG(INFO, sc, "%12s : %u", "Bnx2x Func", sc->pcie_func); 11897 PMD_DRV_LOG(INFO, sc, 11898 "%12s : %s", "Bnx2x Flags", get_bnx2x_flags(sc->flags)); 11899 PMD_DRV_LOG(INFO, sc, "%12s : %s", "DMAE Is", 11900 (sc->dmae_ready ? 
"Ready" : "Not Ready")); 11901 PMD_DRV_LOG(INFO, sc, "%12s : %u", "MTU", sc->mtu); 11902 PMD_DRV_LOG(INFO, sc, 11903 "%12s : %s", "PHY Type", get_ext_phy_type(ext_phy_type)); 11904 PMD_DRV_LOG(INFO, sc, "%12s : %x:%x:%x:%x:%x:%x", "MAC Addr", 11905 sc->link_params.mac_addr[0], 11906 sc->link_params.mac_addr[1], 11907 sc->link_params.mac_addr[2], 11908 sc->link_params.mac_addr[3], 11909 sc->link_params.mac_addr[4], 11910 sc->link_params.mac_addr[5]); 11911 PMD_DRV_LOG(INFO, sc, "%12s : %s", "RX Mode", get_rx_mode(sc->rx_mode)); 11912 PMD_DRV_LOG(INFO, sc, "%12s : %s", "State", get_state(sc->state)); 11913 if (sc->recovery_state) 11914 PMD_DRV_LOG(INFO, sc, "%12s : %s", "Recovery", 11915 get_recovery_state(sc->recovery_state)); 11916 /* Queue info. */ 11917 if (IS_PF(sc)) { 11918 switch (sc->sp->rss_rdata.rss_mode) { 11919 case ETH_RSS_MODE_DISABLED: 11920 PMD_DRV_LOG(INFO, sc, "%12s : %s", "Queues", "RSS mode - None"); 11921 break; 11922 case ETH_RSS_MODE_REGULAR: 11923 PMD_DRV_LOG(INFO, sc, "%12s : %s,", "Queues", "RSS mode - Regular"); 11924 PMD_DRV_LOG(INFO, sc, "%16d", sc->num_queues); 11925 break; 11926 default: 11927 PMD_DRV_LOG(INFO, sc, "%12s : %s", "Queues", "RSS mode - Unknown"); 11928 break; 11929 } 11930 } 11931 PMD_DRV_LOG(INFO, sc, "%12s : CQ = %lx, EQ = %lx", "SPQ Left", 11932 sc->cq_spq_left, sc->eq_spq_left); 11933 11934 PMD_DRV_LOG(INFO, sc, 11935 "%12s : %x", "Switch", sc->link_params.switch_cfg); 11936 PMD_DRV_LOG(INFO, sc, "pcie_bus=%d, pcie_device=%d", 11937 sc->pcie_bus, sc->pcie_device); 11938 PMD_DRV_LOG(INFO, sc, "bar0.addr=%p, bar1.addr=%p", 11939 sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr); 11940 PMD_DRV_LOG(INFO, sc, "port=%d, path=%d, vnic=%d, func=%d", 11941 PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc)); 11942 } 11943