/*
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "nvm_cfg.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "ecore_sp_commands.h"
#include "ecore_cxt.h"

#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)	/* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_OFFSET)

#define MCP_BYTES_PER_MBIT_OFFSET 17
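
/* Illustrative sketch (not part of the driver flow): how the mailbox
 * accessor macros above are typically used. DRV_MB_WR()/DRV_MB_RD()
 * address a field of `struct public_drv_mb' relative to the per-PF
 * mailbox discovered at init time, e.g.:
 *
 *	u32 seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
 *		  DRV_MSG_SEQ_NUMBER_MASK;
 *	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, 0);
 *
 * which expands to ecore_rd()/ecore_wr() accesses at
 * mcp_info->drv_mb_addr + OFFSETOF(struct public_drv_mb, <field>).
 */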

#ifndef ASIC_ONLY
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
#endif

bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	OSAL_BE32 tmp;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;
#endif

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			OSAL_BE32_TO_CPU(tmp);
	}
}

struct ecore_mcp_cmd_elem {
	osal_list_entry_t list;
	struct ecore_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
		       struct ecore_mcp_mb_params *p_mb_params,
		       u16 expected_seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
				 sizeof(*p_cmd_elem));
	if (!p_cmd_elem) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
		goto out;
	}

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
				   struct ecore_mcp_cmd_elem *p_cmd_elem)
{
	OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
	OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
				 struct ecore_mcp_cmd_elem) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return OSAL_NULL;
}

enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;

		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
					      &p_hwfn->mcp_info->cmd_list, list,
					      struct ecore_mcp_cmd_elem) {
			ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

#ifdef CONFIG_ECORE_LOCK_ALLOC
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
	}

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;
		return ECORE_INVAL;
	}
#endif

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x, mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}
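
/* A worked sketch of the shmem addressing used above (illustrative only,
 * based on how the SECTION_* macros are used throughout this file): each
 * public section is described by an "offsize" dword that packs the section's
 * offset and per-instance size. SECTION_OFFSIZE_ADDR() locates that dword
 * for a given section, and SECTION_ADDR(offsize, idx) yields the address of
 * instance `idx' of the section, e.g.:
 *
 *	u32 offsize = ecore_rd(p_hwfn, p_ptt,
 *			       SECTION_OFFSIZE_ADDR(p_info->public_base,
 *						    PUBLIC_DRV_MB));
 *	u32 pf_mb_addr = SECTION_ADDR(offsize, MCP_PF_ID(p_hwfn));
 *
 * This is how drv_mb_addr and mfw_mb_addr are derived per PF.
 */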

enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
		return ECORE_NOMEM;
	}
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
		return ECORE_NOMEM;
	}
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
		OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
		return ECORE_NOMEM;
	}
#endif
	OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
	OSAL_SPIN_LOCK_INIT(&p_info->link_lock);

	OSAL_LIST_INIT(&p_info->cmd_list);

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}

static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between init
	 * time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}

enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
#endif

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return ECORE_ABORTED;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the MFW up to 500 msec (50 * 1000 * 10usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}

/* Must be called while cmd_lock is acquired */
static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
						   struct ecore_mcp_cmd_elem,
						   list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}

/* Must be called while cmd_lock is acquired */
static enum _ecore_status_t
ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params *p_mb_params;
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return ECORE_AGAIN;

	p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return ECORE_UNKNOWN_ERROR;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != OSAL_NULL &&
	    p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      OFFSETOF(struct public_drv_mb,
					       union_data);
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return ECORE_SUCCESS;
}
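
/* Illustrative note: completion matching relies entirely on the sequence
 * number. Each request is stamped with (++drv_mb_seq masked by
 * DRV_MSG_SEQ_NUMBER_MASK), and the MFW echoes that number in fw_mb_header,
 * so (sketch):
 *
 *	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
 *	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
 *
 * identifies at most one pending list element whose expected_seq_num matches.
 */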

/* Must be called while cmd_lock is acquired */
static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_mb_params *p_mb_params,
				      u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);
	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
			    p_mb_params->data_src_size);
	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
			sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}

static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
				       bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}

void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;

	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn, false,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}

static enum _ecore_status_t
_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mcp_mb_params *p_mb_params,
			 u32 max_retries, u32 delay)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 cnt = 0;
	u16 seq_num;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (!ecore_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_UDELAY(delay);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_AGAIN;
	}

	/* Send the mailbox command */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = ECORE_NOMEM;
		goto err;
	}

	__ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		OSAL_UDELAY(delay);
		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		ecore_mcp_print_cpu_info(p_hwfn, p_ptt);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

		ecore_mcp_cmd_set_blocking(p_hwfn, true);
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
		return ECORE_AGAIN;
	}

	ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp, p_mb_params->mcp_param,
		   (cnt * delay) / 1000, (cnt * delay) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return ECORE_SUCCESS;

err:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}

static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct ecore_mcp_mb_params *p_mb_params)
{
	osal_size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 delay = CHIP_MCP_RESP_ITER_US;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		max_retries /= 10;
#endif

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size, p_mb_params->data_dst_size,
		       union_data_size);
		return ECORE_INVAL;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_ABORTED;
	}

	return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
					delay);
}

enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
			loaded--;
			loaded_port[p_hwfn->port_id]--;
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
				   loaded);
		}
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}
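
/* Typical usage of the simple command interface (an illustrative sketch;
 * DRV_MSG_CODE_LOAD_DONE is just an example command taken from this file):
 *
 *	u32 resp = 0, param = 0;
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0,
 *			   &resp, &param);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	// resp holds an FW_MSG_CODE_* value, param the command-specific output
 */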

enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size, u32 *i_buf)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size, u32 *o_buf)
{
	struct ecore_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	/* @DPDK */
	OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));

	return ECORE_SUCCESS;
}
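
/* Illustrative sketch of an NVM read transaction: the response buffer is
 * bounded by MCP_DRV_NVM_BUF_LEN and the actual length comes back in the
 * mailbox param. `cmd' and `offset' below are placeholders; real values
 * depend on the NVM operation being issued:
 *
 *	u32 resp, param, len;
 *	u32 buf[MCP_DRV_NVM_BUF_LEN / sizeof(u32)];
 *
 *	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, cmd, offset,
 *				  &resp, &param, &len, buf);
 */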

#ifndef ASIC_ONLY
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
				    u32 *p_load_code)
{
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	if (!loaded)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	else if (!loaded_port[p_hwfn->port_id])
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	else
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	/* On CMT, always tell that it's engine */
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}
#endif

static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
			 enum ecore_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}

static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			   &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

#define CONFIG_ECORE_L2_BITMAP_IDX	(0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX	(0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX	(0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX	(0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX	(0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX	(0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX	(0x1 << 6)

static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}
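
/* Worked example (illustrative): with CONFIG_ECORE_L2 and CONFIG_ECORE_SRIOV
 * defined and all other features compiled out, the function above returns
 *
 *	CONFIG_ECORE_L2_BITMAP_IDX | CONFIG_ECORE_SRIOV_BITMAP_IDX == 0x3
 *
 * This bitmap is reported to the MFW as drv_ver_1 in the load request below.
 */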

struct ecore_load_req_in_params {
	u8 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
#define ECORE_LOAD_REQ_HSI_VER_1	1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
		      p_in_params->timeout_val);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
		      p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}

static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
				   u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case ECORE_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case ECORE_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	}
}

enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};

static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
				    u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case ECORE_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case ECORE_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case ECORE_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}

enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role = 0, mfw_force_cmd;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role,
					     p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
						&mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}
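
/* Illustrative usage of the load-request flow (a sketch; any field not shown
 * is left zeroed by the caller):
 *
 *	struct ecore_load_req_params params;
 *
 *	OSAL_MEM_ZERO(&params, sizeof(params));
 *	params.drv_role = ECORE_DRV_ROLE_OS;
 *	params.avoid_eng_reset = false;
 *	rc = ecore_mcp_load_req(p_hwfn, p_ptt, &params);
 *	// on success, params.load_code is one of
 *	// FW_MSG_CODE_DRV_LOAD_{ENGINE, PORT, FUNCTION}
 */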

enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
		return rc;
	}

	/* Check if there is a DID mismatch between nvm-cfg/efuse */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		DP_NOTICE(p_hwfn, false,
			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	u32 wol_param, mcp_resp, mcp_param;

	/* @DPDK */
	wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
			     &mcp_resp, &mcp_param);
}

enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params mb_params;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}

enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}
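
/* Worked example (illustrative): the disabled/acked VF bitmaps are arrays of
 * VF_MAX_STATIC / 32 dwords, where VF `n' maps to bit (n % 32) of dword
 * (n / 32). E.g. marking VF 40 in a local bitmap before acking it:
 *
 *	u32 vfs[VF_MAX_STATIC / 32] = { 0 };
 *
 *	vfs[40 / 32] |= 1 << (40 % 32);	// dword 1, bit 8
 *	ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, vfs);
 */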

static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port,
					      transceiver_data));

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    OFFSETOF(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_MFW_FIELD(transceiver_state,
					  ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");

	OSAL_TRANSCEIVER_UPDATE(p_hwfn);
}

static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			      OFFSETOF(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
}
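
/* Worked example (illustrative): an eee_status value with EEE_ACTIVE_BIT set
 * and EEE_1G_ADV present in both the local-device and link-partner fields
 * would be decoded by the function above as:
 *
 *	p_link->eee_active      == true
 *	p_link->eee_adv_caps    == ECORE_EEE_1G_ADV
 *	p_link->eee_lp_adv_caps == ECORE_EEE_1G_ADV
 */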

static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct public_func *p_data,
				    int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	OSAL_MEM_ZERO(p_data, sizeof(*p_data));

	size = OSAL_MIN_T(u32, sizeof(*p_data),
			  SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
					      func_addr + (i << 2));

	return size;
}

static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					 OFFSETOF(struct public_port,
						  link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init) {
		/* Link indication with modern MFW arrives as per-PF
		 * indication.
		 */
		if (p_hwfn->mcp_info->capabilities &
		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
			struct public_func shmem_info;

			ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
						 MCP_PF_ID(p_hwfn));
			p_link->link_up = !!(shmem_info.status &
					     FUNC_STATUS_VIRTUAL_LINK_UP);
		} else {
			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
		}
	} else {
		p_link->link_up = false;
	}

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* Store the total line speed separately, since p_link->speed may
	 * later be changed according to the bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	OSAL_LINK_UPDATE(p_hwfn);
out:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}

enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	/* Set the shmem configuration according to params */
	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by ecore, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
	    params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
				   EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return ECORE_SUCCESS;
}
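
/* Illustrative sketch: forcing a link speed through the shmem PHY config.
 * The caller fills mcp_info->link_input and then requests an INIT_PHY.
 * The value 25000 assumes forced_speed is given in Mb/s, consistent with
 * the link-status decode above:
 *
 *	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
 *
 *	params->speed.autoneg = false;
 *	params->speed.forced_speed = 25000;
 *	rc = ecore_mcp_set_link(p_hwfn, p_ptt, true);	// b_up == true
 */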

u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
			PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}

static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and until the load phase, in which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}

static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
{
	enum ecore_mcp_protocol_type stats_type;
	union ecore_mcp_protocol_stats stats;
	struct ecore_mcp_mb_params mb_params;
	u32 hsi_param;
	enum _ecore_status_t rc;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	default:
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Invalid protocol type %d\n", type);
		return;
	}

	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}

static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
				    struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until MFW/qlediag enforce those limitations, assume there is
	 * always a limit, and correct the value to min `1' and max `100'
	 * if it isn't in range.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_OFFSET;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_OFFSET;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}
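
/* Worked example (illustrative): a shmem config word whose MIN_BW field
 * decodes to 0 and whose MAX_BW field decodes to 50 results, after the
 * clamping above, in bandwidth_min == 1 and bandwidth_max == 50.
 */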

static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		      &param);
}

static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	DP_NOTICE(p_hwfn, false,
		  "Fan failure was detected on the network interface card and it's going to be shut down.\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}

struct ecore_mdump_cmd_params {
	u32 cmd;
	void *p_data_src;
	u8 data_src_size;
	void *p_data_dst;
	u8 data_dst_size;
	u32 mcp_resp;
};

static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    struct ecore_mdump_cmd_params *p_mdump_cmd_params)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
	mb_params.param = p_mdump_cmd_params->cmd;
	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;

	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
		DP_INFO(p_hwfn,
			"The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
			p_mdump_cmd_params->cmd);
		rc = ECORE_NOTIMPL;
	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The mdump command is not supported by the MFW\n");
		rc = ECORE_NOTIMPL;
	}

	return rc;
}
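
/* Illustrative sketch of a typical mdump flow built on the wrapper above:
 * set the epoch value, arm/trigger a dump, and later retrieve the retained
 * data. `epoch' is a caller-supplied timestamp:
 *
 *	struct ecore_mdump_retain_data retain;
 *
 *	ecore_mcp_mdump_set_values(p_hwfn, p_ptt, epoch);
 *	ecore_mcp_mdump_trigger(p_hwfn, p_ptt);
 *	// ... after the dump has been taken ...
 *	rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &retain);
 */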

static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 epoch)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
	mdump_cmd_params.p_data_src = &epoch;
	mdump_cmd_params.data_src_size = sizeof(epoch);

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct mdump_config_stc *p_mdump_config)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
	mdump_cmd_params.p_data_dst = p_mdump_config;
	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);

	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
			mdump_cmd_params.mcp_resp);
		rc = ECORE_UNKNOWN_ERROR;
	}

	return rc;
}

enum _ecore_status_t
ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mdump_info *p_mdump_info)
{
	u32 addr, global_offsize, global_addr;
	struct mdump_config_stc mdump_config;
	enum _ecore_status_t rc;

	OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));

	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
				    PUBLIC_GLOBAL);
	global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	global_addr = SECTION_ADDR(global_offsize, 0);
	p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
					global_addr +
					OFFSETOF(struct public_global,
						 mdump_reason));

	if (p_mdump_info->reason) {
		rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_mdump_info->version = mdump_config.version;
		p_mdump_info->config = mdump_config.config;
		p_mdump_info->epoch = mdump_config.epoc;
		p_mdump_info->num_of_logs = mdump_config.num_of_logs;
		p_mdump_info->valid_logs = mdump_config.valid_logs;

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
			   p_mdump_info->reason, p_mdump_info->version,
			   p_mdump_info->config, p_mdump_info->epoch,
			   p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d\n", p_mdump_info->reason);
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t
ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mdump_retain_data *p_mdump_retain)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;
	struct mdump_retain_data_stc mfw_mdump_retain;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
	mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
	mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);

	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to get the mdump retained data [mcp_resp 0x%x]\n",
			mdump_cmd_params.mcp_resp);
		return ECORE_UNKNOWN_ERROR;
	}

	p_mdump_retain->valid = mfw_mdump_retain.valid;
	p_mdump_retain->epoch = mfw_mdump_retain.epoch;
	p_mdump_retain->pf = mfw_mdump_retain.pf;
	p_mdump_retain->status = mfw_mdump_retain.status;

	return ECORE_SUCCESS;
}
1842 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN; 1843 mdump_cmd_params.p_data_dst = &mfw_mdump_retain; 1844 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain); 1845 1846 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1847 if (rc != ECORE_SUCCESS) 1848 return rc; 1849 1850 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) { 1851 DP_INFO(p_hwfn, 1852 "Failed to get the mdump retained data [mcp_resp 0x%x]\n", 1853 mdump_cmd_params.mcp_resp); 1854 return ECORE_UNKNOWN_ERROR; 1855 } 1856 1857 p_mdump_retain->valid = mfw_mdump_retain.valid; 1858 p_mdump_retain->epoch = mfw_mdump_retain.epoch; 1859 p_mdump_retain->pf = mfw_mdump_retain.pf; 1860 p_mdump_retain->status = mfw_mdump_retain.status; 1861 1862 return ECORE_SUCCESS; 1863 } 1864 1865 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn, 1866 struct ecore_ptt *p_ptt) 1867 { 1868 struct ecore_mdump_cmd_params mdump_cmd_params; 1869 1870 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); 1871 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN; 1872 1873 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1874 } 1875 1876 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn, 1877 struct ecore_ptt *p_ptt) 1878 { 1879 struct ecore_mdump_retain_data mdump_retain; 1880 enum _ecore_status_t rc; 1881 1882 /* In CMT mode - no need for more than a single acknowledgment to the 1883 * MFW, and no more than a single notification to the upper driver. 1884 */ 1885 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev)) 1886 return; 1887 1888 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain); 1889 if (rc == ECORE_SUCCESS && mdump_retain.valid) { 1890 DP_NOTICE(p_hwfn, false, 1891 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n", 1892 mdump_retain.epoch, mdump_retain.pf, 1893 mdump_retain.status); 1894 } else { 1895 DP_NOTICE(p_hwfn, false, 1896 "The MFW notified that a critical error occurred in the device\n"); 1897 } 1898 1899 if (p_hwfn->p_dev->allow_mdump) { 1900 DP_NOTICE(p_hwfn, false, 1901 "Not acknowledging the notification to allow the MFW crash dump\n"); 1902 return; 1903 } 1904 1905 DP_NOTICE(p_hwfn, false, 1906 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n"); 1907 ecore_mcp_mdump_ack(p_hwfn, p_ptt); 1908 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); 1909 } 1910 1911 void 1912 ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 1913 { 1914 struct public_func shmem_info; 1915 u32 port_cfg, val; 1916 1917 if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) 1918 return; 1919 1920 OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info)); 1921 port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + 1922 OFFSETOF(struct public_port, oem_cfg_port)); 1923 val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE); 1924 if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) 1925 DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n", 1926 val); 1927 1928 val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE); 1929 if (val == OEM_CFG_SCHED_TYPE_ETS) 1930 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS; 1931 else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) 1932 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW; 1933 else 1934 DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n", 1935 val); 1936 1937 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, 1938 MCP_PF_ID(p_hwfn)); 1939 val = 
GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC); 1940 p_hwfn->ufp_info.tc = (u8)val; 1941 val = GET_MFW_FIELD(shmem_info.oem_cfg_func, 1942 OEM_CFG_FUNC_HOST_PRI_CTRL); 1943 if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) 1944 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC; 1945 else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) 1946 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS; 1947 else 1948 DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n", 1949 val); 1950 1951 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, 1952 "UFP shmem config: mode = %d tc = %d pri_type = %d\n", 1953 p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc, 1954 p_hwfn->ufp_info.pri_type); 1955 } 1956 1957 static enum _ecore_status_t 1958 ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 1959 { 1960 ecore_mcp_read_ufp_config(p_hwfn, p_ptt); 1961 1962 if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) { 1963 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; 1964 p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc; 1965 1966 ecore_qm_reconf(p_hwfn, p_ptt); 1967 } else { 1968 /* Merge UFP TC with the dcbx TC data */ 1969 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 1970 ECORE_DCBX_OPERATIONAL_MIB); 1971 } 1972 1973 /* update storm FW with negotiation results */ 1974 ecore_sp_pf_update_ufp(p_hwfn); 1975 1976 return ECORE_SUCCESS; 1977 } 1978 1979 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn, 1980 struct ecore_ptt *p_ptt) 1981 { 1982 struct ecore_mcp_info *info = p_hwfn->mcp_info; 1983 enum _ecore_status_t rc = ECORE_SUCCESS; 1984 bool found = false; 1985 u16 i; 1986 1987 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n"); 1988 1989 /* Read Messages from MFW */ 1990 ecore_mcp_read_mb(p_hwfn, p_ptt); 1991 1992 /* Compare current messages to old ones */ 1993 for (i = 0; i < info->mfw_mb_length; i++) { 1994 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i]) 1995 continue; 1996 1997 found = true; 1998 1999 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 2000 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", 2001 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); 2002 2003 switch (i) { 2004 case MFW_DRV_MSG_LINK_CHANGE: 2005 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false); 2006 break; 2007 case MFW_DRV_MSG_VF_DISABLED: 2008 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt); 2009 break; 2010 case MFW_DRV_MSG_LLDP_DATA_UPDATED: 2011 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2012 ECORE_DCBX_REMOTE_LLDP_MIB); 2013 break; 2014 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED: 2015 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2016 ECORE_DCBX_REMOTE_MIB); 2017 break; 2018 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: 2019 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2020 ECORE_DCBX_OPERATIONAL_MIB); 2021 /* clear the user-config cache */ 2022 OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0, 2023 sizeof(struct ecore_dcbx_set)); 2024 break; 2025 case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED: 2026 ecore_lldp_mib_update_event(p_hwfn, p_ptt); 2027 break; 2028 case MFW_DRV_MSG_OEM_CFG_UPDATE: 2029 ecore_mcp_handle_ufp_event(p_hwfn, p_ptt); 2030 break; 2031 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: 2032 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt); 2033 break; 2034 case MFW_DRV_MSG_ERROR_RECOVERY: 2035 ecore_mcp_handle_process_kill(p_hwfn, p_ptt); 2036 break; 2037 case MFW_DRV_MSG_GET_LAN_STATS: 2038 case MFW_DRV_MSG_GET_FCOE_STATS: 2039 case MFW_DRV_MSG_GET_ISCSI_STATS: 2040 case MFW_DRV_MSG_GET_RDMA_STATS: 2041 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i); 2042 break; 2043 case MFW_DRV_MSG_BW_UPDATE: 2044 
			ecore_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			ecore_mcp_handle_fan_failure(p_hwfn);
			break;
		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
			ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = ECORE_INVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

		/* The MFW expects the answer in BE, so force the write to
		 * that format.
		 */
		ecore_wr(p_hwfn, p_ptt,
			 info->mfw_mb_addr + sizeof(u32) +
			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
			 sizeof(u32) + i * sizeof(u32), val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn, false,
			  "Received an MFW message indication but no"
			  " new message!\n");
		rc = ECORE_INVAL;
	}

	/* Copy the new mfw messages into the shadow */
	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}

enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt,
					   u32 *p_mfw_ver,
					   u32 *p_running_bundle_id)
{
	u32 global_offsize;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
		return ECORE_SUCCESS;
	}
#endif

	if (IS_VF(p_hwfn->p_dev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return ECORE_SUCCESS;
		} else {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return ECORE_INVAL;
		}
	}

	global_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						       PUBLIC_GLOBAL));
	*p_mfw_ver =
	    ecore_rd(p_hwfn, p_ptt,
		     SECTION_ADDR(global_offsize, 0) +
		     OFFSETOF(struct public_global, mfw_ver));

	if (p_running_bundle_id != OSAL_NULL) {
		*p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
						SECTION_ADDR(global_offsize, 0) +
						OFFSETOF(struct public_global,
							 running_bundle_id));
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u32 *p_media_type)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (!p_ptt) {
		*p_media_type = MEDIA_UNSPECIFIED;
		rc = ECORE_INVAL;
	} else {
		*p_media_type = ecore_rd(p_hwfn, p_ptt,
					 p_hwfn->mcp_info->port_addr +
					 OFFSETOF(struct public_port,
						  media_type));
	}

	return rc;
}

enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt,
						    u32 *p_transceiver_type)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}
	if (!p_ptt) {
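		/* Without a PTT entry the management FW window cannot be
		 * read safely, so report an absent module and let the
		 * caller treat it as an error.
		 */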
		*p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
		rc = ECORE_INVAL;
	} else {
		*p_transceiver_type = ecore_rd(p_hwfn, p_ptt,
					       p_hwfn->mcp_info->port_addr +
					       OFFSETOF(struct public_port,
							transceiver_data));
	}

	return rc;
}

static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type)
{
	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
	    ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
	    (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
		return 1;

	return 0;
}

enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 *p_speed_mask)
{
	u32 transceiver_data, transceiver_type, transceiver_state;
	enum _ecore_status_t rc;

	rc = ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	transceiver_state = GET_MFW_FIELD(transceiver_data,
					  ETH_TRANSCEIVER_STATE);

	transceiver_type = GET_MFW_FIELD(transceiver_data,
					 ETH_TRANSCEIVER_TYPE);

	if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
		return ECORE_INVAL;

	switch (transceiver_type) {
	case ETH_TRANSCEIVER_TYPE_1G_LX:
	case ETH_TRANSCEIVER_TYPE_1G_SX:
	case ETH_TRANSCEIVER_TYPE_1G_PCC:
	case ETH_TRANSCEIVER_TYPE_1G_ACC:
	case ETH_TRANSCEIVER_TYPE_1000BASET:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;

	case ETH_TRANSCEIVER_TYPE_10G_SR:
	case ETH_TRANSCEIVER_TYPE_10G_LR:
	case ETH_TRANSCEIVER_TYPE_10G_LRM:
	case ETH_TRANSCEIVER_TYPE_10G_ER:
	case ETH_TRANSCEIVER_TYPE_10G_PCC:
	case ETH_TRANSCEIVER_TYPE_10G_ACC:
	case ETH_TRANSCEIVER_TYPE_4x10G:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;

	case ETH_TRANSCEIVER_TYPE_40G_LR4:
	case ETH_TRANSCEIVER_TYPE_40G_SR4:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;

	case ETH_TRANSCEIVER_TYPE_100G_AOC:
	case ETH_TRANSCEIVER_TYPE_100G_SR4:
	case ETH_TRANSCEIVER_TYPE_100G_LR4:
	case ETH_TRANSCEIVER_TYPE_100G_ER4:
	case ETH_TRANSCEIVER_TYPE_100G_ACC:
		*p_speed_mask =
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		break;

	case ETH_TRANSCEIVER_TYPE_25G_SR:
	case ETH_TRANSCEIVER_TYPE_25G_LR:
	case ETH_TRANSCEIVER_TYPE_25G_AOC:
	case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
	case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
	case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		break;

	case ETH_TRANSCEIVER_TYPE_25G_CA_N:
	case ETH_TRANSCEIVER_TYPE_25G_CA_S:
	case ETH_TRANSCEIVER_TYPE_25G_CA_L:
	case ETH_TRANSCEIVER_TYPE_4x25G_CR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;

	case ETH_TRANSCEIVER_TYPE_40G_CR4:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;

	case ETH_TRANSCEIVER_TYPE_100G_CR4:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
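		/* 4-lane copper modules can also be driven at the per-lane
		 * sub-rates, so the reported mask is the union of every
		 * speed capability bit rather than a single speed.
		 */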
		*p_speed_mask =
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;

	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
		*p_speed_mask =
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;

	case ETH_TRANSCEIVER_TYPE_XLPPI:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		break;

	case ETH_TRANSCEIVER_TYPE_10G_BASET:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;

	default:
		DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
			transceiver_type);
		*p_speed_mask = 0xff;
		break;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 *p_board_config)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}
	if (!p_ptt) {
		*p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
		rc = ECORE_INVAL;
	} else {
		nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
		nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
		port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		    OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
		*p_board_config = ecore_rd(p_hwfn, p_ptt,
					   port_cfg_addr +
					   OFFSETOF(struct nvm_cfg1_port,
						    board_cfg));
	}

	return rc;
}

/* @DPDK */
/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
				 enum ecore_pci_personality *p_proto)
{
	*p_proto = ECORE_PCI_ETH;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32)*p_proto);
}

/* @DPDK */
static enum _ecore_status_t
ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      enum ecore_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;

	*p_proto = ECORE_PCI_ETH;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32)*p_proto, resp, param);
	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
			  struct public_func *p_info,
			  struct ecore_ptt *p_ptt,
			  enum ecore_pci_personality *p_proto)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
		    ECORE_SUCCESS)
			ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	default:
		rc = ECORE_INVAL;
	}

	return rc;
}

enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *info;
	struct public_func shmem_info;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				      &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return ECORE_INVAL;
	}

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		/* TODO - are there protocols for which there's no MAC? */
		DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
	}

	/* TODO - are these calculations true for BE machine? */
	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	if (info->mtu == 0)
		info->mtu = 1500;

	DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x"
		   " protocol %02x BW [%02x - %02x]"
		   " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
		   " node %lx ovlan %04x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   (unsigned long)info->wwn_port,
		   (unsigned long)info->wwn_node, info->ovlan);

	return ECORE_SUCCESS;
}

struct ecore_mcp_link_params
*ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct ecore_mcp_link_state
*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
		p_hwfn->mcp_info->link_output.link_up = true;
	}
#endif

	return &p_hwfn->mcp_info->link_output;
}

struct ecore_mcp_link_capabilities
*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt,
			   DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	OSAL_MSLEEP(1020);

	return rc;
}

const struct ecore_mcp_function_info
*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->func_info;
}

int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt, u32 personalities)
{
	enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
	struct public_func shmem_info;
	int i, count = 0, num_pfs;

	num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);

	for (i = 0; i < num_pfs; i++) {
		ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
					 MCP_PF_ID_BY_REL(p_hwfn, i));
		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
					      &protocol) != ECORE_SUCCESS)
			continue;

		if ((1 << ((u32)protocol)) & personalities)
			count++;
	}

	return count;
}

enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u32 *p_flash_size)
{
	u32 flash_size;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
		return ECORE_INVAL;
	}
#endif

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));

	*p_flash_size = flash_size;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Avoid triggering a recovery since such a process"
			  " is already in progress\n");
		return ECORE_AGAIN;
	}

	DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	enum _ecore_status_t rc;

	/* Only the leader can configure MSIX, and it needs to take CMT
	 * into account.
	 */
	if (!IS_LEAD_HWFN(p_hwfn))
		return ECORE_SUCCESS;

	num *= p_hwfn->p_dev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			   &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
			  vf_id);
		rc = ECORE_INVAL;
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts for VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}

static enum _ecore_status_t
ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u8 num)
{
	u32 resp = 0, param = num, rc_param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
			   param, &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
		DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
		rc = ECORE_INVAL;
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts for VFs\n",
			   num);
	}

	return rc;
}

enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u8 vf_id, u8 num)
{
	if (ECORE_IS_BB(p_hwfn->p_dev))
		return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
	else
		return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
}

enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mcp_drv_version *p_ver)
{
	struct ecore_mcp_mb_params mb_params;
	struct drv_version_stc drv_version;
	u32 num_words, i;
	void *p_name;
	OSAL_BE32 val;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
	drv_version.version = p_ver->version;
	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
	for (i = 0; i < num_words; i++) {
		/* The driver name is expected to be in a big-endian format */
		p_name = &p_ver->name[i * sizeof(u32)];
		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
		*(u32 *)&drv_version.name[i * sizeof(u32)] = val;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &drv_version;
	mb_params.data_src_size = sizeof(drv_version);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

/* Wait a maximum of 100 msec for the MCP to halt */
#define ECORE_MCP_HALT_SLEEP_MS 10
#define ECORE_MCP_HALT_MAX_RETRIES 10

enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0, cpu_state, cnt = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	do {
		OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
		cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
			break;
	} while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);

	if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
		return ECORE_BUSY;
	}

	ecore_mcp_cmd_set_blocking(p_hwfn, true);

	return ECORE_SUCCESS;
}

#define ECORE_MCP_RESUME_SLEEP_MS 10

enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state;

	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

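	/* Resume mirrors ecore_mcp_halt(): the state register is cleared
	 * first, then the SOFT_HALT bit is dropped from CPU_MODE, and
	 * CPU_STATE is re-read after a short delay to verify the MCP really
	 * left the halted state before blocking mode is lifted.
	 */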
	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);

	OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);

	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  cpu_mode, cpu_state);
		return ECORE_BUSY;
	}

	ecore_mcp_cmd_set_blocking(p_hwfn, false);

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   enum ecore_ov_client client)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	enum _ecore_status_t rc;

	switch (client) {
	case ECORE_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case ECORE_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case ECORE_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

enum _ecore_status_t
ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 enum ecore_ov_driver_state drv_state)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	enum _ecore_status_t rc;

	switch (drv_state) {
	case ECORE_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case ECORE_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case ECORE_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}

enum _ecore_status_t
ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_fc_npiv_tbl *p_table)
{
	return 0;
}

enum _ecore_status_t
ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u16 mtu)
{
	return 0;
}

enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       enum ecore_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	enum _ecore_status_t rc;

	switch (mode) {
	case ECORE_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case ECORE_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case ECORE_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 mask_parities)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			   mask_parities, &resp, &param);

	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not ack mask parity request. Old MFW?\n");
		rc = ECORE_INVAL;
	}

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
					u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	u32 bytes_left, offset, bytes_to_copy, buf_size;
	u32 nvm_offset, resp, param;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	bytes_left = len;
	offset = 0;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MCP_DRV_NVM_BUF_LEN);
		nvm_offset = (addr + offset) | (bytes_to_copy <<
						DRV_MB_PARAM_NVM_LEN_OFFSET);
		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					  DRV_MSG_CODE_NVM_READ_NVRAM,
					  nvm_offset, &resp, &param, &buf_size,
					  (u32 *)(p_buf + offset));
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_dev, false,
				  "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
				  rc);
			resp = FW_MSG_CODE_ERROR;
			break;
		}

		if (resp != FW_MSG_CODE_NVM_OK) {
			DP_NOTICE(p_dev, false,
				  "nvm read failed, resp = 0x%08x\n", resp);
			rc = ECORE_UNKNOWN_ERROR;
			break;
		}

		/* This can be a lengthy process, and it's possible the
		 * scheduler isn't preemptible. Sleep a bit to prevent
		 * CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		offset += buf_size;
		bytes_left -= buf_size;
	}

	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
					u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp, param;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				  (cmd == ECORE_PHY_CORE_READ) ?
				  DRV_MSG_CODE_PHY_CORE_READ :
				  DRV_MSG_CODE_PHY_RAW_READ,
				  addr, &resp, &param, &len, (u32 *)p_buf);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);

	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
	ecore_ptt_release(p_hwfn, p_ptt);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp, param;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;
	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
			   &resp, &param);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
						  u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp, param;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;
	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
			   &resp, &param);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

/* rc is given ECORE_INVAL as a default since the loop below might not be
 * entered at all if len is 0.
 */
enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_ptt *p_ptt;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	switch (cmd) {
	case ECORE_PUT_FILE_DATA:
		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
		break;
	case ECORE_NVM_WRITE_NVRAM:
		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
		break;
	case ECORE_EXT_PHY_FW_UPGRADE:
		nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
			  cmd);
		rc = ECORE_INVAL;
		goto out;
	}

	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MCP_DRV_NVM_BUF_LEN);
		nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
			      addr) + buf_idx;
		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
					  &resp, &param, buf_size,
					  (u32 *)&p_buf[buf_idx]);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_dev, false,
				  "ecore_mcp_nvm_write() failed, rc = %d\n",
				  rc);
			resp = FW_MSG_CODE_ERROR;
			break;
		}

		if (resp != FW_MSG_CODE_OK &&
		    resp != FW_MSG_CODE_NVM_OK &&
		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
			DP_NOTICE(p_dev, false,
				  "nvm write failed, resp = 0x%08x\n", resp);
			rc = ECORE_UNKNOWN_ERROR;
			break;
		}

		/* This can be a lengthy process, and it's possible the
		 * scheduler isn't preemptible. Sleep a bit to prevent
		 * CPU hogging.
		 */
		if (buf_idx % 0x1000 >
		    (buf_idx + buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		buf_idx += buf_size;
	}

	p_dev->mcp_nvm_resp = resp;
out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp, param, nvm_cmd;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
		  DRV_MSG_CODE_PHY_RAW_WRITE;
	rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
				  &resp, &param, len, (u32 *)p_buf);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
						   u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp, param;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
			   &resp, &param);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u32 port, u32 addr, u32 offset,
					    u32 len, u8 *p_buf)
{
	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
	u32 resp, param;
	enum _ecore_status_t rc;

	nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
		     (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
	addr = offset;
	offset = 0;
	bytes_left = len;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MAX_I2C_TRANSACTION_SIZE);
		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		nvm_offset |= ((addr + offset) <<
			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
		nvm_offset |= (bytes_to_copy <<
			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					  DRV_MSG_CODE_TRANSCEIVER_READ,
					  nvm_offset, &resp, &param, &buf_size,
					  (u32 *)(p_buf + offset));
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
				  rc);
			return rc;
		}

		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
			return ECORE_NODEV;
		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		offset += buf_size;
		bytes_left -= buf_size;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 port, u32 addr, u32 offset,
					     u32 len, u8 *p_buf)
{
	u32 buf_idx, buf_size, nvm_offset, resp, param;
	enum _ecore_status_t rc;

	nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
		     (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MAX_I2C_TRANSACTION_SIZE);
		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		nvm_offset |= ((offset + buf_idx) <<
			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
		nvm_offset |= (buf_size <<
			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
					  DRV_MSG_CODE_TRANSCEIVER_WRITE,
					  nvm_offset, &resp, &param, buf_size,
					  (u32 *)&p_buf[buf_idx]);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "Failed to send a transceiver write command to the MFW. rc = %d.\n",
				  rc);
			return rc;
		}

		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
			return ECORE_NODEV;
		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		buf_idx += buf_size;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
			   drv_mb_param, &rsp, gpio_val);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u16 gpio, u16 gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, param, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
		       (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_direction,
					 u32 *gpio_ctrl)
{
	u32 drv_mb_param = 0, rsp, val = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
			   drv_mb_param, &rsp, &val);
	if (rc != ECORE_SUCCESS)
		return rc;

	*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
			  DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
	*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
		     DRV_MB_PARAM_GPIO_CTRL_OFFSET;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, num_images);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
	struct bist_nvm_image_att *p_image_att, u32 image_index)
{
	u32 buf_size, nvm_offset, resp, param;
	enum _ecore_status_t rc;

	nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
		      DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
	nvm_offset |= (image_index <<
		       DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
				  nvm_offset, &resp, &param, &buf_size,
				  (u32 *)p_image_att);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t
ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_temperature_info *p_temp_info)
{
	struct ecore_temperature_sensor *p_temp_sensor;
	struct temperature_status_stc mfw_temp_info;
	struct ecore_mcp_mb_params mb_params;
	u32 val;
	enum _ecore_status_t rc;
	u8 i;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
	mb_params.p_data_dst = &mfw_temp_info;
	mb_params.data_dst_size = sizeof(mfw_temp_info);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

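	/* Each sensor is reported as one packed dword, so the unpacking
	 * below is pure shift-and-mask. A worked example with a
	 * hypothetical dword whose CURRENT_TEMP field holds 55 (degrees
	 * Celsius):
	 *
	 *	val = mfw_temp_info.sensor[i];
	 *	current_temp = (val & CURRENT_TEMP_MASK) >>
	 *		       CURRENT_TEMP_OFFSET;	-- == 55
	 */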
	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
	p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
					      ECORE_MAX_NUM_OF_SENSORS);
	for (i = 0; i < p_temp_info->num_sensors; i++) {
		val = mfw_temp_info.sensor[i];
		p_temp_sensor = &p_temp_info->sensors[i];
		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
						 SENSOR_LOCATION_OFFSET;
		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
						THRESHOLD_HIGH_OFFSET;
		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
					  CRITICAL_TEMPERATURE_OFFSET;
		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
					      CURRENT_TEMP_OFFSET;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_get_mba_versions(
	struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	struct ecore_mba_vers *p_mba_vers)
{
	u32 buf_size, resp, param;
	enum _ecore_status_t rc;

	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
				  0, &resp, &param, &buf_size,
				  &p_mba_vers->mba_vers[0]);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
		rc = ECORE_UNKNOWN_ERROR;

	if (buf_size != MCP_DRV_NVM_BUF_LEN)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u64 *num_events)
{
	u32 rsp;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
			     0, &rsp, (u32 *)num_events);
}

static enum resource_id_enum
ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
{
	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

	switch (res_id) {
	case ECORE_SB:
		mfw_res_id = RESOURCE_NUM_SB_E;
		break;
	case ECORE_L2_QUEUE:
		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
		break;
	case ECORE_VPORT:
		mfw_res_id = RESOURCE_NUM_VPORT_E;
		break;
	case ECORE_RSS_ENG:
		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
		break;
	case ECORE_PQ:
		mfw_res_id = RESOURCE_NUM_PQ_E;
		break;
	case ECORE_RL:
		mfw_res_id = RESOURCE_NUM_RL_E;
		break;
	case ECORE_MAC:
	case ECORE_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		mfw_res_id = RESOURCE_VFC_FILTER_E;
		break;
	case ECORE_ILT:
		mfw_res_id = RESOURCE_ILT_E;
		break;
	case ECORE_LL2_QUEUE:
		mfw_res_id = RESOURCE_LL2_QUEUE_E;
		break;
	case ECORE_RDMA_CNQ_RAM:
	case ECORE_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		mfw_res_id = RESOURCE_CQS_E;
		break;
	case ECORE_RDMA_STATS_QUEUE:
		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
		break;
	case ECORE_BDQ:
		mfw_res_id = RESOURCE_BDQ_E;
		break;
	default:
		break;
	}

	return mfw_res_id;
}

#define ECORE_RESC_ALLOC_VERSION_MAJOR 2
#define ECORE_RESC_ALLOC_VERSION_MINOR 0
#define ECORE_RESC_ALLOC_VERSION \
	((ECORE_RESC_ALLOC_VERSION_MAJOR << \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \
	 (ECORE_RESC_ALLOC_VERSION_MINOR << \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))

struct ecore_resc_alloc_in_params {
	u32 cmd;
	enum ecore_resources res_id;
	u32 resc_max_val;
};

struct ecore_resc_alloc_out_params {
	u32 mcp_resp;
	u32 mcp_param;
	u32 resc_num;
	u32
resc_start; 3531 u32 vf_resc_num; 3532 u32 vf_resc_start; 3533 u32 flags; 3534 }; 3535 3536 #define ECORE_RECOVERY_PROLOG_SLEEP_MS 100 3537 3538 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev) 3539 { 3540 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 3541 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 3542 enum _ecore_status_t rc; 3543 3544 /* Allow ongoing PCIe transactions to complete */ 3545 OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS); 3546 3547 /* Clear the PF's internal FID_enable in the PXP */ 3548 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); 3549 if (rc != ECORE_SUCCESS) 3550 DP_NOTICE(p_hwfn, false, 3551 "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n", 3552 rc); 3553 3554 return rc; 3555 } 3556 3557 static enum _ecore_status_t 3558 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn, 3559 struct ecore_ptt *p_ptt, 3560 struct ecore_resc_alloc_in_params *p_in_params, 3561 struct ecore_resc_alloc_out_params *p_out_params) 3562 { 3563 struct ecore_mcp_mb_params mb_params; 3564 struct resource_info mfw_resc_info; 3565 enum _ecore_status_t rc; 3566 3567 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info)); 3568 3569 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id); 3570 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) { 3571 DP_ERR(p_hwfn, 3572 "Failed to match resource %d [%s] with the MFW resources\n", 3573 p_in_params->res_id, 3574 ecore_hw_get_resc_name(p_in_params->res_id)); 3575 return ECORE_INVAL; 3576 } 3577 3578 switch (p_in_params->cmd) { 3579 case DRV_MSG_SET_RESOURCE_VALUE_MSG: 3580 mfw_resc_info.size = p_in_params->resc_max_val; 3581 /* Fallthrough */ 3582 case DRV_MSG_GET_RESOURCE_ALLOC_MSG: 3583 break; 3584 default: 3585 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n", 3586 p_in_params->cmd); 3587 return ECORE_INVAL; 3588 } 3589 3590 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); 3591 mb_params.cmd = p_in_params->cmd; 3592 mb_params.param = ECORE_RESC_ALLOC_VERSION; 3593 mb_params.p_data_src = &mfw_resc_info; 3594 mb_params.data_src_size = sizeof(mfw_resc_info); 3595 mb_params.p_data_dst = mb_params.p_data_src; 3596 mb_params.data_dst_size = mb_params.data_src_size; 3597 3598 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3599 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n", 3600 p_in_params->cmd, p_in_params->res_id, 3601 ecore_hw_get_resc_name(p_in_params->res_id), 3602 GET_MFW_FIELD(mb_params.param, 3603 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), 3604 GET_MFW_FIELD(mb_params.param, 3605 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), 3606 p_in_params->resc_max_val); 3607 3608 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 3609 if (rc != ECORE_SUCCESS) 3610 return rc; 3611 3612 p_out_params->mcp_resp = mb_params.mcp_resp; 3613 p_out_params->mcp_param = mb_params.mcp_param; 3614 p_out_params->resc_num = mfw_resc_info.size; 3615 p_out_params->resc_start = mfw_resc_info.offset; 3616 p_out_params->vf_resc_num = mfw_resc_info.vf_size; 3617 p_out_params->vf_resc_start = mfw_resc_info.vf_offset; 3618 p_out_params->flags = mfw_resc_info.flags; 3619 3620 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3621 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n", 3622 GET_MFW_FIELD(p_out_params->mcp_param, 3623 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), 3624 GET_MFW_FIELD(p_out_params->mcp_param, 3625 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), 3626 p_out_params->resc_num, p_out_params->resc_start, 3627 
p_out_params->vf_resc_num, p_out_params->vf_resc_start, 3628 p_out_params->flags); 3629 3630 return ECORE_SUCCESS; 3631 } 3632 3633 enum _ecore_status_t 3634 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3635 enum ecore_resources res_id, u32 resc_max_val, 3636 u32 *p_mcp_resp) 3637 { 3638 struct ecore_resc_alloc_out_params out_params; 3639 struct ecore_resc_alloc_in_params in_params; 3640 enum _ecore_status_t rc; 3641 3642 OSAL_MEM_ZERO(&in_params, sizeof(in_params)); 3643 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG; 3644 in_params.res_id = res_id; 3645 in_params.resc_max_val = resc_max_val; 3646 OSAL_MEM_ZERO(&out_params, sizeof(out_params)); 3647 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, 3648 &out_params); 3649 if (rc != ECORE_SUCCESS) 3650 return rc; 3651 3652 *p_mcp_resp = out_params.mcp_resp; 3653 3654 return ECORE_SUCCESS; 3655 } 3656 3657 enum _ecore_status_t 3658 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3659 enum ecore_resources res_id, u32 *p_mcp_resp, 3660 u32 *p_resc_num, u32 *p_resc_start) 3661 { 3662 struct ecore_resc_alloc_out_params out_params; 3663 struct ecore_resc_alloc_in_params in_params; 3664 enum _ecore_status_t rc; 3665 3666 OSAL_MEM_ZERO(&in_params, sizeof(in_params)); 3667 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG; 3668 in_params.res_id = res_id; 3669 OSAL_MEM_ZERO(&out_params, sizeof(out_params)); 3670 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, 3671 &out_params); 3672 if (rc != ECORE_SUCCESS) 3673 return rc; 3674 3675 *p_mcp_resp = out_params.mcp_resp; 3676 3677 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) { 3678 *p_resc_num = out_params.resc_num; 3679 *p_resc_start = out_params.resc_start; 3680 } 3681 3682 return ECORE_SUCCESS; 3683 } 3684 3685 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn, 3686 struct ecore_ptt *p_ptt) 3687 { 3688 u32 mcp_resp, mcp_param; 3689 3690 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0, 3691 &mcp_resp, &mcp_param); 3692 } 3693 3694 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn, 3695 struct ecore_ptt *p_ptt, 3696 u32 param, u32 *p_mcp_resp, 3697 u32 *p_mcp_param) 3698 { 3699 enum _ecore_status_t rc; 3700 3701 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param, 3702 p_mcp_resp, p_mcp_param); 3703 if (rc != ECORE_SUCCESS) 3704 return rc; 3705 3706 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) { 3707 DP_INFO(p_hwfn, 3708 "The resource command is unsupported by the MFW\n"); 3709 return ECORE_NOTIMPL; 3710 } 3711 3712 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) { 3713 u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE); 3714 3715 DP_NOTICE(p_hwfn, false, 3716 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n", 3717 param, opcode); 3718 return ECORE_INVAL; 3719 } 3720 3721 return rc; 3722 } 3723 3724 enum _ecore_status_t 3725 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3726 struct ecore_resc_lock_params *p_params) 3727 { 3728 u32 param = 0, mcp_resp, mcp_param; 3729 u8 opcode; 3730 enum _ecore_status_t rc; 3731 3732 switch (p_params->timeout) { 3733 case ECORE_MCP_RESC_LOCK_TO_DEFAULT: 3734 opcode = RESOURCE_OPCODE_REQ; 3735 p_params->timeout = 0; 3736 break; 3737 case ECORE_MCP_RESC_LOCK_TO_NONE: 3738 opcode = RESOURCE_OPCODE_REQ_WO_AGING; 3739 p_params->timeout = 0; 3740 break; 3741 default: 3742 opcode = RESOURCE_OPCODE_REQ_W_AGING; 3743 break; 3744 } 3745 3746 
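	/* The whole lock request is encoded into a single mailbox param
	 * dword: resource id, opcode and aging timeout are packed with
	 * SET_MFW_FIELD() below. A typical caller-side sketch (illustrative
	 * only; `resc' stands for some enum ecore_resc_lock value), using
	 * the defaults helper defined later in this file:
	 *
	 *	struct ecore_resc_lock_params lock;
	 *	struct ecore_resc_unlock_params unlock;
	 *	enum _ecore_status_t rc;
	 *
	 *	ecore_mcp_resc_lock_default_init(&lock, &unlock, resc, false);
	 *	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock);
	 *	if (rc == ECORE_SUCCESS && lock.b_granted) {
	 *		-- ... access the shared resource ...
	 *		rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
	 *	}
	 */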
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
		   param, p_params->timeout, opcode, p_params->resource);

	/* Attempt to acquire the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, p_params->owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		p_params->b_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		p_params->b_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    struct ecore_resc_lock_params *p_params)
{
	u32 retry_cnt = 0;
	enum _ecore_status_t rc;

	do {
		/* No need for an interval before the first iteration */
		if (retry_cnt) {
			if (p_params->sleep_b4_retry) {
				u16 retry_interval_in_ms =
					DIV_ROUND_UP(p_params->retry_interval,
						     1000);

				OSAL_MSLEEP(retry_interval_in_ms);
			} else {
				OSAL_UDELAY(p_params->retry_interval);
			}
		}

		rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
		if (rc != ECORE_SUCCESS)
			return rc;

		if (p_params->b_granted)
			break;
	} while (retry_cnt++ < p_params->retry_num);

	/* The lock may still be busy after the retries are exhausted; the
	 * caller must check b_granted to distinguish that from success.
	 */
	return ECORE_SUCCESS;
}

void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
				      struct ecore_resc_unlock_params *p_unlock,
				      enum ecore_resc_lock resource,
				      bool b_is_permanent)
{
	if (p_lock != OSAL_NULL) {
		OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));

		/* Permanent resources don't require aging, and there's no
		 * point in trying to acquire them more than once since it's
		 * unexpected another entity would release them.
		 */
		if (b_is_permanent) {
			p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
		} else {
			p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
			p_lock->retry_interval =
				ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
			p_lock->sleep_b4_retry = true;
		}

		p_lock->resource = resource;
	}

	if (p_unlock != OSAL_NULL) {
		OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
		p_unlock->resource = resource;
	}
}

enum _ecore_status_t
ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      struct ecore_resc_unlock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	enum _ecore_status_t rc;

	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
				   : RESOURCE_OPCODE_RELEASE;
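	/* Encode the release request: the resource id and a regular or
	 * forced release opcode.
	 */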
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
		   param, opcode, p_params->resource);

	/* Attempt to release the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [%d]\n",
			p_params->resource);
		/* Fallthrough */
	case RESOURCE_OPCODE_RELEASED:
		p_params->b_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		p_params->b_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
{
	return !!(p_hwfn->mcp_info->capabilities &
		  FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
}

enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 mcp_resp;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
			   0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
	if (rc == ECORE_SUCCESS)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
			   "MFW supported features: %08x\n",
			   p_hwfn->mcp_info->capabilities);

	return rc;
}

enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param, features;

	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
			     features, &mcp_resp, &mcp_param);
}

enum _ecore_status_t
ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			struct ecore_mcp_drv_attr *p_drv_attr)
{
	struct attribute_cmd_write_stc attr_cmd_write;
	enum _attribute_commands_e mfw_attr_cmd;
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	switch (p_drv_attr->attr_cmd) {
	case ECORE_MCP_DRV_ATTR_CMD_READ:
		mfw_attr_cmd = ATTRIBUTE_CMD_READ;
		break;
	case ECORE_MCP_DRV_ATTR_CMD_WRITE:
		mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
		break;
	case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
		mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
		break;
	case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
		mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
			  p_drv_attr->attr_cmd);
		return ECORE_INVAL;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
		      p_drv_attr->attr_num);
	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
		      mfw_attr_cmd);
	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
		OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
		attr_cmd_write.val = p_drv_attr->val;
		attr_cmd_write.mask = p_drv_attr->mask;
		attr_cmd_write.offset = p_drv_attr->offset;

		mb_params.p_data_src = &attr_cmd_write;
		mb_params.data_src_size = sizeof(attr_cmd_write);
	}

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The attribute command is not supported by the MFW\n");
		return ECORE_NOTIMPL;
	} else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
			mb_params.mcp_resp, p_drv_attr->attr_cmd,
			p_drv_attr->attr_num);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
		   p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
		   p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
		   mb_params.mcp_param);

	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
	    p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
		p_drv_attr->val = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      u32 offset, u32 val)
{
	struct ecore_mcp_mb_params mb_params = {0};
	enum _ecore_status_t rc;
	u32 dword = val;

	mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
	mb_params.param = offset;
	mb_params.p_data_src = &dword;
	mb_params.data_src_size = sizeof(dword);

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send the WoL write request, rc = %d\n",
			  rc);
		/* mcp_resp is not valid after a failed mailbox command */
		return;
	}

	if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK)
		DP_NOTICE(p_hwfn, false,
			  "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
			  val, offset, mb_params.mcp_resp);
}
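/* Usage sketch (illustrative only, not part of the driver): serializing a
 * critical section via the MFW resource lock and querying this PF's
 * allocation of an MFW-managed resource. It assumes a hwfn that has
 * completed MCP init and an acquired PTT; the lock id and resource id
 * below (ECORE_RESC_LOCK_RESC_ALLOC, ECORE_MAC) are arbitrary examples,
 * not a prescribed pairing.
 *
 *	struct ecore_resc_unlock_params unlock_params;
 *	struct ecore_resc_lock_params lock_params;
 *	u32 mcp_resp, resc_num, resc_start;
 *
 *	ecore_mcp_resc_lock_default_init(&lock_params, &unlock_params,
 *					 ECORE_RESC_LOCK_RESC_ALLOC, false);
 *	if (ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params) != ECORE_SUCCESS ||
 *	    !lock_params.b_granted)
 *		return ECORE_BUSY;
 *
 *	ecore_mcp_get_resc_info(p_hwfn, p_ptt, ECORE_MAC, &mcp_resp,
 *				&resc_num, &resc_start);
 *
 *	ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
 */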