/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "ecore_sp_commands.h"
#include "ecore_cxt.h"

#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
        ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
                 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
        ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
        DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
                     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
        DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
                     OFFSETOF(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
                  DRV_ID_PDA_COMP_VER_OFFSET)

#define MCP_BYTES_PER_MBIT_OFFSET 17

#ifndef ASIC_ONLY
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
#endif

bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
                return false;
        return true;
}

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_PORT);
        u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

        p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
                                                   MFW_PORT(p_hwfn));
        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "port_addr = 0x%x, port_id 0x%02x\n",
                   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
        OSAL_BE32 tmp;
        u32 i;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
                return;
#endif

        if (!p_hwfn->mcp_info->public_base)
                return;

        for (i = 0; i < length; i++) {
                tmp = ecore_rd(p_hwfn, p_ptt,
                               p_hwfn->mcp_info->mfw_mb_addr +
                               (i << 2) + sizeof(u32));

                ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
                        OSAL_BE32_TO_CPU(tmp);
        }
}

struct ecore_mcp_cmd_elem {
        osal_list_entry_t list;
        struct ecore_mcp_mb_params *p_mb_params;
        u16 expected_seq_num;
        bool b_is_completed;
};

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
                       struct ecore_mcp_mb_params *p_mb_params,
                       u16 expected_seq_num)
{
        struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

        p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
                                 sizeof(*p_cmd_elem));
        if (!p_cmd_elem) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
                goto out;
        }

        p_cmd_elem->p_mb_params = p_mb_params;
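
        /* The sequence number recorded below is the one encoded into
         * drv_mb_header for this command; ecore_mcp_update_pending_cmd()
         * matches the MFW's fw_mb_header sequence against it to pair a
         * response with its request.
         */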
        p_cmd_elem->expected_seq_num = expected_seq_num;
        OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
        return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
                                   struct ecore_mcp_cmd_elem *p_cmd_elem)
{
        OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
        OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
{
        struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

        OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
                                 struct ecore_mcp_cmd_elem) {
                if (p_cmd_elem->expected_seq_num == seq_num)
                        return p_cmd_elem;
        }

        return OSAL_NULL;
}

enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
        if (p_hwfn->mcp_info) {
                struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;

                OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
                OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);

                OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
                OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
                                              &p_hwfn->mcp_info->cmd_list, list,
                                              struct ecore_mcp_cmd_elem) {
                        ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
                }
                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

#ifdef CONFIG_ECORE_LOCK_ALLOC
                OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
                OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
        }

        OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);

        return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
                                                   struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
        u32 drv_mb_offsize, mfw_mb_offsize;
        u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
                p_info->public_base = 0;
                return ECORE_INVAL;
        }
#endif

        p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
        if (!p_info->public_base)
                return ECORE_INVAL;

        p_info->public_base |= GRCBASE_MCP;

        /* Calculate the driver and MFW mailbox address */
        drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                       PUBLIC_DRV_MB));
        p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x"
                   " mcp_pf_id = 0x%x\n",
                   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

        /* Set the MFW MB address */
        mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                       PUBLIC_MFW_MB));
        p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
        p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
                                              p_info->mfw_mb_addr);

        /* Get the current driver mailbox sequence before sending
         * the first command
         */
        p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
                             DRV_MSG_SEQ_NUMBER_MASK;

        /* Get current FW pulse sequence */
        p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
                                DRV_PULSE_SEQ_MASK;

        p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

        return ECORE_SUCCESS;
}
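
/* The MFW publishes its shared memory as a set of sections, each described
 * by an "offsize" word read from under public_base. A minimal sketch of the
 * access pattern used throughout this file (illustrative only; error
 * handling elided):
 *
 *      u32 offsize = ecore_rd(p_hwfn, p_ptt,
 *                             SECTION_OFFSIZE_ADDR(p_info->public_base,
 *                                                  PUBLIC_DRV_MB));
 *      u32 mb_addr = SECTION_ADDR(offsize, MCP_PF_ID(p_hwfn));
 *
 * SECTION_ADDR() resolves the PF/port-indexed instance of a section, so
 * each PF reads and writes only its own copy of the mailbox.
 */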

enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_info *p_info;
        u32 size;

        /* Allocate mcp_info structure */
        p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
                                       sizeof(*p_hwfn->mcp_info));
        if (!p_hwfn->mcp_info) {
                DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
                return ECORE_NOMEM;
        }
        p_info = p_hwfn->mcp_info;

        /* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
        if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
                OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
                return ECORE_NOMEM;
        }
        if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
                OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
                OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
                return ECORE_NOMEM;
        }
#endif
        OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
        OSAL_SPIN_LOCK_INIT(&p_info->link_lock);

        OSAL_LIST_INIT(&p_info->cmd_list);

        if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
                /* Do not free mcp_info here, since public_base indicates
                 * that the MCP is not initialized
                 */
                return ECORE_SUCCESS;
        }

        size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
        p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
        p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
        if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
                goto err;

        return ECORE_SUCCESS;

err:
        DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
        ecore_mcp_free(p_hwfn);
        return ECORE_NOMEM;
}

static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
        u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

        /* Use MCP history register to check if MCP reset occurred between
         * init time and now.
         */
        if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
                           p_hwfn->mcp_info->mcp_hist, generic_por_0);

                ecore_load_mcp_offsets(p_hwfn, p_ptt);
                ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
        }
}
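
/* MISCS_REG_GENERIC_POR_0 effectively acts as an MCP "history" counter -
 * its value changes whenever the MCP goes through a reset. The reread above
 * uses it to detect a reset that happened behind the driver's back, and
 * ecore_mcp_reset() below uses it as the completion indication: it
 * snapshots the register, issues DRV_MSG_CODE_MCP_RESET, and polls until
 * the value differs from the snapshot or the retry budget is exhausted.
 */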

enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
        u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
        enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                delay = EMUL_MCP_RESP_ITER_US;
#endif

        if (p_hwfn->mcp_info->b_block_cmd) {
                DP_NOTICE(p_hwfn, false,
                          "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
                return ECORE_ABORTED;
        }

        /* Ensure that only a single thread is accessing the mailbox */
        OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

        org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

        /* Set drv command along with the updated sequence */
        ecore_mcp_reread_offsets(p_hwfn, p_ptt);
        seq = ++p_hwfn->mcp_info->drv_mb_seq;
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

        do {
                /* Wait for MFW response */
                OSAL_UDELAY(delay);
                /* Give the FW up to 500 msec (50*1000*10usec) */
        } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
                                                MISCS_REG_GENERIC_POR_0)) &&
                 (cnt++ < ECORE_MCP_RESET_RETRIES));

        if (org_mcp_reset_seq !=
            ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MCP was reset after %d usec\n", cnt * delay);
        } else {
                DP_ERR(p_hwfn, "Failed to reset MCP\n");
                rc = ECORE_AGAIN;
        }

        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

        return rc;
}

/* Must be called while cmd_lock is acquired */
static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
        struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

        /* There is at most one pending command at a certain time, and if it
         * exists - it is placed at the HEAD of the list.
         */
        if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
                p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
                                                   struct ecore_mcp_cmd_elem,
                                                   list);
                return !p_cmd_elem->b_is_completed;
        }

        return false;
}

/* Must be called while cmd_lock is acquired */
static enum _ecore_status_t
ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_mb_params *p_mb_params;
        struct ecore_mcp_cmd_elem *p_cmd_elem;
        u32 mcp_resp;
        u16 seq_num;

        mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
        seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

        /* Return if no new non-handled response has been received */
        if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
                return ECORE_AGAIN;

        p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
        if (!p_cmd_elem) {
                DP_ERR(p_hwfn,
                       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
                       seq_num);
                return ECORE_UNKNOWN_ERROR;
        }

        p_mb_params = p_cmd_elem->p_mb_params;

        /* Get the MFW response along with the sequence number */
        p_mb_params->mcp_resp = mcp_resp;

        /* Get the MFW param */
        p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

        /* Get the union data */
        if (p_mb_params->p_data_dst != OSAL_NULL &&
            p_mb_params->data_dst_size) {
                u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
                                      OFFSETOF(struct public_drv_mb,
                                               union_data);
                ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
                                  union_data_addr, p_mb_params->data_dst_size);
        }

        p_cmd_elem->b_is_completed = true;

        return ECORE_SUCCESS;
}
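
/* Mailbox wire protocol, as implemented below: the driver copies the
 * request payload into the union_data area, writes drv_mb_param, and only
 * then writes drv_mb_header with (cmd | seq_num), since the header write
 * is what hands the command over to the MFW. The MFW echoes the sequence
 * number in fw_mb_header, which the polling side uses to detect completion.
 */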

/* Must be called while cmd_lock is acquired */
static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
                                      struct ecore_ptt *p_ptt,
                                      struct ecore_mcp_mb_params *p_mb_params,
                                      u16 seq_num)
{
        union drv_union_data union_data;
        u32 union_data_addr;

        /* Set the union data */
        union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
                          OFFSETOF(struct public_drv_mb, union_data);
        OSAL_MEM_ZERO(&union_data, sizeof(union_data));
        if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
                OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
                            p_mb_params->data_src_size);
        ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
                        sizeof(union_data));

        /* Set the drv param */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

        /* Set the drv command along with the sequence number */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "MFW mailbox: command 0x%08x param 0x%08x\n",
                   (p_mb_params->cmd | seq_num), p_mb_params->param);
}

static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
                                       bool block_cmd)
{
        p_hwfn->mcp_info->b_block_cmd = block_cmd;

        DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
                block_cmd ? "Block" : "Unblock");
}

void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt)
{
        u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;

        cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
        cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
        cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
        OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
        cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
        OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
        cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

        DP_NOTICE(p_hwfn, false,
                  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
                  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}

static enum _ecore_status_t
_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                         struct ecore_mcp_mb_params *p_mb_params,
                         u32 max_retries, u32 delay)
{
        struct ecore_mcp_cmd_elem *p_cmd_elem;
        u32 cnt = 0;
        u16 seq_num;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        /* Wait until the mailbox is non-occupied */
        do {
                /* Exit the loop if there is no pending command, or if the
                 * pending command is completed during this iteration.
                 * The spinlock stays locked until the command is sent.
                 */

                OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

                if (!ecore_mcp_has_pending_cmd(p_hwfn))
                        break;

                rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
                if (rc == ECORE_SUCCESS)
                        break;
                else if (rc != ECORE_AGAIN)
                        goto err;

                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
                OSAL_UDELAY(delay);
                OSAL_MFW_CMD_PREEMPT(p_hwfn);
        } while (++cnt < max_retries);

        if (cnt >= max_retries) {
                DP_NOTICE(p_hwfn, false,
                          "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
                          p_mb_params->cmd, p_mb_params->param);
                return ECORE_AGAIN;
        }

        /* Send the mailbox command */
        ecore_mcp_reread_offsets(p_hwfn, p_ptt);
        seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
        p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
        if (!p_cmd_elem) {
                rc = ECORE_NOMEM;
                goto err;
        }

        __ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
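
        /* The command above was sent with cmd_lock held and the lock was
         * then released; the wait loop below re-acquires it per iteration,
         * so other contexts can process MFW responses while this command
         * is in flight.
         */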

        /* Wait for the MFW response */
        do {
                /* Exit the loop if the command is already completed, or if
                 * the command is completed during this iteration.
                 * The spinlock stays locked until the list element is
                 * removed.
                 */

                OSAL_UDELAY(delay);
                OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

                if (p_cmd_elem->b_is_completed)
                        break;

                rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
                if (rc == ECORE_SUCCESS)
                        break;
                else if (rc != ECORE_AGAIN)
                        goto err;

                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
                OSAL_MFW_CMD_PREEMPT(p_hwfn);
        } while (++cnt < max_retries);

        if (cnt >= max_retries) {
                DP_NOTICE(p_hwfn, false,
                          "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
                          p_mb_params->cmd, p_mb_params->param);
                ecore_mcp_print_cpu_info(p_hwfn, p_ptt);

                OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
                ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

                ecore_mcp_cmd_set_blocking(p_hwfn, true);
                ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
                return ECORE_AGAIN;
        }

        ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
                   p_mb_params->mcp_resp, p_mb_params->mcp_param,
                   (cnt * delay) / 1000, (cnt * delay) % 1000);

        /* Clear the sequence number from the MFW response */
        p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

        return ECORE_SUCCESS;

err:
        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
        return rc;
}

static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        struct ecore_mcp_mb_params *p_mb_params)
{
        osal_size_t union_data_size = sizeof(union drv_union_data);
        u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
        u32 delay = CHIP_MCP_RESP_ITER_US;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                delay = EMUL_MCP_RESP_ITER_US;
        /* There is a built-in delay of 100usec in each MFW response read */
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
                max_retries /= 10;
#endif

        /* MCP not initialized */
        if (!ecore_mcp_is_init(p_hwfn)) {
                DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
                return ECORE_BUSY;
        }

        if (p_mb_params->data_src_size > union_data_size ||
            p_mb_params->data_dst_size > union_data_size) {
                DP_ERR(p_hwfn,
                       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
                       p_mb_params->data_src_size, p_mb_params->data_dst_size,
                       union_data_size);
                return ECORE_INVAL;
        }

        if (p_hwfn->mcp_info->b_block_cmd) {
                DP_NOTICE(p_hwfn, false,
                          "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
                          p_mb_params->cmd, p_mb_params->param);
                return ECORE_ABORTED;
        }

        return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
                                        delay);
}

enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
                                   u32 *o_mcp_resp, u32 *o_mcp_param)
{
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
                        loaded--;
                        loaded_port[p_hwfn->port_id]--;
                        DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
                                   loaded);
                }
                return ECORE_SUCCESS;
        }
#endif

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          u32 cmd,
                                          u32 param,
                                          u32 *o_mcp_resp,
                                          u32 *o_mcp_param,
                                          u32 i_txn_size, u32 *i_buf)
{
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
        mb_params.p_data_src = i_buf;
        mb_params.data_src_size = (u8)i_txn_size;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          u32 cmd,
                                          u32 param,
                                          u32 *o_mcp_resp,
                                          u32 *o_mcp_param,
                                          u32 *o_txn_size, u32 *o_buf)
{
        struct ecore_mcp_mb_params mb_params;
        u8 raw_data[MCP_DRV_NVM_BUF_LEN];
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
        mb_params.p_data_dst = raw_data;

        /* Use the maximal value since the actual one is part of the response */
        mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        *o_txn_size = *o_mcp_param;
        /* @DPDK */
        OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));

        return ECORE_SUCCESS;
}

#ifndef ASIC_ONLY
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
                                    u32 *p_load_code)
{
        static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

        if (!loaded)
                load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
        else if (!loaded_port[p_hwfn->port_id])
                load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
        else
                load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

        /* On CMT, always tell that it's engine */
        if (ECORE_IS_CMT(p_hwfn->p_dev))
                load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

        *p_load_code = load_phase;
        loaded++;
        loaded_port[p_hwfn->port_id]++;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
                   *p_load_code, loaded, p_hwfn->port_id,
                   loaded_port[p_hwfn->port_id]);
}
#endif
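
/* A minimal usage sketch of the wrappers above (illustrative; error
 * handling elided). A DRV_MSG_CODE_* request without a payload goes
 * through ecore_mcp_cmd():
 *
 *      u32 resp = 0, param = 0;
 *
 *      rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0,
 *                         &resp, &param);
 *
 * Requests that carry a buffer use ecore_mcp_nvm_wr_cmd() or
 * ecore_mcp_nvm_rd_cmd(), which stage the data through union_data.
 */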

static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
                         enum ecore_override_force_load override_force_load)
{
        bool can_force_load = false;

        switch (override_force_load) {
        case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
                can_force_load = true;
                break;
        case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
                can_force_load = false;
                break;
        default:
                can_force_load = (drv_role == DRV_ROLE_OS &&
                                  exist_drv_role == DRV_ROLE_PREBOOT) ||
                                 (drv_role == DRV_ROLE_KDUMP &&
                                  exist_drv_role == DRV_ROLE_OS);
                break;
        }

        return can_force_load;
}

static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
                                                      struct ecore_ptt *p_ptt)
{
        u32 resp = 0, param = 0;
        enum _ecore_status_t rc;

        rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
                           &resp, &param);
        if (rc != ECORE_SUCCESS)
                DP_NOTICE(p_hwfn, false,
                          "Failed to send cancel load request, rc = %d\n", rc);

        return rc;
}

#define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)

static u32 ecore_get_config_bitmap(void)
{
        u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
        config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
        config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
        config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
        config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
        config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
        config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
        config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

        return config_bitmap;
}

struct ecore_load_req_in_params {
        u8 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
#define ECORE_LOAD_REQ_HSI_VER_1 1
        u32 drv_ver_0;
        u32 drv_ver_1;
        u32 fw_ver;
        u8 drv_role;
        u8 timeout_val;
        u8 force_cmd;
        bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
        u32 load_code;
        u32 exist_drv_ver_0;
        u32 exist_drv_ver_1;
        u32 exist_fw_ver;
        u8 exist_drv_role;
        u8 mfw_hsi_ver;
        bool drv_exists;
};

static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                     struct ecore_load_req_in_params *p_in_params,
                     struct ecore_load_req_out_params *p_out_params)
{
        struct ecore_mcp_mb_params mb_params;
        struct load_req_stc load_req;
        struct load_rsp_stc load_rsp;
        u32 hsi_ver;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&load_req, sizeof(load_req));
        load_req.drv_ver_0 = p_in_params->drv_ver_0;
        load_req.drv_ver_1 = p_in_params->drv_ver_1;
        load_req.fw_ver = p_in_params->fw_ver;
        SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
        SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
                      p_in_params->timeout_val);
        SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
        SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
                      p_in_params->avoid_eng_reset);
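
        /* ECORE_LOAD_REQ_HSI_VER_DEFAULT means "advertise the HSI version
         * this driver was built against"; any other value is an explicit
         * version, used when resending after a REFUSED_HSI_1 response.
         */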
        hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
                  DRV_ID_MCP_HSI_VER_CURRENT :
                  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
        mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
        mb_params.p_data_src = &load_req;
        mb_params.data_src_size = sizeof(load_req);
        mb_params.p_data_dst = &load_rsp;
        mb_params.data_dst_size = sizeof(load_rsp);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
                   mb_params.param,
                   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
                   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
                   GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
                   GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

        if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
                           load_req.drv_ver_0, load_req.drv_ver_1,
                           load_req.fw_ver, load_req.misc0,
                           GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
                           GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
                           GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
                           GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));

        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to send load request, rc = %d\n", rc);
                return rc;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
        p_out_params->load_code = mb_params.mcp_resp;

        if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
            p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
                           load_rsp.drv_ver_0, load_rsp.drv_ver_1,
                           load_rsp.fw_ver, load_rsp.misc0,
                           GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
                           GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
                           GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

                p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
                p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
                p_out_params->exist_fw_ver = load_rsp.fw_ver;
                p_out_params->exist_drv_role =
                        GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
                p_out_params->mfw_hsi_ver =
                        GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
                p_out_params->drv_exists =
                        GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
                        LOAD_RSP_FLAGS0_DRV_EXISTS;
        }

        return ECORE_SUCCESS;
}

static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
                                   u8 *p_mfw_drv_role)
{
        switch (drv_role) {
        case ECORE_DRV_ROLE_OS:
                *p_mfw_drv_role = DRV_ROLE_OS;
                break;
        case ECORE_DRV_ROLE_KDUMP:
                *p_mfw_drv_role = DRV_ROLE_KDUMP;
                break;
        }
}

enum ecore_load_req_force {
        ECORE_LOAD_REQ_FORCE_NONE,
        ECORE_LOAD_REQ_FORCE_PF,
        ECORE_LOAD_REQ_FORCE_ALL,
};

static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
                                    u8 *p_mfw_force_cmd)
{
        switch (force_cmd) {
        case ECORE_LOAD_REQ_FORCE_NONE:
                *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
                break;
        case ECORE_LOAD_REQ_FORCE_PF:
                *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
                break;
        case ECORE_LOAD_REQ_FORCE_ALL:
                *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
                break;
        }
}
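
/* Load-request negotiation, as a sketch of the flow implemented below:
 *
 * 1. Send LOAD_REQ with the default (current) HSI version.
 * 2. If the MFW answers REFUSED_HSI_1, resend with HSI version 1.
 * 3. If it answers REFUSED_REQUIRES_FORCE, either resend with
 *    LOAD_REQ_FORCE_ALL (when ecore_mcp_can_force_load() permits it), or
 *    cancel the request and return ECORE_BUSY.
 * 4. A LOAD_ENGINE/PORT/FUNCTION response selects which portion of the
 *    init flow this PF is responsible for.
 */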

enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt,
                                        struct ecore_load_req_params *p_params)
{
        struct ecore_load_req_out_params out_params;
        struct ecore_load_req_in_params in_params;
        u8 mfw_drv_role = 0, mfw_force_cmd;
        enum _ecore_status_t rc;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
                return ECORE_SUCCESS;
        }
#endif

        OSAL_MEM_ZERO(&in_params, sizeof(in_params));
        in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
        in_params.drv_ver_0 = ECORE_VERSION;
        in_params.drv_ver_1 = ecore_get_config_bitmap();
        in_params.fw_ver = STORM_FW_VERSION;
        ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
        in_params.drv_role = mfw_drv_role;
        in_params.timeout_val = p_params->timeout_val;
        ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
        in_params.force_cmd = mfw_force_cmd;
        in_params.avoid_eng_reset = p_params->avoid_eng_reset;

        OSAL_MEM_ZERO(&out_params, sizeof(out_params));
        rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* First handle cases where another load request should/might be sent:
         * - MFW expects the old interface [HSI version = 1]
         * - MFW responds that a force load request is required
         */
        if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
                DP_INFO(p_hwfn,
                        "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

                in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
                OSAL_MEM_ZERO(&out_params, sizeof(out_params));
                rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
                                          &out_params);
                if (rc != ECORE_SUCCESS)
                        return rc;
        } else if (out_params.load_code ==
                   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
                if (ecore_mcp_can_force_load(in_params.drv_role,
                                             out_params.exist_drv_role,
                                             p_params->override_force_load)) {
                        DP_INFO(p_hwfn,
                                "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
                                in_params.drv_role, in_params.fw_ver,
                                in_params.drv_ver_0, in_params.drv_ver_1,
                                out_params.exist_drv_role,
                                out_params.exist_fw_ver,
                                out_params.exist_drv_ver_0,
                                out_params.exist_drv_ver_1);

                        ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
                                                &mfw_force_cmd);

                        in_params.force_cmd = mfw_force_cmd;
                        OSAL_MEM_ZERO(&out_params, sizeof(out_params));
                        rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
                                                  &out_params);
                        if (rc != ECORE_SUCCESS)
                                return rc;
                } else {
                        DP_NOTICE(p_hwfn, false,
                                  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
                                  in_params.drv_role, in_params.fw_ver,
                                  in_params.drv_ver_0, in_params.drv_ver_1,
                                  out_params.exist_drv_role,
                                  out_params.exist_fw_ver,
                                  out_params.exist_drv_ver_0,
                                  out_params.exist_drv_ver_1);

                        ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
                        return ECORE_BUSY;
                }
        }

        /* Now handle the other types of responses.
         * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
         * expected here after the additional revised load requests were sent.
         */
        switch (out_params.load_code) {
        case FW_MSG_CODE_DRV_LOAD_ENGINE:
        case FW_MSG_CODE_DRV_LOAD_PORT:
        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
                    out_params.drv_exists) {
                        /* The role and fw/driver version match, but the PF is
                         * already loaded and has not been unloaded gracefully.
                         * This is unexpected since a quasi-FLR request was
                         * previously sent as part of ecore_hw_prepare().
                         */
                        DP_NOTICE(p_hwfn, false,
                                  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
                        return ECORE_INVAL;
                }
                break;
        default:
                DP_NOTICE(p_hwfn, false,
                          "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
                          out_params.load_code);
                return ECORE_BUSY;
        }

        p_params->load_code = out_params.load_code;

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt)
{
        u32 resp = 0, param = 0;
        enum _ecore_status_t rc;

        rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
                           &param);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to send a LOAD_DONE command, rc = %d\n", rc);
                return rc;
        }

        /* Check if there is a DID mismatch between nvm-cfg/efuse */
        if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
                DP_NOTICE(p_hwfn, false,
                          "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt)
{
        u32 wol_param, mcp_resp, mcp_param;

        /* @DPDK */
        wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;

        return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
                             &mcp_resp, &mcp_param);
}

enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
                                           struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_mb_params mb_params;

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

        return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_PATH);
        u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
        u32 path_addr = SECTION_ADDR(mfw_path_offsize,
                                     ECORE_PATH_ID(p_hwfn));
        u32 disabled_vfs[VF_MAX_STATIC / 32];
        int i;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Reading Disabled VF information from [offset %08x],"
                   " path_addr %08x\n",
                   mfw_path_offsize, path_addr);

        for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
                disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
                                           path_addr +
                                           OFFSETOF(struct public_path,
                                                    mcp_vf_disabled) +
                                           sizeof(u32) * i);
                DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
                           "FLR-ed VFs [%08x,...,%08x] - %08x\n",
                           i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
        }

        if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
                OSAL_VF_FLR_UPDATE(p_hwfn);
}
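
/* Both the FLR indication above and the ACK below use the same layout:
 * a bitmap of VF_MAX_STATIC bits packed into VF_MAX_STATIC / 32 dwords
 * (hence the data_src_size of VF_MAX_STATIC / 8 bytes), one bit per
 * static VF id.
 */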

enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          u32 *vfs_to_ack)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_FUNC);
        u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
        u32 func_addr = SECTION_ADDR(mfw_func_offsize,
                                     MCP_PF_ID(p_hwfn));
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;
        int i;

        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
                DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
                           "Acking VFs [%08x,...,%08x] - %08x\n",
                           i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
        mb_params.p_data_src = vfs_to_ack;
        mb_params.data_src_size = VF_MAX_STATIC / 8;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to pass ACK for VF flr to MFW\n");
                return ECORE_TIMEOUT;
        }

        /* TMP - clear the ACK bits; should be done by MFW */
        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
                ecore_wr(p_hwfn, p_ptt,
                         func_addr +
                         OFFSETOF(struct public_func, drv_ack_vf_disabled) +
                         i * sizeof(u32), 0);

        return rc;
}

static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
                                                struct ecore_ptt *p_ptt)
{
        u32 transceiver_state;

        transceiver_state = ecore_rd(p_hwfn, p_ptt,
                                     p_hwfn->mcp_info->port_addr +
                                     OFFSETOF(struct public_port,
                                              transceiver_data));

        DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
                   "Received transceiver state update [0x%08x] from mfw"
                   " [Addr 0x%x]\n",
                   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
                                            OFFSETOF(struct public_port,
                                                     transceiver_data)));

        transceiver_state = GET_MFW_FIELD(transceiver_state,
                                          ETH_TRANSCEIVER_STATE);

        if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
                DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
        else
                DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");

        OSAL_TRANSCEIVER_UPDATE(p_hwfn);
}

static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
                                      struct ecore_ptt *p_ptt,
                                      struct ecore_mcp_link_state *p_link)
{
        u32 eee_status, val;

        p_link->eee_adv_caps = 0;
        p_link->eee_lp_adv_caps = 0;
        eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
                              OFFSETOF(struct public_port, eee_status));
        p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
        val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
        if (val & EEE_1G_ADV)
                p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
        if (val & EEE_10G_ADV)
                p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
        val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
        if (val & EEE_1G_ADV)
                p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
        if (val & EEE_10G_ADV)
                p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
}

static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt,
                                    struct public_func *p_data,
                                    int pfid)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_FUNC);
        u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
        u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
        u32 i, size;

        OSAL_MEM_ZERO(p_data, sizeof(*p_data));

        size = OSAL_MIN_T(u32, sizeof(*p_data),
                          SECTION_SIZE(mfw_path_offsize));
        for (i = 0; i < size / sizeof(u32); i++)
                ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
                                              func_addr + (i << 2));

        return size;
}
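
/* Example use of the helper above (illustrative): snapshot this PF's
 * public_func section to inspect MFW-owned per-function state, e.g. the
 * virtual-link status consumed by ecore_mcp_handle_link_change():
 *
 *      struct public_func shmem_info;
 *
 *      ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
 *                               MCP_PF_ID(p_hwfn));
 *      link_up = !!(shmem_info.status & FUNC_STATUS_VIRTUAL_LINK_UP);
 */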

static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt,
                                         bool b_reset)
{
        struct ecore_mcp_link_state *p_link;
        u8 max_bw, min_bw;
        u32 status = 0;

        /* Prevent SW/attentions from doing this at the same time */
        OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);

        p_link = &p_hwfn->mcp_info->link_output;
        OSAL_MEMSET(p_link, 0, sizeof(*p_link));
        if (!b_reset) {
                status = ecore_rd(p_hwfn, p_ptt,
                                  p_hwfn->mcp_info->port_addr +
                                  OFFSETOF(struct public_port, link_status));
                DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
                           "Received link update [0x%08x] from mfw"
                           " [Addr 0x%x]\n",
                           status, (u32)(p_hwfn->mcp_info->port_addr +
                                         OFFSETOF(struct public_port,
                                                  link_status)));
        } else {
                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                           "Resetting link indications\n");
                goto out;
        }

        if (p_hwfn->b_drv_link_init) {
                /* Link indication with modern MFW arrives as per-PF
                 * indication.
                 */
                if (p_hwfn->mcp_info->capabilities &
                    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
                        struct public_func shmem_info;

                        ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
                                                 MCP_PF_ID(p_hwfn));
                        p_link->link_up = !!(shmem_info.status &
                                             FUNC_STATUS_VIRTUAL_LINK_UP);
                } else {
                        p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
                }
        } else {
                p_link->link_up = false;
        }

        p_link->full_duplex = true;
        switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
        case LINK_STATUS_SPEED_AND_DUPLEX_100G:
                p_link->speed = 100000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_50G:
                p_link->speed = 50000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_40G:
                p_link->speed = 40000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_25G:
                p_link->speed = 25000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_20G:
                p_link->speed = 20000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_10G:
                p_link->speed = 10000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
                p_link->full_duplex = false;
                /* Fall-through */
        case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
                p_link->speed = 1000;
                break;
        default:
                p_link->speed = 0;
        }

        /* We never store the total line speed, since p_link->speed is
         * changed again according to the bandwidth allocation.
         */
        if (p_link->link_up && p_link->speed)
                p_link->line_speed = p_link->speed;
        else
                p_link->line_speed = 0;

        max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
        min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

        /* Max bandwidth configuration */
        __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
                                           p_link, max_bw);

        /* Min bandwidth configuration */
        __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
                                           p_link, min_bw);
        ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
                                              p_link->min_pf_rate);

        p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
        p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
        p_link->parallel_detection = !!(status &
                                        LINK_STATUS_PARALLEL_DETECTION_USED);
        p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
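
        /* Translate the link-partner advertisement bits of the status word
         * into the OS-facing ECORE_LINK_PARTNER_SPEED_* mask.
         */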
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_10G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_20G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_25G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_40G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_50G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_100G : 0;

        p_link->partner_tx_flow_ctrl_en =
                !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
        p_link->partner_rx_flow_ctrl_en =
                !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

        switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
        case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
                p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
                p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
                p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
                break;
        default:
                p_link->partner_adv_pause = 0;
        }

        p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

        if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
                ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

        OSAL_LINK_UPDATE(p_hwfn);
out:
        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}

enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt, bool b_up)
{
        struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
        struct ecore_mcp_mb_params mb_params;
        struct eth_phy_cfg phy_cfg;
        enum _ecore_status_t rc = ECORE_SUCCESS;
        u32 cmd;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                return ECORE_SUCCESS;
#endif

        /* Set the shmem configuration according to params */
        OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
        cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
        if (!params->speed.autoneg)
                phy_cfg.speed = params->speed.forced_speed;
        phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
        phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
        phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
        phy_cfg.adv_speed = params->speed.advertised_speeds;
        phy_cfg.loopback_mode = params->loopback_mode;

        /* There are MFWs that share this capability regardless of whether
         * this is feasible or not. And given that at the very least adv_caps
         * would be set internally by ecore, we want to make sure LFA would
         * still work.
         */
        if ((p_hwfn->mcp_info->capabilities &
             FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
            params->eee.enable) {
                phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
                if (params->eee.tx_lpi_enable)
                        phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
                if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
                        phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
                if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
                        phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
                phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
                                    EEE_TX_TIMER_USEC_OFFSET) &
                                   EEE_TX_TIMER_USEC_MASK;
        }

        p_hwfn->b_drv_link_init = b_up;

        if (b_up)
                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                           "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
                           phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
                           phy_cfg.loopback_mode);
        else
                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.p_data_src = &phy_cfg;
        mb_params.data_src_size = sizeof(phy_cfg);
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

        /* if mcp fails to respond we must abort */
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
                return rc;
        }

        /* Mimic link-change attention, done for several reasons:
         * - On reset, there's no guarantee MFW would trigger
         *   an attention.
         * - On initialization, older MFWs might not indicate link change
         *   during LFA, so we'll never get an UP indication.
         */
        ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

        return ECORE_SUCCESS;
}

u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt)
{
        u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

        /* TODO - Add support for VFs */
        if (IS_VF(p_hwfn->p_dev))
                return ECORE_INVAL;

        path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                                 PUBLIC_PATH);
        path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
        path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

        proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
                                 path_addr +
                                 OFFSETOF(struct public_path, process_kill)) &
                        PROCESS_KILL_COUNTER_MASK;

        return proc_kill_cnt;
}

static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt)
{
        struct ecore_dev *p_dev = p_hwfn->p_dev;
        u32 proc_kill_cnt;

        /* Prevent possible attentions/interrupts during the recovery handling
         * until its load phase, during which they will be re-enabled.
         */
        ecore_int_igu_disable_int(p_hwfn, p_ptt);

        DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

        /* The following operations should be done once, and thus in CMT mode
         * are carried out by only the first HW function.
         */
        if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
                return;

        if (p_dev->recov_in_prog) {
                DP_NOTICE(p_hwfn, false,
                          "Ignoring the indication since a recovery"
                          " process is already in progress\n");
                return;
        }

        p_dev->recov_in_prog = true;

        proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
        DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

        OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}

static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          enum MFW_DRV_MSG_TYPE type)
{
        enum ecore_mcp_protocol_type stats_type;
        union ecore_mcp_protocol_stats stats;
        struct ecore_mcp_mb_params mb_params;
        u32 hsi_param;
        enum _ecore_status_t rc;

        switch (type) {
        case MFW_DRV_MSG_GET_LAN_STATS:
                stats_type = ECORE_MCP_LAN_STATS;
                hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
                break;
        default:
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Invalid protocol type %d\n", type);
                return;
        }

        OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_GET_STATS;
        mb_params.param = hsi_param;
        mb_params.p_data_src = &stats;
        mb_params.data_src_size = sizeof(stats);
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}

static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
                                    struct public_func *p_shmem_info)
{
        struct ecore_mcp_function_info *p_info;

        p_info = &p_hwfn->mcp_info->func_info;

        /* TODO - bandwidth min/max should have valid values of 1-100,
         * as well as some indication that the feature is disabled.
         * Until MFW/qlediag enforce those limitations, assume there is
         * always a limit, and correct the value to min `1' and max `100'
         * if it isn't in range.
         */
        p_info->bandwidth_min = (p_shmem_info->config &
                                 FUNC_MF_CFG_MIN_BW_MASK) >>
                                FUNC_MF_CFG_MIN_BW_OFFSET;
        if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
                DP_INFO(p_hwfn,
                        "bandwidth minimum out of bounds [%02x]. Set to 1\n",
                        p_info->bandwidth_min);
                p_info->bandwidth_min = 1;
        }

        p_info->bandwidth_max = (p_shmem_info->config &
                                 FUNC_MF_CFG_MAX_BW_MASK) >>
                                FUNC_MF_CFG_MAX_BW_OFFSET;
        if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
                DP_INFO(p_hwfn,
                        "bandwidth maximum out of bounds [%02x]. Set to 100\n",
                        p_info->bandwidth_max);
                p_info->bandwidth_max = 100;
        }
}
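
/* For instance (values illustrative), a shmem config whose min-BW field
 * decodes to 0 and whose max-BW field decodes to 200 is clamped by the
 * helper above to bandwidth_min = 1 and bandwidth_max = 100 before the
 * bandwidth configuration below consumes the values.
 */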

static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_function_info *p_info;
        struct public_func shmem_info;
        u32 resp = 0, param = 0;

        ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

        ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

        p_info = &p_hwfn->mcp_info->func_info;

        ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

        ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

        /* Acknowledge the MFW */
        ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
                      &param);
}

static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
        /* A single notification should be sent to upper driver in CMT mode */
        if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
                return;

        DP_NOTICE(p_hwfn, false,
                  "Fan failure was detected on the network interface card"
                  " and it's going to be shut down.\n");

        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}

struct ecore_mdump_cmd_params {
        u32 cmd;
        void *p_data_src;
        u8 data_src_size;
        void *p_data_dst;
        u8 data_dst_size;
        u32 mcp_resp;
};

static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                    struct ecore_mdump_cmd_params *p_mdump_cmd_params)
{
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
        mb_params.param = p_mdump_cmd_params->cmd;
        mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
        mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
        mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
        mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;

        if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
                DP_INFO(p_hwfn,
                        "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
                        p_mdump_cmd_params->cmd);
                rc = ECORE_NOTIMPL;
        } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
                DP_INFO(p_hwfn,
                        "The mdump command is not supported by the MFW\n");
                rc = ECORE_NOTIMPL;
        }

        return rc;
}

static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
                                                struct ecore_ptt *p_ptt)
{
        struct ecore_mdump_cmd_params mdump_cmd_params;

        OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
        mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;

        return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
                                                struct ecore_ptt *p_ptt,
                                                u32 epoch)
{
        struct ecore_mdump_cmd_params mdump_cmd_params;

        OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
        mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
        mdump_cmd_params.p_data_src = &epoch;
        mdump_cmd_params.data_src_size = sizeof(epoch);

        return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
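
/* All mdump operations share a single mailbox command: the wrapper above
 * sends DRV_MSG_CODE_MDUMP_CMD with the sub-command in the param field.
 * E.g. (illustrative), triggering a dump amounts to:
 *
 *      struct ecore_mdump_cmd_params params;
 *
 *      OSAL_MEM_ZERO(&params, sizeof(params));
 *      params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
 *      rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &params);
 */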

enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt)
{
        struct ecore_mdump_cmd_params mdump_cmd_params;

        OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
        mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;

        return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                           struct mdump_config_stc *p_mdump_config)
{
        struct ecore_mdump_cmd_params mdump_cmd_params;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
        mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
        mdump_cmd_params.p_data_dst = p_mdump_config;
        mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);

        rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
                DP_INFO(p_hwfn,
                        "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
                        mdump_cmd_params.mcp_resp);
                rc = ECORE_UNKNOWN_ERROR;
        }

        return rc;
}

enum _ecore_status_t
ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                         struct ecore_mdump_info *p_mdump_info)
{
        u32 addr, global_offsize, global_addr;
        struct mdump_config_stc mdump_config;
        enum _ecore_status_t rc;

        OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));

        addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                    PUBLIC_GLOBAL);
        global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
        global_addr = SECTION_ADDR(global_offsize, 0);
        p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
                                        global_addr +
                                        OFFSETOF(struct public_global,
                                                 mdump_reason));

        if (p_mdump_info->reason) {
                rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
                if (rc != ECORE_SUCCESS)
                        return rc;

                p_mdump_info->version = mdump_config.version;
                p_mdump_info->config = mdump_config.config;
                p_mdump_info->epoch = mdump_config.epoc;
                p_mdump_info->num_of_logs = mdump_config.num_of_logs;
                p_mdump_info->valid_logs = mdump_config.valid_logs;

                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
                           p_mdump_info->reason, p_mdump_info->version,
                           p_mdump_info->config, p_mdump_info->epoch,
                           p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
        } else {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MFW mdump info: reason %d\n", p_mdump_info->reason);
        }

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
                                                struct ecore_ptt *p_ptt)
{
        struct ecore_mdump_cmd_params mdump_cmd_params;

        OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
        mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;

        return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t
ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                           struct ecore_mdump_retain_data *p_mdump_retain)
{
        struct ecore_mdump_cmd_params mdump_cmd_params;
        struct mdump_retain_data_stc mfw_mdump_retain;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1841 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN; 1842 mdump_cmd_params.p_data_dst = &mfw_mdump_retain; 1843 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain); 1844 1845 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1846 if (rc != ECORE_SUCCESS) 1847 return rc; 1848 1849 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) { 1850 DP_INFO(p_hwfn, 1851 "Failed to get the mdump retained data [mcp_resp 0x%x]\n", 1852 mdump_cmd_params.mcp_resp); 1853 return ECORE_UNKNOWN_ERROR; 1854 } 1855 1856 p_mdump_retain->valid = mfw_mdump_retain.valid; 1857 p_mdump_retain->epoch = mfw_mdump_retain.epoch; 1858 p_mdump_retain->pf = mfw_mdump_retain.pf; 1859 p_mdump_retain->status = mfw_mdump_retain.status; 1860 1861 return ECORE_SUCCESS; 1862 } 1863 1864 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn, 1865 struct ecore_ptt *p_ptt) 1866 { 1867 struct ecore_mdump_cmd_params mdump_cmd_params; 1868 1869 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); 1870 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN; 1871 1872 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1873 } 1874 1875 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn, 1876 struct ecore_ptt *p_ptt) 1877 { 1878 struct ecore_mdump_retain_data mdump_retain; 1879 enum _ecore_status_t rc; 1880 1881 /* In CMT mode - no need for more than a single acknowledgment to the 1882 * MFW, and no more than a single notification to the upper driver. 1883 */ 1884 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev)) 1885 return; 1886 1887 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain); 1888 if (rc == ECORE_SUCCESS && mdump_retain.valid) { 1889 DP_NOTICE(p_hwfn, false, 1890 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n", 1891 mdump_retain.epoch, mdump_retain.pf, 1892 mdump_retain.status); 1893 } else { 1894 DP_NOTICE(p_hwfn, false, 1895 "The MFW notified that a critical error occurred in the device\n"); 1896 } 1897 1898 if (p_hwfn->p_dev->allow_mdump) { 1899 DP_NOTICE(p_hwfn, false, 1900 "Not acknowledging the notification to allow the MFW crash dump\n"); 1901 return; 1902 } 1903 1904 DP_NOTICE(p_hwfn, false, 1905 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n"); 1906 ecore_mcp_mdump_ack(p_hwfn, p_ptt); 1907 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); 1908 } 1909 1910 void 1911 ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 1912 { 1913 struct public_func shmem_info; 1914 u32 port_cfg, val; 1915 1916 if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) 1917 return; 1918 1919 OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info)); 1920 port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + 1921 OFFSETOF(struct public_port, oem_cfg_port)); 1922 val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE); 1923 if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) 1924 DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n", 1925 val); 1926 1927 val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE); 1928 if (val == OEM_CFG_SCHED_TYPE_ETS) 1929 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS; 1930 else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) 1931 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW; 1932 else 1933 DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n", 1934 val); 1935 1936 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, 1937 MCP_PF_ID(p_hwfn)); 1938 val = 
GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC); 1939 p_hwfn->ufp_info.tc = (u8)val; 1940 val = GET_MFW_FIELD(shmem_info.oem_cfg_func, 1941 OEM_CFG_FUNC_HOST_PRI_CTRL); 1942 if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) 1943 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC; 1944 else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) 1945 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS; 1946 else 1947 DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n", 1948 val); 1949 1950 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, 1951 "UFP shmem config: mode = %d tc = %d pri_type = %d\n", 1952 p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc, 1953 p_hwfn->ufp_info.pri_type); 1954 } 1955 1956 static enum _ecore_status_t 1957 ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 1958 { 1959 ecore_mcp_read_ufp_config(p_hwfn, p_ptt); 1960 1961 if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) { 1962 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; 1963 p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc; 1964 1965 ecore_qm_reconf(p_hwfn, p_ptt); 1966 } else { 1967 /* Merge UFP TC with the dcbx TC data */ 1968 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 1969 ECORE_DCBX_OPERATIONAL_MIB); 1970 } 1971 1972 /* update storm FW with negotiation results */ 1973 ecore_sp_pf_update_ufp(p_hwfn); 1974 1975 return ECORE_SUCCESS; 1976 } 1977 1978 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn, 1979 struct ecore_ptt *p_ptt) 1980 { 1981 struct ecore_mcp_info *info = p_hwfn->mcp_info; 1982 enum _ecore_status_t rc = ECORE_SUCCESS; 1983 bool found = false; 1984 u16 i; 1985 1986 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n"); 1987 1988 /* Read Messages from MFW */ 1989 ecore_mcp_read_mb(p_hwfn, p_ptt); 1990 1991 /* Compare current messages to old ones */ 1992 for (i = 0; i < info->mfw_mb_length; i++) { 1993 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i]) 1994 continue; 1995 1996 found = true; 1997 1998 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 1999 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", 2000 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); 2001 2002 switch (i) { 2003 case MFW_DRV_MSG_LINK_CHANGE: 2004 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false); 2005 break; 2006 case MFW_DRV_MSG_VF_DISABLED: 2007 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt); 2008 break; 2009 case MFW_DRV_MSG_LLDP_DATA_UPDATED: 2010 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2011 ECORE_DCBX_REMOTE_LLDP_MIB); 2012 break; 2013 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED: 2014 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2015 ECORE_DCBX_REMOTE_MIB); 2016 break; 2017 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: 2018 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2019 ECORE_DCBX_OPERATIONAL_MIB); 2020 /* clear the user-config cache */ 2021 OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0, 2022 sizeof(struct ecore_dcbx_set)); 2023 break; 2024 case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED: 2025 ecore_lldp_mib_update_event(p_hwfn, p_ptt); 2026 break; 2027 case MFW_DRV_MSG_OEM_CFG_UPDATE: 2028 ecore_mcp_handle_ufp_event(p_hwfn, p_ptt); 2029 break; 2030 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: 2031 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt); 2032 break; 2033 case MFW_DRV_MSG_ERROR_RECOVERY: 2034 ecore_mcp_handle_process_kill(p_hwfn, p_ptt); 2035 break; 2036 case MFW_DRV_MSG_GET_LAN_STATS: 2037 case MFW_DRV_MSG_GET_FCOE_STATS: 2038 case MFW_DRV_MSG_GET_ISCSI_STATS: 2039 case MFW_DRV_MSG_GET_RDMA_STATS: 2040 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i); 2041 break; 2042 case MFW_DRV_MSG_BW_UPDATE: 2043 
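/* The MFW reports a bandwidth re-configuration; the handler is expected
 * to re-read the min/max bandwidth from the function's shmem entry and
 * apply the new limits (see ecore_mcp_update_bw()).
 */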
ecore_mcp_update_bw(p_hwfn, p_ptt); 2044 break; 2045 case MFW_DRV_MSG_FAILURE_DETECTED: 2046 ecore_mcp_handle_fan_failure(p_hwfn); 2047 break; 2048 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED: 2049 ecore_mcp_handle_critical_error(p_hwfn, p_ptt); 2050 break; 2051 default: 2052 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i); 2053 rc = ECORE_INVAL; 2054 } 2055 } 2056 2057 /* ACK everything */ 2058 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) { 2059 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]); 2060 2061 /* The MFW expects the answer in BE, so force the write in that format */ 2062 ecore_wr(p_hwfn, p_ptt, 2063 info->mfw_mb_addr + sizeof(u32) + 2064 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) * 2065 sizeof(u32) + i * sizeof(u32), val); 2066 } 2067 2068 if (!found) { 2069 DP_NOTICE(p_hwfn, false, 2070 "Received an MFW message indication but no" 2071 " new message!\n"); 2072 rc = ECORE_INVAL; 2073 } 2074 2075 /* Copy the new mfw messages into the shadow */ 2076 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length); 2077 2078 return rc; 2079 } 2080 2081 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn, 2082 struct ecore_ptt *p_ptt, 2083 u32 *p_mfw_ver, 2084 u32 *p_running_bundle_id) 2085 { 2086 u32 global_offsize; 2087 2088 #ifndef ASIC_ONLY 2089 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 2090 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n"); 2091 return ECORE_SUCCESS; 2092 } 2093 #endif 2094 2095 if (IS_VF(p_hwfn->p_dev)) { 2096 if (p_hwfn->vf_iov_info) { 2097 struct pfvf_acquire_resp_tlv *p_resp; 2098 2099 p_resp = &p_hwfn->vf_iov_info->acquire_resp; 2100 *p_mfw_ver = p_resp->pfdev_info.mfw_ver; 2101 return ECORE_SUCCESS; 2102 } else { 2103 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2104 "VF requested MFW version prior to ACQUIRE\n"); 2105 return ECORE_INVAL; 2106 } 2107 } 2108 2109 global_offsize = ecore_rd(p_hwfn, p_ptt, 2110 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info-> 2111 public_base, 2112 PUBLIC_GLOBAL)); 2113 *p_mfw_ver = 2114 ecore_rd(p_hwfn, p_ptt, 2115 SECTION_ADDR(global_offsize, 2116 0) + OFFSETOF(struct public_global, mfw_ver)); 2117 2118 if (p_running_bundle_id != OSAL_NULL) { 2119 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt, 2120 SECTION_ADDR(global_offsize, 2121 0) + 2122 OFFSETOF(struct public_global, 2123 running_bundle_id)); 2124 } 2125 2126 return ECORE_SUCCESS; 2127 } 2128 2129 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, 2130 struct ecore_ptt *p_ptt, 2131 u32 *p_media_type) 2132 { 2133 2134 /* TODO - Add support for VFs */ 2135 if (IS_VF(p_hwfn->p_dev)) 2136 return ECORE_INVAL; 2137 2138 if (!ecore_mcp_is_init(p_hwfn)) { 2139 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n"); 2140 return ECORE_BUSY; 2141 } 2142 2143 if (!p_ptt) { 2144 *p_media_type = MEDIA_UNSPECIFIED; 2145 return ECORE_INVAL; 2146 } else { 2147 *p_media_type = ecore_rd(p_hwfn, p_ptt, 2148 p_hwfn->mcp_info->port_addr + 2149 OFFSETOF(struct public_port, 2150 media_type)); 2151 } 2152 2153 return ECORE_SUCCESS; 2154 } 2155 2156 /* @DPDK */ 2157 /* Old MFW has a global configuration for all PFs regarding RDMA support */ 2158 static void 2159 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn, 2160 enum ecore_pci_personality *p_proto) 2161 { 2162 *p_proto = ECORE_PCI_ETH; 2163 2164 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 2165 "According to Legacy capabilities, L2 personality is %08x\n", 2166 (u32)*p_proto); 2167 } 2168
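/* Newer MFW images can report the personality per PF. The DPDK build only
 * supports the L2 (Ethernet) personality, so the trimmed helper below
 * reports ECORE_PCI_ETH unconditionally; the legacy helper above remains
 * as the fallback path in ecore_mcp_get_shmem_proto().
 */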
2169 /* @DPDK */ 2170 static enum _ecore_status_t 2171 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn, 2172 struct ecore_ptt *p_ptt, 2173 enum ecore_pci_personality *p_proto) 2174 { 2175 u32 resp = 0, param = 0; 2176 *p_proto = ECORE_PCI_ETH; /* @DPDK - L2 is the only supported personality */ 2177 2178 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 2179 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n", 2180 (u32)*p_proto, resp, param); 2181 return ECORE_SUCCESS; 2182 } 2183 2184 static enum _ecore_status_t 2185 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn, 2186 struct public_func *p_info, 2187 struct ecore_ptt *p_ptt, 2188 enum ecore_pci_personality *p_proto) 2189 { 2190 enum _ecore_status_t rc = ECORE_SUCCESS; 2191 2192 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { 2193 case FUNC_MF_CFG_PROTOCOL_ETHERNET: 2194 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) != 2195 ECORE_SUCCESS) 2196 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto); 2197 break; 2198 default: 2199 rc = ECORE_INVAL; 2200 } 2201 2202 return rc; 2203 } 2204 2205 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn, 2206 struct ecore_ptt *p_ptt) 2207 { 2208 struct ecore_mcp_function_info *info; 2209 struct public_func shmem_info; 2210 2211 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); 2212 info = &p_hwfn->mcp_info->func_info; 2213 2214 info->pause_on_host = (shmem_info.config & 2215 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0; 2216 2217 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt, 2218 &info->protocol)) { 2219 DP_ERR(p_hwfn, "Unknown personality %08x\n", 2220 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); 2221 return ECORE_INVAL; 2222 } 2223 2224 ecore_read_pf_bandwidth(p_hwfn, &shmem_info); 2225 2226 if (shmem_info.mac_upper || shmem_info.mac_lower) { 2227 info->mac[0] = (u8)(shmem_info.mac_upper >> 8); 2228 info->mac[1] = (u8)(shmem_info.mac_upper); 2229 info->mac[2] = (u8)(shmem_info.mac_lower >> 24); 2230 info->mac[3] = (u8)(shmem_info.mac_lower >> 16); 2231 info->mac[4] = (u8)(shmem_info.mac_lower >> 8); 2232 info->mac[5] = (u8)(shmem_info.mac_lower); 2233 } else { 2234 /* TODO - are there protocols for which there's no MAC? */ 2235 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n"); 2236 } 2237 2238 /* TODO - are these calculations true for a BE machine? */
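/* The FCoE WWNs are stored in shmem as pairs of 32-bit words; each pair is
 * folded into a single 64-bit value below, with the "lower" word placed in
 * the upper 32 bits to match the shmem layout.
 */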
2239 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper | 2240 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32); 2241 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper | 2242 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32); 2243 2244 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK); 2245 2246 info->mtu = (u16)shmem_info.mtu_size; 2247 2248 if (info->mtu == 0) 2249 info->mtu = 1500; 2250 2251 2252 2253 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP), 2254 "Read configuration from shmem: pause_on_host %02x" 2255 " protocol %02x BW [%02x - %02x]" 2256 " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx" 2257 " node %lx ovlan %04x\n", 2258 info->pause_on_host, info->protocol, 2259 info->bandwidth_min, info->bandwidth_max, 2260 info->mac[0], info->mac[1], info->mac[2], 2261 info->mac[3], info->mac[4], info->mac[5], 2262 (unsigned long)info->wwn_port, 2263 (unsigned long)info->wwn_node, info->ovlan); 2264 2265 return ECORE_SUCCESS; 2266 } 2267 2268 struct ecore_mcp_link_params 2269 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn) 2270 { 2271 if (!p_hwfn || !p_hwfn->mcp_info) 2272 return OSAL_NULL; 2273 return &p_hwfn->mcp_info->link_input; 2274 } 2275 2276 struct ecore_mcp_link_state 2277 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn) 2278 { 2279 if (!p_hwfn || !p_hwfn->mcp_info) 2280 return OSAL_NULL; 2281 2282 #ifndef ASIC_ONLY 2283 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 2284 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n"); 2285 p_hwfn->mcp_info->link_output.link_up = true; 2286 } 2287 #endif 2288 2289 return &p_hwfn->mcp_info->link_output; 2290 } 2291 2292 struct ecore_mcp_link_capabilities 2293 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn) 2294 { 2295 if (!p_hwfn || !p_hwfn->mcp_info) 2296 return OSAL_NULL; 2297 return &p_hwfn->mcp_info->link_capabilities; 2298 } 2299 2300 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn, 2301 struct ecore_ptt *p_ptt) 2302 { 2303 u32 resp = 0, param = 0; 2304 enum _ecore_status_t rc; 2305 2306 rc = ecore_mcp_cmd(p_hwfn, p_ptt, 2307 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param); 2308 2309 /* Wait for the drain to complete before returning */ 2310 OSAL_MSLEEP(1020); 2311 2312 return rc; 2313 } 2314 2315 const struct ecore_mcp_function_info 2316 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn) 2317 { 2318 if (!p_hwfn || !p_hwfn->mcp_info) 2319 return OSAL_NULL; 2320 return &p_hwfn->mcp_info->func_info; 2321 } 2322 2323 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn, 2324 struct ecore_ptt *p_ptt, u32 personalities) 2325 { 2326 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT; 2327 struct public_func shmem_info; 2328 int i, count = 0, num_pfs; 2329 2330 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev); 2331 2332 for (i = 0; i < num_pfs; i++) { 2333 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, 2334 MCP_PF_ID_BY_REL(p_hwfn, i)); 2335 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE) 2336 continue; 2337 2338 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt, 2339 &protocol) != 2340 ECORE_SUCCESS) 2341 continue; 2342 2343 if ((1 << ((u32)protocol)) & personalities) 2344 count++; 2345 } 2346 2347 return count; 2348 } 2349 2350 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn, 2351 struct ecore_ptt *p_ptt, 2352 u32 *p_flash_size) 2353 { 2354 u32 flash_size; 2355 2356 #ifndef ASIC_ONLY 2357 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 2358 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash 
size\n"); 2359 return ECORE_INVAL; 2360 } 2361 #endif 2362 2363 if (IS_VF(p_hwfn->p_dev)) 2364 return ECORE_INVAL; 2365 2366 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); 2367 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> 2368 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; 2369 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET)); 2370 2371 *p_flash_size = flash_size; 2372 2373 return ECORE_SUCCESS; 2374 } 2375 2376 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn, 2377 struct ecore_ptt *p_ptt) 2378 { 2379 struct ecore_dev *p_dev = p_hwfn->p_dev; 2380 2381 if (p_dev->recov_in_prog) { 2382 DP_NOTICE(p_hwfn, false, 2383 "Avoid triggering a recovery since such a process" 2384 " is already in progress\n"); 2385 return ECORE_AGAIN; 2386 } 2387 2388 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n"); 2389 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1); 2390 2391 return ECORE_SUCCESS; 2392 } 2393 2394 static enum _ecore_status_t 2395 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn, 2396 struct ecore_ptt *p_ptt, 2397 u8 vf_id, u8 num) 2398 { 2399 u32 resp = 0, param = 0, rc_param = 0; 2400 enum _ecore_status_t rc; 2401 2402 /* Only Leader can configure MSIX, and need to take CMT into account */ 2403 2404 if (!IS_LEAD_HWFN(p_hwfn)) 2405 return ECORE_SUCCESS; 2406 num *= p_hwfn->p_dev->num_hwfns; 2407 2408 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) & 2409 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK; 2410 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) & 2411 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK; 2412 2413 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param, 2414 &resp, &rc_param); 2415 2416 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) { 2417 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n", 2418 vf_id); 2419 rc = ECORE_INVAL; 2420 } else { 2421 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2422 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n", 2423 num, vf_id); 2424 } 2425 2426 return rc; 2427 } 2428 2429 static enum _ecore_status_t 2430 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn, 2431 struct ecore_ptt *p_ptt, 2432 u8 num) 2433 { 2434 u32 resp = 0, param = num, rc_param = 0; 2435 enum _ecore_status_t rc; 2436 2437 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX, 2438 param, &resp, &rc_param); 2439 2440 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) { 2441 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n"); 2442 rc = ECORE_INVAL; 2443 } else { 2444 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2445 "Requested 0x%02x MSI-x interrupts for VFs\n", 2446 num); 2447 } 2448 2449 return rc; 2450 } 2451 2452 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn, 2453 struct ecore_ptt *p_ptt, 2454 u8 vf_id, u8 num) 2455 { 2456 if (ECORE_IS_BB(p_hwfn->p_dev)) 2457 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num); 2458 else 2459 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num); 2460 } 2461 2462 enum _ecore_status_t 2463 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 2464 struct ecore_mcp_drv_version *p_ver) 2465 { 2466 struct ecore_mcp_mb_params mb_params; 2467 struct drv_version_stc drv_version; 2468 u32 num_words, i; 2469 void *p_name; 2470 OSAL_BE32 val; 2471 enum _ecore_status_t rc; 2472 2473 #ifndef ASIC_ONLY 2474 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) 2475 return ECORE_SUCCESS; 2476 #endif 2477 2478 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version)); 2479 drv_version.version = p_ver->version; 2480 num_words = 
(MCP_DRV_VER_STR_SIZE - 4) / 4; 2481 for (i = 0; i < num_words; i++) { 2482 /* The driver name is expected to be in a big-endian format */ 2483 p_name = &p_ver->name[i * sizeof(u32)]; 2484 val = OSAL_CPU_TO_BE32(*(u32 *)p_name); 2485 *(u32 *)&drv_version.name[i * sizeof(u32)] = val; 2486 } 2487 2488 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); 2489 mb_params.cmd = DRV_MSG_CODE_SET_VERSION; 2490 mb_params.p_data_src = &drv_version; 2491 mb_params.data_src_size = sizeof(drv_version); 2492 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 2493 if (rc != ECORE_SUCCESS) 2494 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 2495 2496 return rc; 2497 } 2498 2499 /* A maximal 100 msec waiting time for the MCP to halt */ 2500 #define ECORE_MCP_HALT_SLEEP_MS 10 2501 #define ECORE_MCP_HALT_MAX_RETRIES 10 2502 2503 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn, 2504 struct ecore_ptt *p_ptt) 2505 { 2506 u32 resp = 0, param = 0, cpu_state, cnt = 0; 2507 enum _ecore_status_t rc; 2508 2509 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, 2510 &param); 2511 if (rc != ECORE_SUCCESS) { 2512 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 2513 return rc; 2514 } 2515 2516 do { 2517 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS); 2518 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); 2519 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) 2520 break; 2521 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES); 2522 2523 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) { 2524 DP_NOTICE(p_hwfn, false, 2525 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", 2526 ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state); 2527 return ECORE_BUSY; 2528 } 2529 2530 ecore_mcp_cmd_set_blocking(p_hwfn, true); 2531 2532 return ECORE_SUCCESS; 2533 } 2534 2535 #define ECORE_MCP_RESUME_SLEEP_MS 10 2536 2537 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn, 2538 struct ecore_ptt *p_ptt) 2539 { 2540 u32 cpu_mode, cpu_state; 2541 2542 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); 2543 2544 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); 2545 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT; 2546 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode); 2547 2548 OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS); 2549 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); 2550 2551 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) { 2552 DP_NOTICE(p_hwfn, false, 2553 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", 2554 cpu_mode, cpu_state); 2555 return ECORE_BUSY; 2556 } 2557 2558 ecore_mcp_cmd_set_blocking(p_hwfn, false); 2559 2560 return ECORE_SUCCESS; 2561 } 2562 2563 enum _ecore_status_t 2564 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn, 2565 struct ecore_ptt *p_ptt, 2566 enum ecore_ov_client client) 2567 { 2568 u32 resp = 0, param = 0; 2569 u32 drv_mb_param; 2570 enum _ecore_status_t rc; 2571 2572 switch (client) { 2573 case ECORE_OV_CLIENT_DRV: 2574 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS; 2575 break; 2576 case ECORE_OV_CLIENT_USER: 2577 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER; 2578 break; 2579 case ECORE_OV_CLIENT_VENDOR_SPEC: 2580 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC; 2581 break; 2582 default: 2583 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client); 2584 return ECORE_INVAL; 2585 } 2586 2587 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG, 2588 drv_mb_param, &resp, &param); 2589 if (rc != ECORE_SUCCESS) 2590 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 2591 2592 return rc; 2593 } 2594
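/* The OV (management view) helpers below report driver-owned state to the
 * MFW so that out-of-band management agents can expose it. Each helper maps
 * an ecore-level enum onto its DRV_MSG_CODE_* / DRV_MB_PARAM_* encoding and
 * sends it through ecore_mcp_cmd().
 */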
2595 enum _ecore_status_t 2596 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn, 2597 struct ecore_ptt *p_ptt, 2598 enum ecore_ov_driver_state drv_state) 2599 { 2600 u32 resp = 0, param = 0; 2601 u32 drv_mb_param; 2602 enum _ecore_status_t rc; 2603 2604 switch (drv_state) { 2605 case ECORE_OV_DRIVER_STATE_NOT_LOADED: 2606 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED; 2607 break; 2608 case ECORE_OV_DRIVER_STATE_DISABLED: 2609 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED; 2610 break; 2611 case ECORE_OV_DRIVER_STATE_ACTIVE: 2612 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE; 2613 break; 2614 default: 2615 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state); 2616 return ECORE_INVAL; 2617 } 2618 2619 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE, 2620 drv_mb_param, &resp, &param); 2621 if (rc != ECORE_SUCCESS) 2622 DP_ERR(p_hwfn, "Failed to send driver state\n"); 2623 2624 return rc; 2625 } 2626 2627 enum _ecore_status_t 2628 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 2629 struct ecore_fc_npiv_tbl *p_table) 2630 { 2631 return 0; 2632 } 2633 2634 enum _ecore_status_t 2635 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, 2636 struct ecore_ptt *p_ptt, u16 mtu) 2637 { 2638 return 0; 2639 } 2640 2641 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn, 2642 struct ecore_ptt *p_ptt, 2643 enum ecore_led_mode mode) 2644 { 2645 u32 resp = 0, param = 0, drv_mb_param; 2646 enum _ecore_status_t rc; 2647 2648 switch (mode) { 2649 case ECORE_LED_MODE_ON: 2650 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON; 2651 break; 2652 case ECORE_LED_MODE_OFF: 2653 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF; 2654 break; 2655 case ECORE_LED_MODE_RESTORE: 2656 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER; 2657 break; 2658 default: 2659 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode); 2660 return ECORE_INVAL; 2661 } 2662 2663 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE, 2664 drv_mb_param, &resp, &param); 2665 if (rc != ECORE_SUCCESS) 2666 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 2667 2668 return rc; 2669 } 2670 2671 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn, 2672 struct ecore_ptt *p_ptt, 2673 u32 mask_parities) 2674 { 2675 u32 resp = 0, param = 0; 2676 enum _ecore_status_t rc; 2677 2678 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES, 2679 mask_parities, &resp, &param); 2680 2681 if (rc != ECORE_SUCCESS) { 2682 DP_ERR(p_hwfn, 2683 "MCP response failure for mask parities, aborting\n"); 2684 } else if (resp != FW_MSG_CODE_OK) { 2685 DP_ERR(p_hwfn, 2686 "MCP did not ack mask parity request. Old MFW?\n"); 2687 rc = ECORE_INVAL; 2688 } 2689 2690 return rc; 2691 } 2692
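/* NVM access helpers. Transfers are split into chunks of at most
 * MCP_DRV_NVM_BUF_LEN bytes; for each chunk the NVM offset and the chunk
 * length are packed into the single mailbox param, and the data itself
 * travels through the mailbox union buffer. An illustrative read of an
 * entire flash image (a sketch - error handling omitted, and p_img is
 * assumed to be a caller-allocated buffer of at least flash_size bytes):
 *
 *	u32 flash_size;
 *
 *	ecore_mcp_get_flash_size(p_hwfn, p_ptt, &flash_size);
 *	rc = ecore_mcp_nvm_read(p_dev, 0, p_img, flash_size);
 */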
Old MFW?\n"); 2687 rc = ECORE_INVAL; 2688 } 2689 2690 return rc; 2691 } 2692 2693 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr, 2694 u8 *p_buf, u32 len) 2695 { 2696 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2697 u32 bytes_left, offset, bytes_to_copy, buf_size; 2698 u32 nvm_offset, resp, param; 2699 struct ecore_ptt *p_ptt; 2700 enum _ecore_status_t rc = ECORE_SUCCESS; 2701 2702 p_ptt = ecore_ptt_acquire(p_hwfn); 2703 if (!p_ptt) 2704 return ECORE_BUSY; 2705 2706 bytes_left = len; 2707 offset = 0; 2708 while (bytes_left > 0) { 2709 bytes_to_copy = OSAL_MIN_T(u32, bytes_left, 2710 MCP_DRV_NVM_BUF_LEN); 2711 nvm_offset = (addr + offset) | (bytes_to_copy << 2712 DRV_MB_PARAM_NVM_LEN_OFFSET); 2713 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, 2714 DRV_MSG_CODE_NVM_READ_NVRAM, 2715 nvm_offset, &resp, ¶m, &buf_size, 2716 (u32 *)(p_buf + offset)); 2717 if (rc != ECORE_SUCCESS) { 2718 DP_NOTICE(p_dev, false, 2719 "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n", 2720 rc); 2721 resp = FW_MSG_CODE_ERROR; 2722 break; 2723 } 2724 2725 if (resp != FW_MSG_CODE_NVM_OK) { 2726 DP_NOTICE(p_dev, false, 2727 "nvm read failed, resp = 0x%08x\n", resp); 2728 rc = ECORE_UNKNOWN_ERROR; 2729 break; 2730 } 2731 2732 /* This can be a lengthy process, and it's possible scheduler 2733 * isn't preemptible. Sleep a bit to prevent CPU hogging. 2734 */ 2735 if (bytes_left % 0x1000 < 2736 (bytes_left - buf_size) % 0x1000) 2737 OSAL_MSLEEP(1); 2738 2739 offset += buf_size; 2740 bytes_left -= buf_size; 2741 } 2742 2743 p_dev->mcp_nvm_resp = resp; 2744 ecore_ptt_release(p_hwfn, p_ptt); 2745 2746 return rc; 2747 } 2748 2749 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd, 2750 u32 addr, u8 *p_buf, u32 len) 2751 { 2752 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2753 struct ecore_ptt *p_ptt; 2754 u32 resp, param; 2755 enum _ecore_status_t rc; 2756 2757 p_ptt = ecore_ptt_acquire(p_hwfn); 2758 if (!p_ptt) 2759 return ECORE_BUSY; 2760 2761 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, 2762 (cmd == ECORE_PHY_CORE_READ) ? 
2749 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd, 2750 u32 addr, u8 *p_buf, u32 len) 2751 { 2752 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2753 struct ecore_ptt *p_ptt; 2754 u32 resp, param; 2755 enum _ecore_status_t rc; 2756 2757 p_ptt = ecore_ptt_acquire(p_hwfn); 2758 if (!p_ptt) 2759 return ECORE_BUSY; 2760 2761 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, 2762 (cmd == ECORE_PHY_CORE_READ) ? 2763 DRV_MSG_CODE_PHY_CORE_READ : 2764 DRV_MSG_CODE_PHY_RAW_READ, 2765 addr, &resp, &param, &len, (u32 *)p_buf); 2766 if (rc != ECORE_SUCCESS) 2767 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); 2768 2769 p_dev->mcp_nvm_resp = resp; 2770 ecore_ptt_release(p_hwfn, p_ptt); 2771 2772 return rc; 2773 } 2774 2775 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf) 2776 { 2777 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2778 struct ecore_ptt *p_ptt; 2779 2780 p_ptt = ecore_ptt_acquire(p_hwfn); 2781 if (!p_ptt) 2782 return ECORE_BUSY; 2783 2784 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp)); 2785 ecore_ptt_release(p_hwfn, p_ptt); 2786 2787 return ECORE_SUCCESS; 2788 } 2789 2790 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr) 2791 { 2792 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2793 struct ecore_ptt *p_ptt; 2794 u32 resp, param; 2795 enum _ecore_status_t rc; 2796 2797 p_ptt = ecore_ptt_acquire(p_hwfn); 2798 if (!p_ptt) 2799 return ECORE_BUSY; 2800 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr, 2801 &resp, &param); 2802 p_dev->mcp_nvm_resp = resp; 2803 ecore_ptt_release(p_hwfn, p_ptt); 2804 2805 return rc; 2806 } 2807 2808 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev, 2809 u32 addr) 2810 { 2811 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2812 struct ecore_ptt *p_ptt; 2813 u32 resp, param; 2814 enum _ecore_status_t rc; 2815 2816 p_ptt = ecore_ptt_acquire(p_hwfn); 2817 if (!p_ptt) 2818 return ECORE_BUSY; 2819 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr, 2820 &resp, &param); 2821 p_dev->mcp_nvm_resp = resp; 2822 ecore_ptt_release(p_hwfn, p_ptt); 2823 2824 return rc; 2825 } 2826
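/* Writing a file image to NVM is driven by the caller as a sequence:
 * ecore_mcp_nvm_put_file_begin() announces the target address, then
 * repeated ECORE_PUT_FILE_DATA calls to ecore_mcp_nvm_write() stream the
 * payload in MCP_DRV_NVM_BUF_LEN chunks; the MFW acknowledges the final
 * chunk with FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK.
 */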
2827 /* rc is initialized to ECORE_INVAL because the while loop below 2828 * is never entered when len is 0 2829 */ 2830 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd, 2831 u32 addr, u8 *p_buf, u32 len) 2832 { 2833 u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param; 2834 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2835 enum _ecore_status_t rc = ECORE_INVAL; 2836 struct ecore_ptt *p_ptt; 2837 2838 p_ptt = ecore_ptt_acquire(p_hwfn); 2839 if (!p_ptt) 2840 return ECORE_BUSY; 2841 2842 switch (cmd) { 2843 case ECORE_PUT_FILE_DATA: 2844 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA; 2845 break; 2846 case ECORE_NVM_WRITE_NVRAM: 2847 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM; 2848 break; 2849 case ECORE_EXT_PHY_FW_UPGRADE: 2850 nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE; 2851 break; 2852 default: 2853 DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n", 2854 cmd); 2855 rc = ECORE_INVAL; 2856 goto out; 2857 } 2858 2859 buf_idx = 0; 2860 while (buf_idx < len) { 2861 buf_size = OSAL_MIN_T(u32, (len - buf_idx), 2862 MCP_DRV_NVM_BUF_LEN); 2863 nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) | 2864 addr) + 2865 buf_idx; 2866 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset, 2867 &resp, &param, buf_size, 2868 (u32 *)&p_buf[buf_idx]); 2869 if (rc != ECORE_SUCCESS) { 2870 DP_NOTICE(p_dev, false, 2871 "ecore_mcp_nvm_write() failed, rc = %d\n", 2872 rc); 2873 resp = FW_MSG_CODE_ERROR; 2874 break; 2875 } 2876 2877 if (resp != FW_MSG_CODE_OK && 2878 resp != FW_MSG_CODE_NVM_OK && 2879 resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) { 2880 DP_NOTICE(p_dev, false, 2881 "nvm write failed, resp = 0x%08x\n", resp); 2882 rc = ECORE_UNKNOWN_ERROR; 2883 break; 2884 } 2885 2886 /* This can be a lengthy process, and it's possible scheduler 2887 * isn't preemptible. Sleep a bit to prevent CPU hogging. 2888 */ 2889 if (buf_idx % 0x1000 > 2890 (buf_idx + buf_size) % 0x1000) 2891 OSAL_MSLEEP(1); 2892 2893 buf_idx += buf_size; 2894 } 2895 2896 p_dev->mcp_nvm_resp = resp; 2897 out: 2898 ecore_ptt_release(p_hwfn, p_ptt); 2899 2900 return rc; 2901 } 2902 2903 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd, 2904 u32 addr, u8 *p_buf, u32 len) 2905 { 2906 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2907 struct ecore_ptt *p_ptt; 2908 u32 resp, param, nvm_cmd; 2909 enum _ecore_status_t rc; 2910 2911 p_ptt = ecore_ptt_acquire(p_hwfn); 2912 if (!p_ptt) 2913 return ECORE_BUSY; 2914 2915 nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE : 2916 DRV_MSG_CODE_PHY_RAW_WRITE; 2917 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr, 2918 &resp, &param, len, (u32 *)p_buf); 2919 if (rc != ECORE_SUCCESS) 2920 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); 2921 p_dev->mcp_nvm_resp = resp; 2922 ecore_ptt_release(p_hwfn, p_ptt); 2923 2924 return rc; 2925 } 2926 2927 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev, 2928 u32 addr) 2929 { 2930 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2931 struct ecore_ptt *p_ptt; 2932 u32 resp, param; 2933 enum _ecore_status_t rc; 2934 2935 p_ptt = ecore_ptt_acquire(p_hwfn); 2936 if (!p_ptt) 2937 return ECORE_BUSY; 2938 2939 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr, 2940 &resp, &param); 2941 p_dev->mcp_nvm_resp = resp; 2942 ecore_ptt_release(p_hwfn, p_ptt); 2943 2944 return rc; 2945 } 2946
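/* Transceiver (SFP) access tunnels I2C transactions through the MFW: the
 * port, I2C address, offset and size are all packed into the single mailbox
 * param, and each command moves at most MAX_I2C_TRANSACTION_SIZE bytes.
 * A FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT response is mapped to ECORE_NODEV.
 */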
rc = %d.\n", 2977 rc); 2978 return rc; 2979 } 2980 2981 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) 2982 return ECORE_NODEV; 2983 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) 2984 return ECORE_UNKNOWN_ERROR; 2985 2986 offset += buf_size; 2987 bytes_left -= buf_size; 2988 } 2989 2990 return ECORE_SUCCESS; 2991 } 2992 2993 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn, 2994 struct ecore_ptt *p_ptt, 2995 u32 port, u32 addr, u32 offset, 2996 u32 len, u8 *p_buf) 2997 { 2998 u32 buf_idx, buf_size, nvm_offset, resp, param; 2999 enum _ecore_status_t rc; 3000 3001 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) | 3002 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET); 3003 buf_idx = 0; 3004 while (buf_idx < len) { 3005 buf_size = OSAL_MIN_T(u32, (len - buf_idx), 3006 MAX_I2C_TRANSACTION_SIZE); 3007 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | 3008 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); 3009 nvm_offset |= ((offset + buf_idx) << 3010 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET); 3011 nvm_offset |= (buf_size << 3012 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET); 3013 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, 3014 DRV_MSG_CODE_TRANSCEIVER_WRITE, 3015 nvm_offset, &resp, ¶m, buf_size, 3016 (u32 *)&p_buf[buf_idx]); 3017 if (rc != ECORE_SUCCESS) { 3018 DP_NOTICE(p_hwfn, false, 3019 "Failed to send a transceiver write command to the MFW. rc = %d.\n", 3020 rc); 3021 return rc; 3022 } 3023 3024 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) 3025 return ECORE_NODEV; 3026 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) 3027 return ECORE_UNKNOWN_ERROR; 3028 3029 buf_idx += buf_size; 3030 } 3031 3032 return ECORE_SUCCESS; 3033 } 3034 3035 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn, 3036 struct ecore_ptt *p_ptt, 3037 u16 gpio, u32 *gpio_val) 3038 { 3039 enum _ecore_status_t rc = ECORE_SUCCESS; 3040 u32 drv_mb_param = 0, rsp; 3041 3042 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET); 3043 3044 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ, 3045 drv_mb_param, &rsp, gpio_val); 3046 3047 if (rc != ECORE_SUCCESS) 3048 return rc; 3049 3050 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) 3051 return ECORE_UNKNOWN_ERROR; 3052 3053 return ECORE_SUCCESS; 3054 } 3055 3056 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn, 3057 struct ecore_ptt *p_ptt, 3058 u16 gpio, u16 gpio_val) 3059 { 3060 enum _ecore_status_t rc = ECORE_SUCCESS; 3061 u32 drv_mb_param = 0, param, rsp; 3062 3063 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) | 3064 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET); 3065 3066 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE, 3067 drv_mb_param, &rsp, ¶m); 3068 3069 if (rc != ECORE_SUCCESS) 3070 return rc; 3071 3072 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) 3073 return ECORE_UNKNOWN_ERROR; 3074 3075 return ECORE_SUCCESS; 3076 } 3077 3078 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn, 3079 struct ecore_ptt *p_ptt, 3080 u16 gpio, u32 *gpio_direction, 3081 u32 *gpio_ctrl) 3082 { 3083 u32 drv_mb_param = 0, rsp, val = 0; 3084 enum _ecore_status_t rc = ECORE_SUCCESS; 3085 3086 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET; 3087 3088 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO, 3089 drv_mb_param, &rsp, &val); 3090 if (rc != ECORE_SUCCESS) 3091 return rc; 3092 3093 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >> 3094 DRV_MB_PARAM_GPIO_DIRECTION_OFFSET; 3095 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >> 3096 
3035 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn, 3036 struct ecore_ptt *p_ptt, 3037 u16 gpio, u32 *gpio_val) 3038 { 3039 enum _ecore_status_t rc = ECORE_SUCCESS; 3040 u32 drv_mb_param = 0, rsp; 3041 3042 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET); 3043 3044 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ, 3045 drv_mb_param, &rsp, gpio_val); 3046 3047 if (rc != ECORE_SUCCESS) 3048 return rc; 3049 3050 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) 3051 return ECORE_UNKNOWN_ERROR; 3052 3053 return ECORE_SUCCESS; 3054 } 3055 3056 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn, 3057 struct ecore_ptt *p_ptt, 3058 u16 gpio, u16 gpio_val) 3059 { 3060 enum _ecore_status_t rc = ECORE_SUCCESS; 3061 u32 drv_mb_param = 0, param, rsp; 3062 3063 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) | 3064 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET); 3065 3066 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE, 3067 drv_mb_param, &rsp, &param); 3068 3069 if (rc != ECORE_SUCCESS) 3070 return rc; 3071 3072 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) 3073 return ECORE_UNKNOWN_ERROR; 3074 3075 return ECORE_SUCCESS; 3076 } 3077 3078 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn, 3079 struct ecore_ptt *p_ptt, 3080 u16 gpio, u32 *gpio_direction, 3081 u32 *gpio_ctrl) 3082 { 3083 u32 drv_mb_param = 0, rsp, val = 0; 3084 enum _ecore_status_t rc = ECORE_SUCCESS; 3085 3086 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET; 3087 3088 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO, 3089 drv_mb_param, &rsp, &val); 3090 if (rc != ECORE_SUCCESS) 3091 return rc; 3092 3093 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >> 3094 DRV_MB_PARAM_GPIO_DIRECTION_OFFSET; 3095 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >> 3096 DRV_MB_PARAM_GPIO_CTRL_OFFSET; 3097 3098 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) 3099 return ECORE_UNKNOWN_ERROR; 3100 3101 return ECORE_SUCCESS; 3102 } 3103 3104 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn, 3105 struct ecore_ptt *p_ptt) 3106 { 3107 u32 drv_mb_param = 0, rsp, param; 3108 enum _ecore_status_t rc = ECORE_SUCCESS; 3109 3110 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST << 3111 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); 3112 3113 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 3114 drv_mb_param, &rsp, &param); 3115 3116 if (rc != ECORE_SUCCESS) 3117 return rc; 3118 3119 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || 3120 (param != DRV_MB_PARAM_BIST_RC_PASSED)) 3121 rc = ECORE_UNKNOWN_ERROR; 3122 3123 return rc; 3124 } 3125 3126 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn, 3127 struct ecore_ptt *p_ptt) 3128 { 3129 u32 drv_mb_param, rsp, param; 3130 enum _ecore_status_t rc = ECORE_SUCCESS; 3131 3132 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST << 3133 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); 3134 3135 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 3136 drv_mb_param, &rsp, &param); 3137 3138 if (rc != ECORE_SUCCESS) 3139 return rc; 3140 3141 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || 3142 (param != DRV_MB_PARAM_BIST_RC_PASSED)) 3143 rc = ECORE_UNKNOWN_ERROR; 3144 3145 return rc; 3146 } 3147 3148 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images( 3149 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images) 3150 { 3151 u32 drv_mb_param = 0, rsp; 3152 enum _ecore_status_t rc = ECORE_SUCCESS; 3153 3154 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES << 3155 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); 3156 3157 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 3158 drv_mb_param, &rsp, num_images); 3159 3160 if (rc != ECORE_SUCCESS) 3161 return rc; 3162 3163 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) 3164 rc = ECORE_UNKNOWN_ERROR; 3165 3166 return rc; 3167 } 3168 3169 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att( 3170 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3171 struct bist_nvm_image_att *p_image_att, u32 image_index) 3172 { 3173 u32 buf_size, nvm_offset, resp, param; 3174 enum _ecore_status_t rc; 3175 3176 nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX << 3177 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); 3178 nvm_offset |= (image_index << 3179 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET); 3180 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 3181 nvm_offset, &resp, &param, &buf_size, 3182 (u32 *)p_image_att); 3183 if (rc != ECORE_SUCCESS) 3184 return rc; 3185 3186 if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || 3187 (p_image_att->return_code != 1)) 3188 rc = ECORE_UNKNOWN_ERROR; 3189 3190 return rc; 3191 } 3192
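/* Each sensor reading arrives packed in a single 32-bit word; the loop in
 * the function below unpacks the location, thresholds and current
 * temperature using the SENSOR_LOCATION/THRESHOLD_HIGH/
 * CRITICAL_TEMPERATURE/CURRENT_TEMP masks and offsets.
 */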
3193 enum _ecore_status_t 3194 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn, 3195 struct ecore_ptt *p_ptt, 3196 struct ecore_temperature_info *p_temp_info) 3197 { 3198 struct ecore_temperature_sensor *p_temp_sensor; 3199 struct temperature_status_stc mfw_temp_info; 3200 struct ecore_mcp_mb_params mb_params; 3201 u32 val; 3202 enum _ecore_status_t rc; 3203 u8 i; 3204 3205 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); 3206 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE; 3207 mb_params.p_data_dst = &mfw_temp_info; 3208 mb_params.data_dst_size = sizeof(mfw_temp_info); 3209 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 3210 if (rc != ECORE_SUCCESS) 3211 return rc; 3212 3213 OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS); 3214 p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors, 3215 ECORE_MAX_NUM_OF_SENSORS); 3216 for (i = 0; i < p_temp_info->num_sensors; i++) { 3217 val = mfw_temp_info.sensor[i]; 3218 p_temp_sensor = &p_temp_info->sensors[i]; 3219 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >> 3220 SENSOR_LOCATION_OFFSET; 3221 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >> 3222 THRESHOLD_HIGH_OFFSET; 3223 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >> 3224 CRITICAL_TEMPERATURE_OFFSET; 3225 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >> 3226 CURRENT_TEMP_OFFSET; 3227 } 3228 3229 return ECORE_SUCCESS; 3230 } 3231 3232 enum _ecore_status_t ecore_mcp_get_mba_versions( 3233 struct ecore_hwfn *p_hwfn, 3234 struct ecore_ptt *p_ptt, 3235 struct ecore_mba_vers *p_mba_vers) 3236 { 3237 u32 buf_size, resp, param; 3238 enum _ecore_status_t rc; 3239 3240 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION, 3241 0, &resp, &param, &buf_size, 3242 &p_mba_vers->mba_vers[0]); 3243 3244 if (rc != ECORE_SUCCESS) 3245 return rc; 3246 3247 if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) 3248 rc = ECORE_UNKNOWN_ERROR; 3249 3250 if (buf_size != MCP_DRV_NVM_BUF_LEN) 3251 rc = ECORE_UNKNOWN_ERROR; 3252 3253 return rc; 3254 } 3255 3256 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn, 3257 struct ecore_ptt *p_ptt, 3258 u64 *num_events) 3259 { 3260 u32 rsp; 3261 3262 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS, 3263 0, &rsp, (u32 *)num_events); 3264 } 3265 3266 static enum resource_id_enum 3267 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id) 3268 { 3269 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID; 3270 3271 switch (res_id) { 3272 case ECORE_SB: 3273 mfw_res_id = RESOURCE_NUM_SB_E; 3274 break; 3275 case ECORE_L2_QUEUE: 3276 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E; 3277 break; 3278 case ECORE_VPORT: 3279 mfw_res_id = RESOURCE_NUM_VPORT_E; 3280 break; 3281 case ECORE_RSS_ENG: 3282 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E; 3283 break; 3284 case ECORE_PQ: 3285 mfw_res_id = RESOURCE_NUM_PQ_E; 3286 break; 3287 case ECORE_RL: 3288 mfw_res_id = RESOURCE_NUM_RL_E; 3289 break; 3290 case ECORE_MAC: 3291 case ECORE_VLAN: 3292 /* Each VFC resource can accommodate both a MAC and a VLAN */ 3293 mfw_res_id = RESOURCE_VFC_FILTER_E; 3294 break; 3295 case ECORE_ILT: 3296 mfw_res_id = RESOURCE_ILT_E; 3297 break; 3298 case ECORE_LL2_QUEUE: 3299 mfw_res_id = RESOURCE_LL2_QUEUE_E; 3300 break; 3301 case ECORE_RDMA_CNQ_RAM: 3302 case ECORE_CMDQS_CQS: 3303 /* CNQ/CMDQS are the same resource */ 3304 mfw_res_id = RESOURCE_CQS_E; 3305 break; 3306 case ECORE_RDMA_STATS_QUEUE: 3307 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E; 3308 break; 3309 case ECORE_BDQ: 3310 mfw_res_id = RESOURCE_BDQ_E; 3311 break; 3312 default: 3313 break; 3314 } 3315 3316 return mfw_res_id; 3317 } 3318 3319 #define ECORE_RESC_ALLOC_VERSION_MAJOR 2 3320 #define ECORE_RESC_ALLOC_VERSION_MINOR 0 3321 #define ECORE_RESC_ALLOC_VERSION \ 3322 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \ 3323 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \ 3324 (ECORE_RESC_ALLOC_VERSION_MINOR << \ 3325 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET)) 3326 3327 struct ecore_resc_alloc_in_params { 3328 u32 cmd; 3329 enum ecore_resources res_id; 3330 u32 resc_max_val; 3331 }; 3332 3333 struct ecore_resc_alloc_out_params { 3334 u32 mcp_resp; 3335 u32 mcp_param; 3336 u32 resc_num; 3337 u32 
resc_start; 3338 u32 vf_resc_num; 3339 u32 vf_resc_start; 3340 u32 flags; 3341 }; 3342 3343 #define ECORE_RECOVERY_PROLOG_SLEEP_MS 100 3344 3345 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev) 3346 { 3347 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 3348 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 3349 enum _ecore_status_t rc; 3350 3351 /* Allow ongoing PCIe transactions to complete */ 3352 OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS); 3353 3354 /* Clear the PF's internal FID_enable in the PXP */ 3355 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); 3356 if (rc != ECORE_SUCCESS) 3357 DP_NOTICE(p_hwfn, false, 3358 "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n", 3359 rc); 3360 3361 return rc; 3362 } 3363 3364 static enum _ecore_status_t 3365 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn, 3366 struct ecore_ptt *p_ptt, 3367 struct ecore_resc_alloc_in_params *p_in_params, 3368 struct ecore_resc_alloc_out_params *p_out_params) 3369 { 3370 struct ecore_mcp_mb_params mb_params; 3371 struct resource_info mfw_resc_info; 3372 enum _ecore_status_t rc; 3373 3374 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info)); 3375 3376 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id); 3377 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) { 3378 DP_ERR(p_hwfn, 3379 "Failed to match resource %d [%s] with the MFW resources\n", 3380 p_in_params->res_id, 3381 ecore_hw_get_resc_name(p_in_params->res_id)); 3382 return ECORE_INVAL; 3383 } 3384 3385 switch (p_in_params->cmd) { 3386 case DRV_MSG_SET_RESOURCE_VALUE_MSG: 3387 mfw_resc_info.size = p_in_params->resc_max_val; 3388 /* Fallthrough */ 3389 case DRV_MSG_GET_RESOURCE_ALLOC_MSG: 3390 break; 3391 default: 3392 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n", 3393 p_in_params->cmd); 3394 return ECORE_INVAL; 3395 } 3396 3397 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); 3398 mb_params.cmd = p_in_params->cmd; 3399 mb_params.param = ECORE_RESC_ALLOC_VERSION; 3400 mb_params.p_data_src = &mfw_resc_info; 3401 mb_params.data_src_size = sizeof(mfw_resc_info); 3402 mb_params.p_data_dst = mb_params.p_data_src; 3403 mb_params.data_dst_size = mb_params.data_src_size; 3404 3405 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3406 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n", 3407 p_in_params->cmd, p_in_params->res_id, 3408 ecore_hw_get_resc_name(p_in_params->res_id), 3409 GET_MFW_FIELD(mb_params.param, 3410 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), 3411 GET_MFW_FIELD(mb_params.param, 3412 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), 3413 p_in_params->resc_max_val); 3414 3415 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 3416 if (rc != ECORE_SUCCESS) 3417 return rc; 3418 3419 p_out_params->mcp_resp = mb_params.mcp_resp; 3420 p_out_params->mcp_param = mb_params.mcp_param; 3421 p_out_params->resc_num = mfw_resc_info.size; 3422 p_out_params->resc_start = mfw_resc_info.offset; 3423 p_out_params->vf_resc_num = mfw_resc_info.vf_size; 3424 p_out_params->vf_resc_start = mfw_resc_info.vf_offset; 3425 p_out_params->flags = mfw_resc_info.flags; 3426 3427 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3428 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n", 3429 GET_MFW_FIELD(p_out_params->mcp_param, 3430 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), 3431 GET_MFW_FIELD(p_out_params->mcp_param, 3432 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), 3433 p_out_params->resc_num, p_out_params->resc_start, 3434 
p_out_params->vf_resc_num, p_out_params->vf_resc_start, 3435 p_out_params->flags); 3436 3437 return ECORE_SUCCESS; 3438 } 3439 3440 enum _ecore_status_t 3441 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3442 enum ecore_resources res_id, u32 resc_max_val, 3443 u32 *p_mcp_resp) 3444 { 3445 struct ecore_resc_alloc_out_params out_params; 3446 struct ecore_resc_alloc_in_params in_params; 3447 enum _ecore_status_t rc; 3448 3449 OSAL_MEM_ZERO(&in_params, sizeof(in_params)); 3450 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG; 3451 in_params.res_id = res_id; 3452 in_params.resc_max_val = resc_max_val; 3453 OSAL_MEM_ZERO(&out_params, sizeof(out_params)); 3454 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, 3455 &out_params); 3456 if (rc != ECORE_SUCCESS) 3457 return rc; 3458 3459 *p_mcp_resp = out_params.mcp_resp; 3460 3461 return ECORE_SUCCESS; 3462 } 3463 3464 enum _ecore_status_t 3465 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3466 enum ecore_resources res_id, u32 *p_mcp_resp, 3467 u32 *p_resc_num, u32 *p_resc_start) 3468 { 3469 struct ecore_resc_alloc_out_params out_params; 3470 struct ecore_resc_alloc_in_params in_params; 3471 enum _ecore_status_t rc; 3472 3473 OSAL_MEM_ZERO(&in_params, sizeof(in_params)); 3474 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG; 3475 in_params.res_id = res_id; 3476 OSAL_MEM_ZERO(&out_params, sizeof(out_params)); 3477 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, 3478 &out_params); 3479 if (rc != ECORE_SUCCESS) 3480 return rc; 3481 3482 *p_mcp_resp = out_params.mcp_resp; 3483 3484 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) { 3485 *p_resc_num = out_params.resc_num; 3486 *p_resc_start = out_params.resc_start; 3487 } 3488 3489 return ECORE_SUCCESS; 3490 } 3491 3492 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn, 3493 struct ecore_ptt *p_ptt) 3494 { 3495 u32 mcp_resp, mcp_param; 3496 3497 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0, 3498 &mcp_resp, &mcp_param); 3499 } 3500 3501 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn, 3502 struct ecore_ptt *p_ptt, 3503 u32 param, u32 *p_mcp_resp, 3504 u32 *p_mcp_param) 3505 { 3506 enum _ecore_status_t rc; 3507 3508 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param, 3509 p_mcp_resp, p_mcp_param); 3510 if (rc != ECORE_SUCCESS) 3511 return rc; 3512 3513 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) { 3514 DP_INFO(p_hwfn, 3515 "The resource command is unsupported by the MFW\n"); 3516 return ECORE_NOTIMPL; 3517 } 3518 3519 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) { 3520 u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE); 3521 3522 DP_NOTICE(p_hwfn, false, 3523 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n", 3524 param, opcode); 3525 return ECORE_INVAL; 3526 } 3527 3528 return rc; 3529 } 3530 3531 enum _ecore_status_t 3532 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3533 struct ecore_resc_lock_params *p_params) 3534 { 3535 u32 param = 0, mcp_resp, mcp_param; 3536 u8 opcode; 3537 enum _ecore_status_t rc; 3538 3539 switch (p_params->timeout) { 3540 case ECORE_MCP_RESC_LOCK_TO_DEFAULT: 3541 opcode = RESOURCE_OPCODE_REQ; 3542 p_params->timeout = 0; 3543 break; 3544 case ECORE_MCP_RESC_LOCK_TO_NONE: 3545 opcode = RESOURCE_OPCODE_REQ_WO_AGING; 3546 p_params->timeout = 0; 3547 break; 3548 default: 3549 opcode = RESOURCE_OPCODE_REQ_W_AGING; 3550 break; 3551 } 3552 3553 
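/* Pack the request: the resource id, opcode and aging timeout all travel
 * in a single 32-bit mailbox param; the response param carries the current
 * owner and a grant/busy opcode, which is decoded further below.
 */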
SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); 3554 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); 3555 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout); 3556 3557 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3558 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n", 3559 param, p_params->timeout, opcode, p_params->resource); 3560 3561 /* Attempt to acquire the resource */ 3562 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, 3563 &mcp_param); 3564 if (rc != ECORE_SUCCESS) 3565 return rc; 3566 3567 /* Analyze the response */ 3568 p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER); 3569 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); 3570 3571 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3572 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n", 3573 mcp_param, opcode, p_params->owner); 3574 3575 switch (opcode) { 3576 case RESOURCE_OPCODE_GNT: 3577 p_params->b_granted = true; 3578 break; 3579 case RESOURCE_OPCODE_BUSY: 3580 p_params->b_granted = false; 3581 break; 3582 default: 3583 DP_NOTICE(p_hwfn, false, 3584 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n", 3585 mcp_param, opcode); 3586 return ECORE_INVAL; 3587 } 3588 3589 return ECORE_SUCCESS; 3590 } 3591 3592 enum _ecore_status_t 3593 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3594 struct ecore_resc_lock_params *p_params) 3595 { 3596 u32 retry_cnt = 0; 3597 enum _ecore_status_t rc; 3598 3599 do { 3600 /* No need for an interval before the first iteration */ 3601 if (retry_cnt) { 3602 if (p_params->sleep_b4_retry) { 3603 u16 retry_interval_in_ms = 3604 DIV_ROUND_UP(p_params->retry_interval, 3605 1000); 3606 3607 OSAL_MSLEEP(retry_interval_in_ms); 3608 } else { 3609 OSAL_UDELAY(p_params->retry_interval); 3610 } 3611 } 3612 3613 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params); 3614 if (rc != ECORE_SUCCESS) 3615 return rc; 3616 3617 if (p_params->b_granted) 3618 break; 3619 } while (retry_cnt++ < p_params->retry_num); 3620 3621 return ECORE_SUCCESS; 3622 } 3623 3624 void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock, 3625 struct ecore_resc_unlock_params *p_unlock, 3626 enum ecore_resc_lock resource, 3627 bool b_is_permanent) 3628 { 3629 if (p_lock != OSAL_NULL) { 3630 OSAL_MEM_ZERO(p_lock, sizeof(*p_lock)); 3631 3632 /* Permanent resources don't require aging, and there's no 3633 * point in trying to acquire them more than once since it's 3634 * unexpected another entity would release them. 3635 */ 3636 if (b_is_permanent) { 3637 p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE; 3638 } else { 3639 p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT; 3640 p_lock->retry_interval = 3641 ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT; 3642 p_lock->sleep_b4_retry = true; 3643 } 3644 3645 p_lock->resource = resource; 3646 } 3647 3648 if (p_unlock != OSAL_NULL) { 3649 OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock)); 3650 p_unlock->resource = resource; 3651 } 3652 } 3653 3654 enum _ecore_status_t 3655 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3656 struct ecore_resc_unlock_params *p_params) 3657 { 3658 u32 param = 0, mcp_resp, mcp_param; 3659 u8 opcode; 3660 enum _ecore_status_t rc; 3661 3662 opcode = p_params->b_force ? 
RESOURCE_OPCODE_FORCE_RELEASE 3663 : RESOURCE_OPCODE_RELEASE; 3664 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); 3665 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); 3666 3667 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3668 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n", 3669 param, opcode, p_params->resource); 3670 3671 /* Attempt to release the resource */ 3672 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, 3673 &mcp_param); 3674 if (rc != ECORE_SUCCESS) 3675 return rc; 3676 3677 /* Analyze the response */ 3678 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); 3679 3680 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3681 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n", 3682 mcp_param, opcode); 3683 3684 switch (opcode) { 3685 case RESOURCE_OPCODE_RELEASED_PREVIOUS: 3686 DP_INFO(p_hwfn, 3687 "Resource unlock request for an already released resource [%d]\n", 3688 p_params->resource); 3689 /* Fallthrough */ 3690 case RESOURCE_OPCODE_RELEASED: 3691 p_params->b_released = true; 3692 break; 3693 case RESOURCE_OPCODE_WRONG_OWNER: 3694 p_params->b_released = false; 3695 break; 3696 default: 3697 DP_NOTICE(p_hwfn, false, 3698 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n", 3699 mcp_param, opcode); 3700 return ECORE_INVAL; 3701 } 3702 3703 return ECORE_SUCCESS; 3704 } 3705 3706 bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn) 3707 { 3708 return !!(p_hwfn->mcp_info->capabilities & 3709 FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ); 3710 } 3711 3712 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn, 3713 struct ecore_ptt *p_ptt) 3714 { 3715 u32 mcp_resp; 3716 enum _ecore_status_t rc; 3717 3718 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT, 3719 0, &mcp_resp, &p_hwfn->mcp_info->capabilities); 3720 if (rc == ECORE_SUCCESS) 3721 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE), 3722 "MFW supported features: %08x\n", 3723 p_hwfn->mcp_info->capabilities); 3724 3725 return rc; 3726 } 3727 3728 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn, 3729 struct ecore_ptt *p_ptt) 3730 { 3731 u32 mcp_resp, mcp_param, features; 3732 3733 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ | 3734 DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE | 3735 DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK; 3736 3737 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, 3738 features, &mcp_resp, &mcp_param); 3739 } 3740 3741 enum _ecore_status_t 3742 ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3743 struct ecore_mcp_drv_attr *p_drv_attr) 3744 { 3745 struct attribute_cmd_write_stc attr_cmd_write; 3746 enum _attribute_commands_e mfw_attr_cmd; 3747 struct ecore_mcp_mb_params mb_params; 3748 enum _ecore_status_t rc; 3749 3750 switch (p_drv_attr->attr_cmd) { 3751 case ECORE_MCP_DRV_ATTR_CMD_READ: 3752 mfw_attr_cmd = ATTRIBUTE_CMD_READ; 3753 break; 3754 case ECORE_MCP_DRV_ATTR_CMD_WRITE: 3755 mfw_attr_cmd = ATTRIBUTE_CMD_WRITE; 3756 break; 3757 case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR: 3758 mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR; 3759 break; 3760 case ECORE_MCP_DRV_ATTR_CMD_CLEAR: 3761 mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR; 3762 break; 3763 default: 3764 DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n", 3765 p_drv_attr->attr_cmd); 3766 return ECORE_INVAL; 3767 } 3768 3769 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); 3770 mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE; 3771 SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY, 
3772 p_drv_attr->attr_num); 3773 SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD, 3774 mfw_attr_cmd); 3775 if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) { 3776 OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write)); 3777 attr_cmd_write.val = p_drv_attr->val; 3778 attr_cmd_write.mask = p_drv_attr->mask; 3779 attr_cmd_write.offset = p_drv_attr->offset; 3780 3781 mb_params.p_data_src = &attr_cmd_write; 3782 mb_params.data_src_size = sizeof(attr_cmd_write); 3783 } 3784 3785 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 3786 if (rc != ECORE_SUCCESS) 3787 return rc; 3788 3789 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { 3790 DP_INFO(p_hwfn, 3791 "The attribute command is not supported by the MFW\n"); 3792 return ECORE_NOTIMPL; 3793 } else if (mb_params.mcp_resp != FW_MSG_CODE_OK) { 3794 DP_INFO(p_hwfn, 3795 "Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n", 3796 mb_params.mcp_resp, p_drv_attr->attr_cmd, 3797 p_drv_attr->attr_num); 3798 return ECORE_INVAL; 3799 } 3800 3801 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3802 "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n", 3803 p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num, 3804 p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset, 3805 mb_params.mcp_param); 3806 3807 if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ || 3808 p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR) 3809 p_drv_attr->val = mb_params.mcp_param; 3810 3811 return ECORE_SUCCESS; 3812 } 3813 3814 void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3815 u32 offset, u32 val) 3816 { 3817 struct ecore_mcp_mb_params mb_params = {0}; 3818 enum _ecore_status_t rc = ECORE_SUCCESS; 3819 u32 dword = val; 3820 3821 mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG; 3822 mb_params.param = offset; 3823 mb_params.p_data_src = &dword; 3824 mb_params.data_src_size = sizeof(dword); 3825 3826 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 3827 if (rc != ECORE_SUCCESS) { 3828 DP_NOTICE(p_hwfn, false, 3829 "Failed to send the WOL write request, rc = %d\n", rc); 3830 return; 3831 } 3832 3833 if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) 3834 DP_NOTICE(p_hwfn, false, 3835 "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n", 3836 val, offset, mb_params.mcp_resp); 3837 } 3838 3839
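/* An illustrative pairing of the resource lock/unlock helpers above (a
 * sketch, not driver code; "resc" stands for any enum ecore_resc_lock
 * value):
 *
 *	struct ecore_resc_lock_params lock;
 *	struct ecore_resc_unlock_params unlock;
 *
 *	ecore_mcp_resc_lock_default_init(&lock, &unlock, resc, false);
 *	if (ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock) == ECORE_SUCCESS &&
 *	    lock.b_granted) {
 *		... access the MFW-arbitrated resource ...
 *		ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 *	}
 */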