/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "ecore_sp_commands.h"
#include "ecore_cxt.h"

#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)	/* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_OFFSET)

#define MCP_BYTES_PER_MBIT_OFFSET 17

#ifndef ASIC_ONLY
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
#endif

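/* With CHIP_MCP_RESP_ITER_US = 10, the retry budgets above work out to:
 *	ECORE_DRV_MB_MAX_RETRIES: 500,000 iterations * 10 usec = 5 sec
 *	ECORE_MCP_RESET_RETRIES:   50,000 iterations * 10 usec = 500 msec
 * On emulation, EMUL_MCP_RESP_ITER_US (1 sec) replaces the 10 usec step.
 */
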
bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	OSAL_BE32 tmp;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;
#endif

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
		    OSAL_BE32_TO_CPU(tmp);
	}
}

struct ecore_mcp_cmd_elem {
	osal_list_entry_t list;
	struct ecore_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
		       struct ecore_mcp_mb_params *p_mb_params,
		       u16 expected_seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
				 sizeof(*p_cmd_elem));
	if (!p_cmd_elem) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
		goto out;
	}

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
				   struct ecore_mcp_cmd_elem *p_cmd_elem)
{
	OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
	OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
				 struct ecore_mcp_cmd_elem) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return OSAL_NULL;
}

enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;

		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
					      &p_hwfn->mcp_info->cmd_list, list,
					      struct ecore_mcp_cmd_elem) {
			ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

#ifdef CONFIG_ECORE_LOCK_ALLOC
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
	}

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;
		return ECORE_INVAL;
	}
#endif

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x"
		   " mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
	    DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
	    DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
#endif
	OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
	OSAL_SPIN_LOCK_INIT(&p_info->link_lock);

	OSAL_LIST_INIT(&p_info->cmd_list);

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}

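/* Illustrative init-time call order (a sketch; the real flow in
 * ecore_hw_prepare() adds error handling around these calls):
 *
 *	rc = ecore_mcp_cmd_init(p_hwfn, p_ptt);
 *	if (rc == ECORE_SUCCESS)
 *		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
 *	...
 *	if (ecore_mcp_is_init(p_hwfn))
 *		...mailbox commands may be sent from this point on...
 */
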
static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between init
	 * time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}

enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
#endif

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return ECORE_ABORTED;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the FW up to 500 msec (50*1000*10usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}

/* Must be called while cmd_lock is acquired */
static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
						   struct ecore_mcp_cmd_elem,
						   list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}

/* Must be called while cmd_lock is acquired */
static enum _ecore_status_t
ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params *p_mb_params;
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return ECORE_AGAIN;

	p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return ECORE_UNKNOWN_ERROR;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != OSAL_NULL &&
	    p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      OFFSETOF(struct public_drv_mb,
					       union_data);
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return ECORE_SUCCESS;
}

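/* A sketch of the driver<->MFW mailbox handshake implemented above and in
 * __ecore_mcp_cmd_and_union() below (illustrative pseudo-flow; the MFW side
 * is described here only by its observable effect on the shmem mailbox):
 *
 *	driver:	drv_mb_header = cmd | seq_num	(seq_num = ++drv_mb_seq)
 *	MFW:	fw_mb_header  = resp | seq_num	(echoes the same seq_num)
 *	driver:	polls fw_mb_header until its FW_MSG_SEQ_NUMBER_MASK bits
 *		match drv_mb_seq, then reads fw_mb_param / union_data.
 */
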
/* Must be called while cmd_lock is acquired */
static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_mb_params *p_mb_params,
				      u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);
	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
			    p_mb_params->data_src_size);
	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
			sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}

static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
				       bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}

void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;

	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn, false,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}

static enum _ecore_status_t
_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mcp_mb_params *p_mb_params,
			 u32 max_retries, u32 delay)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 cnt = 0;
	u16 seq_num;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (!ecore_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_UDELAY(delay);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_AGAIN;
	}

	/* Send the mailbox command */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = ECORE_NOMEM;
		goto err;
	}

	__ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		OSAL_UDELAY(delay);
		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		ecore_mcp_print_cpu_info(p_hwfn, p_ptt);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

		ecore_mcp_cmd_set_blocking(p_hwfn, true);
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
		return ECORE_AGAIN;
	}

	ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp, p_mb_params->mcp_param,
		   (cnt * delay) / 1000, (cnt * delay) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return ECORE_SUCCESS;

err:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}

static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct ecore_mcp_mb_params *p_mb_params)
{
	osal_size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 delay = CHIP_MCP_RESP_ITER_US;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		max_retries /= 10;
#endif

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size, p_mb_params->data_dst_size,
		       union_data_size);
		return ECORE_INVAL;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_ABORTED;
	}

	return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
					delay);
}

enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
			loaded--;
			loaded_port[p_hwfn->port_id]--;
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
				   loaded);
		}
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

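/* Illustrative usage of ecore_mcp_cmd() (a sketch mirroring how the
 * wrappers below call it; LOAD_DONE is just an example command):
 *
 *	u32 resp = 0, param = 0;
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0,
 *			   &resp, &param);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	(resp already has the sequence bits cleared - see FW_MSG_CODE_MASK)
 */
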
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size, u32 *i_buf)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size, u32 *o_buf)
{
	struct ecore_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	/* @DPDK */
	OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));

	return ECORE_SUCCESS;
}

#ifndef ASIC_ONLY
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
				    u32 *p_load_code)
{
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	if (!loaded)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	else if (!loaded_port[p_hwfn->port_id])
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	else
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	/* On CMT, always tell that it's engine */
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}
#endif

static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
			 enum ecore_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}

static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			   &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

#define CONFIG_ECORE_L2_BITMAP_IDX	(0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX	(0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX	(0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX	(0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX	(0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX	(0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX	(0x1 << 6)

static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}

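/* Worked example of the bitmap above (values follow directly from the
 * defines): with only CONFIG_ECORE_L2 and CONFIG_ECORE_SRIOV defined,
 * ecore_get_config_bitmap() returns
 *	CONFIG_ECORE_L2_BITMAP_IDX | CONFIG_ECORE_SRIOV_BITMAP_IDX = 0x3,
 * which ecore_mcp_load_req() below places in drv_ver_1.
 */
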
struct ecore_load_req_in_params {
	u8 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
#define ECORE_LOAD_REQ_HSI_VER_1	1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
		      p_in_params->timeout_val);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
		      p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
		    GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
		    GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
		    GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
		    LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}

static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
				   u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case ECORE_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case ECORE_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	}
}

enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};

static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
				    u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case ECORE_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case ECORE_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case ECORE_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}

enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role = 0, mfw_force_cmd;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role,
					     p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
						&mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}

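/* Illustrative caller sketch for ecore_mcp_load_req() (hypothetical
 * values; the real caller in the hw-init path also acts on the returned
 * p_params->load_code to decide between engine/port/function init):
 *
 *	struct ecore_load_req_params load_req_params;
 *
 *	OSAL_MEM_ZERO(&load_req_params, sizeof(load_req_params));
 *	load_req_params.drv_role = ECORE_DRV_ROLE_OS;
 *	rc = ecore_mcp_load_req(p_hwfn, p_ptt, &load_req_params);
 *	if (rc == ECORE_SUCCESS &&
 *	    load_req_params.load_code == FW_MSG_CODE_DRV_LOAD_ENGINE)
 *		...full engine init...
 */
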
enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
		return rc;
	}

	/* Check if there is a DID mismatch between nvm-cfg/efuse */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		DP_NOTICE(p_hwfn, false,
			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	u32 wol_param, mcp_resp, mcp_param;

	/* @DPDK */
	wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
			     &mcp_resp, &mcp_param);
}

enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params mb_params;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x],"
		   " path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}

enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}

static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port,
					      transceiver_data));

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw"
		   " [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    OFFSETOF(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_MFW_FIELD(transceiver_state,
					  ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");

	OSAL_TRANSCEIVER_UPDATE(p_hwfn);
}

static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			      OFFSETOF(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
}

static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct public_func *p_data,
				    int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	OSAL_MEM_ZERO(p_data, sizeof(*p_data));

	size = OSAL_MIN_T(u32, sizeof(*p_data),
			  SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
					      func_addr + (i << 2));

	return size;
}

static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw"
			   " [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					 OFFSETOF(struct public_port,
						  link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init) {
		/* Link indication with modern MFW arrives as per-PF
		 * indication.
		 */
		if (p_hwfn->mcp_info->capabilities &
		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
			struct public_func shmem_info;

			ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
						 MCP_PF_ID(p_hwfn));
			p_link->link_up = !!(shmem_info.status &
					     FUNC_STATUS_VIRTUAL_LINK_UP);
		} else {
			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
		}
	} else {
		p_link->link_up = false;
	}

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* We never store the total line speed, since p_link->speed is
	 * changed again later according to the bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
	    !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
	    !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	OSAL_LINK_UPDATE(p_hwfn);
out:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}

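/* Illustrative sketch of driving ecore_mcp_set_link() below with a forced
 * speed (hypothetical values; callers normally fill link_input from
 * user/nvm configuration, and speeds are in Mb/s as in the link handling
 * above):
 *
 *	struct ecore_mcp_link_params *params =
 *	    &p_hwfn->mcp_info->link_input;
 *
 *	params->speed.autoneg = false;
 *	params->speed.forced_speed = 25000;
 *	params->pause.forced_rx = true;
 *	params->pause.forced_tx = true;
 *	rc = ecore_mcp_set_link(p_hwfn, p_ptt, true);
 */
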
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	/* Set the shmem configuration according to params */
	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by ecore, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
	    params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
				   EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return ECORE_SUCCESS;
}

u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
	    PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}

static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery"
			  " process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}

static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
{
	enum ecore_mcp_protocol_type stats_type;
	union ecore_mcp_protocol_stats stats;
	struct ecore_mcp_mb_params mb_params;
	u32 hsi_param;
	enum _ecore_status_t rc;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	default:
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Invalid protocol type %d\n", type);
		return;
	}

	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}

static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
				    struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until the MFW/qlediag enforce those limitations, assume there is
	 * always a limit, and clamp the value to min `1' and max `100' if it
	 * isn't in range.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
	    FUNC_MF_CFG_MIN_BW_OFFSET;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
	    FUNC_MF_CFG_MAX_BW_OFFSET;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		      &param);
}

static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	DP_NOTICE(p_hwfn, false,
		  "Fan failure was detected on the network interface card"
		  " and it's going to be shut down.\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}

struct ecore_mdump_cmd_params {
	u32 cmd;
	void *p_data_src;
	u8 data_src_size;
	void *p_data_dst;
	u8 data_dst_size;
	u32 mcp_resp;
};

static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    struct ecore_mdump_cmd_params *p_mdump_cmd_params)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
	mb_params.param = p_mdump_cmd_params->cmd;
	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;

	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
		DP_INFO(p_hwfn,
			"The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
			p_mdump_cmd_params->cmd);
		rc = ECORE_NOTIMPL;
	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The mdump command is not supported by the MFW\n");
		rc = ECORE_NOTIMPL;
	}

	return rc;
}

static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 epoch)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
	mdump_cmd_params.p_data_src = &epoch;
	mdump_cmd_params.data_src_size = sizeof(epoch);

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

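/* Illustrative mdump sequence built from the wrappers in this file (a
 * sketch; epoch is whatever timestamp the caller wants retained):
 *
 *	ecore_mcp_mdump_set_values(p_hwfn, p_ptt, epoch);
 *	ecore_mcp_mdump_trigger(p_hwfn, p_ptt);
 *	...after the dump has been collected...
 *	ecore_mcp_mdump_clear_logs(p_hwfn, p_ptt);
 */
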
enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct mdump_config_stc *p_mdump_config)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
	mdump_cmd_params.p_data_dst = p_mdump_config;
	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);

	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
			mdump_cmd_params.mcp_resp);
		rc = ECORE_UNKNOWN_ERROR;
	}

	return rc;
}

enum _ecore_status_t
ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mdump_info *p_mdump_info)
{
	u32 addr, global_offsize, global_addr;
	struct mdump_config_stc mdump_config;
	enum _ecore_status_t rc;

	OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));

	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
				    PUBLIC_GLOBAL);
	global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	global_addr = SECTION_ADDR(global_offsize, 0);
	p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
					global_addr +
					OFFSETOF(struct public_global,
						 mdump_reason));

	if (p_mdump_info->reason) {
		rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_mdump_info->version = mdump_config.version;
		p_mdump_info->config = mdump_config.config;
		p_mdump_info->epoch = mdump_config.epoc;
		p_mdump_info->num_of_logs = mdump_config.num_of_logs;
		p_mdump_info->valid_logs = mdump_config.valid_logs;

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
			   p_mdump_info->reason, p_mdump_info->version,
			   p_mdump_info->config, p_mdump_info->epoch,
			   p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d\n", p_mdump_info->reason);
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

1832 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN; 1833 mdump_cmd_params.p_data_dst = &mfw_mdump_retain; 1834 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain); 1835 1836 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1837 if (rc != ECORE_SUCCESS) 1838 return rc; 1839 1840 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) { 1841 DP_INFO(p_hwfn, 1842 "Failed to get the mdump retained data [mcp_resp 0x%x]\n", 1843 mdump_cmd_params.mcp_resp); 1844 return ECORE_UNKNOWN_ERROR; 1845 } 1846 1847 p_mdump_retain->valid = mfw_mdump_retain.valid; 1848 p_mdump_retain->epoch = mfw_mdump_retain.epoch; 1849 p_mdump_retain->pf = mfw_mdump_retain.pf; 1850 p_mdump_retain->status = mfw_mdump_retain.status; 1851 1852 return ECORE_SUCCESS; 1853 } 1854 1855 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn, 1856 struct ecore_ptt *p_ptt) 1857 { 1858 struct ecore_mdump_cmd_params mdump_cmd_params; 1859 1860 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); 1861 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN; 1862 1863 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1864 } 1865 1866 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn, 1867 struct ecore_ptt *p_ptt) 1868 { 1869 struct ecore_mdump_retain_data mdump_retain; 1870 enum _ecore_status_t rc; 1871 1872 /* In CMT mode - no need for more than a single acknowledgment to the 1873 * MFW, and no more than a single notification to the upper driver. 1874 */ 1875 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev)) 1876 return; 1877 1878 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain); 1879 if (rc == ECORE_SUCCESS && mdump_retain.valid) { 1880 DP_NOTICE(p_hwfn, false, 1881 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n", 1882 mdump_retain.epoch, mdump_retain.pf, 1883 mdump_retain.status); 1884 } else { 1885 DP_NOTICE(p_hwfn, false, 1886 "The MFW notified that a critical error occurred in the device\n"); 1887 } 1888 1889 if (p_hwfn->p_dev->allow_mdump) { 1890 DP_NOTICE(p_hwfn, false, 1891 "Not acknowledging the notification to allow the MFW crash dump\n"); 1892 return; 1893 } 1894 1895 DP_NOTICE(p_hwfn, false, 1896 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n"); 1897 ecore_mcp_mdump_ack(p_hwfn, p_ptt); 1898 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); 1899 } 1900 1901 void 1902 ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 1903 { 1904 struct public_func shmem_info; 1905 u32 port_cfg, val; 1906 1907 if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) 1908 return; 1909 1910 OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info)); 1911 port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + 1912 OFFSETOF(struct public_port, oem_cfg_port)); 1913 val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE); 1914 if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) 1915 DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n", 1916 val); 1917 1918 val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE); 1919 if (val == OEM_CFG_SCHED_TYPE_ETS) 1920 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS; 1921 else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) 1922 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW; 1923 else 1924 DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n", 1925 val); 1926 1927 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, 1928 MCP_PF_ID(p_hwfn)); 1929 val = 
GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC); 1930 p_hwfn->ufp_info.tc = (u8)val; 1931 val = GET_MFW_FIELD(shmem_info.oem_cfg_func, 1932 OEM_CFG_FUNC_HOST_PRI_CTRL); 1933 if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) 1934 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC; 1935 else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) 1936 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS; 1937 else 1938 DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n", 1939 val); 1940 1941 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, 1942 "UFP shmem config: mode = %d tc = %d pri_type = %d\n", 1943 p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc, 1944 p_hwfn->ufp_info.pri_type); 1945 } 1946 1947 static enum _ecore_status_t 1948 ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 1949 { 1950 ecore_mcp_read_ufp_config(p_hwfn, p_ptt); 1951 1952 if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) { 1953 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; 1954 p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc; 1955 1956 ecore_qm_reconf(p_hwfn, p_ptt); 1957 } else { 1958 /* Merge UFP TC with the dcbx TC data */ 1959 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 1960 ECORE_DCBX_OPERATIONAL_MIB); 1961 } 1962 1963 /* update storm FW with negotiation results */ 1964 ecore_sp_pf_update_ufp(p_hwfn); 1965 1966 return ECORE_SUCCESS; 1967 } 1968 1969 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn, 1970 struct ecore_ptt *p_ptt) 1971 { 1972 struct ecore_mcp_info *info = p_hwfn->mcp_info; 1973 enum _ecore_status_t rc = ECORE_SUCCESS; 1974 bool found = false; 1975 u16 i; 1976 1977 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n"); 1978 1979 /* Read Messages from MFW */ 1980 ecore_mcp_read_mb(p_hwfn, p_ptt); 1981 1982 /* Compare current messages to old ones */ 1983 for (i = 0; i < info->mfw_mb_length; i++) { 1984 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i]) 1985 continue; 1986 1987 found = true; 1988 1989 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 1990 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", 1991 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); 1992 1993 switch (i) { 1994 case MFW_DRV_MSG_LINK_CHANGE: 1995 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false); 1996 break; 1997 case MFW_DRV_MSG_VF_DISABLED: 1998 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt); 1999 break; 2000 case MFW_DRV_MSG_LLDP_DATA_UPDATED: 2001 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2002 ECORE_DCBX_REMOTE_LLDP_MIB); 2003 break; 2004 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED: 2005 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2006 ECORE_DCBX_REMOTE_MIB); 2007 break; 2008 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: 2009 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2010 ECORE_DCBX_OPERATIONAL_MIB); 2011 /* clear the user-config cache */ 2012 OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0, 2013 sizeof(struct ecore_dcbx_set)); 2014 break; 2015 case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED: 2016 ecore_lldp_mib_update_event(p_hwfn, p_ptt); 2017 break; 2018 case MFW_DRV_MSG_OEM_CFG_UPDATE: 2019 ecore_mcp_handle_ufp_event(p_hwfn, p_ptt); 2020 break; 2021 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: 2022 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt); 2023 break; 2024 case MFW_DRV_MSG_ERROR_RECOVERY: 2025 ecore_mcp_handle_process_kill(p_hwfn, p_ptt); 2026 break; 2027 case MFW_DRV_MSG_GET_LAN_STATS: 2028 case MFW_DRV_MSG_GET_FCOE_STATS: 2029 case MFW_DRV_MSG_GET_ISCSI_STATS: 2030 case MFW_DRV_MSG_GET_RDMA_STATS: 2031 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i); 2032 break; 2033 case MFW_DRV_MSG_BW_UPDATE: 2034 
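/* The MFW (re-)negotiated the min/max bandwidth configuration */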
ecore_mcp_update_bw(p_hwfn, p_ptt); 2035 break; 2036 case MFW_DRV_MSG_FAILURE_DETECTED: 2037 ecore_mcp_handle_fan_failure(p_hwfn); 2038 break; 2039 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED: 2040 ecore_mcp_handle_critical_error(p_hwfn, p_ptt); 2041 break; 2042 default: 2043 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i); 2044 rc = ECORE_INVAL; 2045 } 2046 } 2047 2048 /* ACK everything */ 2049 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) { 2050 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]); 2051 2052 /* The MFW expects the answer in BE, so force the write in that format */ 2053 ecore_wr(p_hwfn, p_ptt, 2054 info->mfw_mb_addr + sizeof(u32) + 2055 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) * 2056 sizeof(u32) + i * sizeof(u32), val); 2057 } 2058 2059 if (!found) { 2060 DP_NOTICE(p_hwfn, false, 2061 "Received an MFW message indication but no" 2062 " new message!\n"); 2063 rc = ECORE_INVAL; 2064 } 2065 2066 /* Copy the new mfw messages into the shadow */ 2067 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length); 2068 2069 return rc; 2070 } 2071 2072 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn, 2073 struct ecore_ptt *p_ptt, 2074 u32 *p_mfw_ver, 2075 u32 *p_running_bundle_id) 2076 { 2077 u32 global_offsize; 2078 2079 #ifndef ASIC_ONLY 2080 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 2081 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n"); 2082 return ECORE_SUCCESS; 2083 } 2084 #endif 2085 2086 if (IS_VF(p_hwfn->p_dev)) { 2087 if (p_hwfn->vf_iov_info) { 2088 struct pfvf_acquire_resp_tlv *p_resp; 2089 2090 p_resp = &p_hwfn->vf_iov_info->acquire_resp; 2091 *p_mfw_ver = p_resp->pfdev_info.mfw_ver; 2092 return ECORE_SUCCESS; 2093 } else { 2094 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2095 "VF requested MFW version prior to ACQUIRE\n"); 2096 return ECORE_INVAL; 2097 } 2098 } 2099 2100 global_offsize = ecore_rd(p_hwfn, p_ptt, 2101 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info-> 2102 public_base, 2103 PUBLIC_GLOBAL)); 2104 *p_mfw_ver = 2105 ecore_rd(p_hwfn, p_ptt, 2106 SECTION_ADDR(global_offsize, 2107 0) + OFFSETOF(struct public_global, mfw_ver)); 2108 2109 if (p_running_bundle_id != OSAL_NULL) { 2110 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt, 2111 SECTION_ADDR(global_offsize, 2112 0) + 2113 OFFSETOF(struct public_global, 2114 running_bundle_id)); 2115 } 2116 2117 return ECORE_SUCCESS; 2118 } 2119 2120 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, 2121 struct ecore_ptt *p_ptt, 2122 u32 *p_media_type) 2123 { 2124 2125 /* TODO - Add support for VFs */ 2126 if (IS_VF(p_hwfn->p_dev)) 2127 return ECORE_INVAL; 2128 2129 if (!ecore_mcp_is_init(p_hwfn)) { 2130 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n"); 2131 return ECORE_BUSY; 2132 } 2133 2134 if (!p_ptt) { 2135 *p_media_type = MEDIA_UNSPECIFIED; 2136 return ECORE_INVAL; 2137 } else { 2138 *p_media_type = ecore_rd(p_hwfn, p_ptt, 2139 p_hwfn->mcp_info->port_addr + 2140 OFFSETOF(struct public_port, 2141 media_type)); 2142 } 2143 2144 return ECORE_SUCCESS; 2145 } 2146 2147 /* @DPDK */ 2148 /* Old MFW has a global configuration for all PFs regarding RDMA support */ 2149 static void 2150 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn, 2151 enum ecore_pci_personality *p_proto) 2152 { 2153 *p_proto = ECORE_PCI_ETH; 2154 2155 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 2156 "According to Legacy capabilities, L2 personality is %08x\n", 2157 (u32)*p_proto); 2158 } 2159 2160 /* @DPDK */ 2161 static enum _ecore_status_t 2162 ecore_mcp_get_shmem_proto_mfw(struct
ecore_hwfn *p_hwfn, 2163 struct ecore_ptt *p_ptt, 2164 enum ecore_pci_personality *p_proto) 2165 { 2166 u32 resp = 0, param = 0; 2167 *p_proto = ECORE_PCI_ETH; /* this @DPDK variant does not query the MFW; mirror the legacy default */ 2168 2169 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 2170 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n", 2171 (u32)*p_proto, resp, param); 2172 return ECORE_SUCCESS; 2173 } 2174 2175 static enum _ecore_status_t 2176 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn, 2177 struct public_func *p_info, 2178 struct ecore_ptt *p_ptt, 2179 enum ecore_pci_personality *p_proto) 2180 { 2181 enum _ecore_status_t rc = ECORE_SUCCESS; 2182 2183 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { 2184 case FUNC_MF_CFG_PROTOCOL_ETHERNET: 2185 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) != 2186 ECORE_SUCCESS) 2187 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto); 2188 break; 2189 default: 2190 rc = ECORE_INVAL; 2191 } 2192 2193 return rc; 2194 } 2195 2196 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn, 2197 struct ecore_ptt *p_ptt) 2198 { 2199 struct ecore_mcp_function_info *info; 2200 struct public_func shmem_info; 2201 2202 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); 2203 info = &p_hwfn->mcp_info->func_info; 2204 2205 info->pause_on_host = (shmem_info.config & 2206 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0; 2207 2208 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt, 2209 &info->protocol)) { 2210 DP_ERR(p_hwfn, "Unknown personality %08x\n", 2211 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); 2212 return ECORE_INVAL; 2213 } 2214 2215 ecore_read_pf_bandwidth(p_hwfn, &shmem_info); 2216 2217 if (shmem_info.mac_upper || shmem_info.mac_lower) { 2218 info->mac[0] = (u8)(shmem_info.mac_upper >> 8); 2219 info->mac[1] = (u8)(shmem_info.mac_upper); 2220 info->mac[2] = (u8)(shmem_info.mac_lower >> 24); 2221 info->mac[3] = (u8)(shmem_info.mac_lower >> 16); 2222 info->mac[4] = (u8)(shmem_info.mac_lower >> 8); 2223 info->mac[5] = (u8)(shmem_info.mac_lower); 2224 } else { 2225 /* TODO - are there protocols for which there's no MAC? */ 2226 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n"); 2227 } 2228 2229 /* TODO - are these calculations true for BE machine?
*/ 2230 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper | 2231 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32); 2232 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper | 2233 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32); 2234 2235 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK); 2236 2237 info->mtu = (u16)shmem_info.mtu_size; 2238 2239 if (info->mtu == 0) 2240 info->mtu = 1500; 2241 2244 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP), 2245 "Read configuration from shmem: pause_on_host %02x" 2246 " protocol %02x BW [%02x - %02x]" 2247 " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx" 2248 " node %lx ovlan %04x\n", 2249 info->pause_on_host, info->protocol, 2250 info->bandwidth_min, info->bandwidth_max, 2251 info->mac[0], info->mac[1], info->mac[2], 2252 info->mac[3], info->mac[4], info->mac[5], 2253 (unsigned long)info->wwn_port, 2254 (unsigned long)info->wwn_node, info->ovlan); 2255 2256 return ECORE_SUCCESS; 2257 } 2258 2259 struct ecore_mcp_link_params 2260 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn) 2261 { 2262 if (!p_hwfn || !p_hwfn->mcp_info) 2263 return OSAL_NULL; 2264 return &p_hwfn->mcp_info->link_input; 2265 } 2266 2267 struct ecore_mcp_link_state 2268 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn) 2269 { 2270 if (!p_hwfn || !p_hwfn->mcp_info) 2271 return OSAL_NULL; 2272 2273 #ifndef ASIC_ONLY 2274 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 2275 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n"); 2276 p_hwfn->mcp_info->link_output.link_up = true; 2277 } 2278 #endif 2279 2280 return &p_hwfn->mcp_info->link_output; 2281 } 2282 2283 struct ecore_mcp_link_capabilities 2284 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn) 2285 { 2286 if (!p_hwfn || !p_hwfn->mcp_info) 2287 return OSAL_NULL; 2288 return &p_hwfn->mcp_info->link_capabilities; 2289 } 2290 2291 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn, 2292 struct ecore_ptt *p_ptt) 2293 { 2294 u32 resp = 0, param = 0; 2295 enum _ecore_status_t rc; 2296 2297 rc = ecore_mcp_cmd(p_hwfn, p_ptt, 2298 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param); 2299 2300 /* Wait for the drain to complete before returning */ 2301 OSAL_MSLEEP(1020); 2302 2303 return rc; 2304 } 2305 2306 const struct ecore_mcp_function_info 2307 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn) 2308 { 2309 if (!p_hwfn || !p_hwfn->mcp_info) 2310 return OSAL_NULL; 2311 return &p_hwfn->mcp_info->func_info; 2312 } 2313 2314 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn, 2315 struct ecore_ptt *p_ptt, u32 personalities) 2316 { 2317 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT; 2318 struct public_func shmem_info; 2319 int i, count = 0, num_pfs; 2320 2321 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev); 2322 2323 for (i = 0; i < num_pfs; i++) { 2324 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, 2325 MCP_PF_ID_BY_REL(p_hwfn, i)); 2326 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE) 2327 continue; 2328 2329 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt, 2330 &protocol) != 2331 ECORE_SUCCESS) 2332 continue; 2333 2334 if ((1 << ((u32)protocol)) & personalities) 2335 count++; 2336 } 2337 2338 return count; 2339 } 2340 2341 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn, 2342 struct ecore_ptt *p_ptt, 2343 u32 *p_flash_size) 2344 { 2345 u32 flash_size; 2346 2347 #ifndef ASIC_ONLY 2348 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 2349 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash
size\n"); 2350 return ECORE_INVAL; 2351 } 2352 #endif 2353 2354 if (IS_VF(p_hwfn->p_dev)) 2355 return ECORE_INVAL; 2356 2357 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); 2358 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> 2359 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; 2360 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET)); 2361 2362 *p_flash_size = flash_size; 2363 2364 return ECORE_SUCCESS; 2365 } 2366 2367 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn, 2368 struct ecore_ptt *p_ptt) 2369 { 2370 struct ecore_dev *p_dev = p_hwfn->p_dev; 2371 2372 if (p_dev->recov_in_prog) { 2373 DP_NOTICE(p_hwfn, false, 2374 "Avoid triggering a recovery since such a process" 2375 " is already in progress\n"); 2376 return ECORE_AGAIN; 2377 } 2378 2379 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n"); 2380 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1); 2381 2382 return ECORE_SUCCESS; 2383 } 2384 2385 static enum _ecore_status_t 2386 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn, 2387 struct ecore_ptt *p_ptt, 2388 u8 vf_id, u8 num) 2389 { 2390 u32 resp = 0, param = 0, rc_param = 0; 2391 enum _ecore_status_t rc; 2392 2393 /* Only Leader can configure MSIX, and need to take CMT into account */ 2394 2395 if (!IS_LEAD_HWFN(p_hwfn)) 2396 return ECORE_SUCCESS; 2397 num *= p_hwfn->p_dev->num_hwfns; 2398 2399 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) & 2400 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK; 2401 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) & 2402 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK; 2403 2404 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param, 2405 &resp, &rc_param); 2406 2407 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) { 2408 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n", 2409 vf_id); 2410 rc = ECORE_INVAL; 2411 } else { 2412 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2413 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n", 2414 num, vf_id); 2415 } 2416 2417 return rc; 2418 } 2419 2420 static enum _ecore_status_t 2421 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn, 2422 struct ecore_ptt *p_ptt, 2423 u8 num) 2424 { 2425 u32 resp = 0, param = num, rc_param = 0; 2426 enum _ecore_status_t rc; 2427 2428 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX, 2429 param, &resp, &rc_param); 2430 2431 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) { 2432 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n"); 2433 rc = ECORE_INVAL; 2434 } else { 2435 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2436 "Requested 0x%02x MSI-x interrupts for VFs\n", 2437 num); 2438 } 2439 2440 return rc; 2441 } 2442 2443 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn, 2444 struct ecore_ptt *p_ptt, 2445 u8 vf_id, u8 num) 2446 { 2447 if (ECORE_IS_BB(p_hwfn->p_dev)) 2448 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num); 2449 else 2450 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num); 2451 } 2452 2453 enum _ecore_status_t 2454 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 2455 struct ecore_mcp_drv_version *p_ver) 2456 { 2457 struct ecore_mcp_mb_params mb_params; 2458 struct drv_version_stc drv_version; 2459 u32 num_words, i; 2460 void *p_name; 2461 OSAL_BE32 val; 2462 enum _ecore_status_t rc; 2463 2464 #ifndef ASIC_ONLY 2465 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) 2466 return ECORE_SUCCESS; 2467 #endif 2468 2469 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version)); 2470 drv_version.version = p_ver->version; 2471 num_words = 
(MCP_DRV_VER_STR_SIZE - 4) / 4; 2472 for (i = 0; i < num_words; i++) { 2473 /* The driver name is expected to be in a big-endian format */ 2474 p_name = &p_ver->name[i * sizeof(u32)]; 2475 val = OSAL_CPU_TO_BE32(*(u32 *)p_name); 2476 *(u32 *)&drv_version.name[i * sizeof(u32)] = val; 2477 } 2478 2479 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); 2480 mb_params.cmd = DRV_MSG_CODE_SET_VERSION; 2481 mb_params.p_data_src = &drv_version; 2482 mb_params.data_src_size = sizeof(drv_version); 2483 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 2484 if (rc != ECORE_SUCCESS) 2485 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 2486 2487 return rc; 2488 } 2489 2490 /* Allow up to 100 msec in total for the MCP to halt (10 retries x 10 msec) */ 2491 #define ECORE_MCP_HALT_SLEEP_MS 10 2492 #define ECORE_MCP_HALT_MAX_RETRIES 10 2493 2494 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn, 2495 struct ecore_ptt *p_ptt) 2496 { 2497 u32 resp = 0, param = 0, cpu_state, cnt = 0; 2498 enum _ecore_status_t rc; 2499 2500 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, 2501 &param); 2502 if (rc != ECORE_SUCCESS) { 2503 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 2504 return rc; 2505 } 2506 2507 do { 2508 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS); 2509 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); 2510 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) 2511 break; 2512 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES); 2513 2514 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) { 2515 DP_NOTICE(p_hwfn, false, 2516 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", 2517 ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state); 2518 return ECORE_BUSY; 2519 } 2520 2521 ecore_mcp_cmd_set_blocking(p_hwfn, true); 2522 2523 return ECORE_SUCCESS; 2524 } 2525 2526 #define ECORE_MCP_RESUME_SLEEP_MS 10 2527 2528 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn, 2529 struct ecore_ptt *p_ptt) 2530 { 2531 u32 cpu_mode, cpu_state; 2532 2533 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); 2534 2535 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); 2536 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT; 2537 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode); 2538 2539 OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS); 2540 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); 2541 2542 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) { 2543 DP_NOTICE(p_hwfn, false, 2544 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", 2545 cpu_mode, cpu_state); 2546 return ECORE_BUSY; 2547 } 2548 2549 ecore_mcp_cmd_set_blocking(p_hwfn, false); 2550 2551 return ECORE_SUCCESS; 2552 } 2553 2554 enum _ecore_status_t 2555 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn, 2556 struct ecore_ptt *p_ptt, 2557 enum ecore_ov_client client) 2558 { 2559 u32 resp = 0, param = 0; 2560 u32 drv_mb_param; 2561 enum _ecore_status_t rc; 2562 2563 switch (client) { 2564 case ECORE_OV_CLIENT_DRV: 2565 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS; 2566 break; 2567 case ECORE_OV_CLIENT_USER: 2568 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER; 2569 break; 2570 case ECORE_OV_CLIENT_VENDOR_SPEC: 2571 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC; 2572 break; 2573 default: 2574 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client); 2575 return ECORE_INVAL; 2576 } 2577 2578 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG, 2579 drv_mb_param, &resp, &param); 2580 if (rc != ECORE_SUCCESS) 2581 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 2582 2583 return rc; 2584 } 2585 2586 enum
_ecore_status_t 2587 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn, 2588 struct ecore_ptt *p_ptt, 2589 enum ecore_ov_driver_state drv_state) 2590 { 2591 u32 resp = 0, param = 0; 2592 u32 drv_mb_param; 2593 enum _ecore_status_t rc; 2594 2595 switch (drv_state) { 2596 case ECORE_OV_DRIVER_STATE_NOT_LOADED: 2597 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED; 2598 break; 2599 case ECORE_OV_DRIVER_STATE_DISABLED: 2600 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED; 2601 break; 2602 case ECORE_OV_DRIVER_STATE_ACTIVE: 2603 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE; 2604 break; 2605 default: 2606 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state); 2607 return ECORE_INVAL; 2608 } 2609 2610 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE, 2611 drv_mb_param, &resp, &param); 2612 if (rc != ECORE_SUCCESS) 2613 DP_ERR(p_hwfn, "Failed to send driver state\n"); 2614 2615 return rc; 2616 } 2617 2618 enum _ecore_status_t 2619 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 2620 struct ecore_fc_npiv_tbl *p_table) 2621 { 2622 /* Not implemented in this base driver - stub that reports success */ return ECORE_SUCCESS; 2623 } 2624 2625 enum _ecore_status_t 2626 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, 2627 struct ecore_ptt *p_ptt, u16 mtu) 2628 { 2629 /* Not implemented in this base driver - stub that reports success */ return ECORE_SUCCESS; 2630 } 2631 2632 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn, 2633 struct ecore_ptt *p_ptt, 2634 enum ecore_led_mode mode) 2635 { 2636 u32 resp = 0, param = 0, drv_mb_param; 2637 enum _ecore_status_t rc; 2638 2639 switch (mode) { 2640 case ECORE_LED_MODE_ON: 2641 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON; 2642 break; 2643 case ECORE_LED_MODE_OFF: 2644 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF; 2645 break; 2646 case ECORE_LED_MODE_RESTORE: 2647 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER; 2648 break; 2649 default: 2650 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode); 2651 return ECORE_INVAL; 2652 } 2653 2654 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE, 2655 drv_mb_param, &resp, &param); 2656 if (rc != ECORE_SUCCESS) 2657 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 2658 2659 return rc; 2660 } 2661 2662 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn, 2663 struct ecore_ptt *p_ptt, 2664 u32 mask_parities) 2665 { 2666 u32 resp = 0, param = 0; 2667 enum _ecore_status_t rc; 2668 2669 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES, 2670 mask_parities, &resp, &param); 2671 2672 if (rc != ECORE_SUCCESS) { 2673 DP_ERR(p_hwfn, 2674 "MCP response failure for mask parities, aborting\n"); 2675 } else if (resp != FW_MSG_CODE_OK) { 2676 DP_ERR(p_hwfn, 2677 "MCP did not ack mask parity request.
Old MFW?\n"); 2678 rc = ECORE_INVAL; 2679 } 2680 2681 return rc; 2682 } 2683 2684 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr, 2685 u8 *p_buf, u32 len) 2686 { 2687 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2688 u32 bytes_left, offset, bytes_to_copy, buf_size; 2689 u32 nvm_offset, resp, param; 2690 struct ecore_ptt *p_ptt; 2691 enum _ecore_status_t rc = ECORE_SUCCESS; 2692 2693 p_ptt = ecore_ptt_acquire(p_hwfn); 2694 if (!p_ptt) 2695 return ECORE_BUSY; 2696 2697 bytes_left = len; 2698 offset = 0; 2699 while (bytes_left > 0) { 2700 bytes_to_copy = OSAL_MIN_T(u32, bytes_left, 2701 MCP_DRV_NVM_BUF_LEN); 2702 nvm_offset = (addr + offset) | (bytes_to_copy << 2703 DRV_MB_PARAM_NVM_LEN_OFFSET); 2704 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, 2705 DRV_MSG_CODE_NVM_READ_NVRAM, 2706 nvm_offset, &resp, ¶m, &buf_size, 2707 (u32 *)(p_buf + offset)); 2708 if (rc != ECORE_SUCCESS) { 2709 DP_NOTICE(p_dev, false, 2710 "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n", 2711 rc); 2712 resp = FW_MSG_CODE_ERROR; 2713 break; 2714 } 2715 2716 if (resp != FW_MSG_CODE_NVM_OK) { 2717 DP_NOTICE(p_dev, false, 2718 "nvm read failed, resp = 0x%08x\n", resp); 2719 rc = ECORE_UNKNOWN_ERROR; 2720 break; 2721 } 2722 2723 /* This can be a lengthy process, and it's possible scheduler 2724 * isn't preemptible. Sleep a bit to prevent CPU hogging. 2725 */ 2726 if (bytes_left % 0x1000 < 2727 (bytes_left - buf_size) % 0x1000) 2728 OSAL_MSLEEP(1); 2729 2730 offset += buf_size; 2731 bytes_left -= buf_size; 2732 } 2733 2734 p_dev->mcp_nvm_resp = resp; 2735 ecore_ptt_release(p_hwfn, p_ptt); 2736 2737 return rc; 2738 } 2739 2740 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd, 2741 u32 addr, u8 *p_buf, u32 len) 2742 { 2743 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2744 struct ecore_ptt *p_ptt; 2745 u32 resp, param; 2746 enum _ecore_status_t rc; 2747 2748 p_ptt = ecore_ptt_acquire(p_hwfn); 2749 if (!p_ptt) 2750 return ECORE_BUSY; 2751 2752 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, 2753 (cmd == ECORE_PHY_CORE_READ) ? 
2754 DRV_MSG_CODE_PHY_CORE_READ : 2755 DRV_MSG_CODE_PHY_RAW_READ, 2756 addr, &resp, &param, &len, (u32 *)p_buf); 2757 if (rc != ECORE_SUCCESS) 2758 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); 2759 2760 p_dev->mcp_nvm_resp = resp; 2761 ecore_ptt_release(p_hwfn, p_ptt); 2762 2763 return rc; 2764 } 2765 2766 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf) 2767 { 2768 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2769 struct ecore_ptt *p_ptt; 2770 2771 p_ptt = ecore_ptt_acquire(p_hwfn); 2772 if (!p_ptt) 2773 return ECORE_BUSY; 2774 2775 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp)); 2776 ecore_ptt_release(p_hwfn, p_ptt); 2777 2778 return ECORE_SUCCESS; 2779 } 2780 2781 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr) 2782 { 2783 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2784 struct ecore_ptt *p_ptt; 2785 u32 resp, param; 2786 enum _ecore_status_t rc; 2787 2788 p_ptt = ecore_ptt_acquire(p_hwfn); 2789 if (!p_ptt) 2790 return ECORE_BUSY; 2791 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr, 2792 &resp, &param); 2793 p_dev->mcp_nvm_resp = resp; 2794 ecore_ptt_release(p_hwfn, p_ptt); 2795 2796 return rc; 2797 } 2798 2799 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev, 2800 u32 addr) 2801 { 2802 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2803 struct ecore_ptt *p_ptt; 2804 u32 resp, param; 2805 enum _ecore_status_t rc; 2806 2807 p_ptt = ecore_ptt_acquire(p_hwfn); 2808 if (!p_ptt) 2809 return ECORE_BUSY; 2810 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr, 2811 &resp, &param); 2812 p_dev->mcp_nvm_resp = resp; 2813 ecore_ptt_release(p_hwfn, p_ptt); 2814 2815 return rc; 2816 } 2817 2818 /* rc is set to ECORE_INVAL by default, since the while loop below 2819 * might not be entered if len is 0 2820 */ 2821 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd, 2822 u32 addr, u8 *p_buf, u32 len) 2823 { 2824 u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param; 2825 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2826 enum _ecore_status_t rc = ECORE_INVAL; 2827 struct ecore_ptt *p_ptt; 2828 2829 p_ptt = ecore_ptt_acquire(p_hwfn); 2830 if (!p_ptt) 2831 return ECORE_BUSY; 2832 2833 switch (cmd) { 2834 case ECORE_PUT_FILE_DATA: 2835 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA; 2836 break; 2837 case ECORE_NVM_WRITE_NVRAM: 2838 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM; 2839 break; 2840 case ECORE_EXT_PHY_FW_UPGRADE: 2841 nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE; 2842 break; 2843 default: 2844 DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n", 2845 cmd); 2846 rc = ECORE_INVAL; 2847 goto out; 2848 } 2849 2850 buf_idx = 0; 2851 while (buf_idx < len) { 2852 buf_size = OSAL_MIN_T(u32, (len - buf_idx), 2853 MCP_DRV_NVM_BUF_LEN); 2854 nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) | 2855 addr) + 2856 buf_idx; 2857 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset, 2858 &resp, &param, buf_size, 2859 (u32 *)&p_buf[buf_idx]); 2860 if (rc != ECORE_SUCCESS) { 2861 DP_NOTICE(p_dev, false, 2862 "ecore_mcp_nvm_write() failed, rc = %d\n", 2863 rc); 2864 resp = FW_MSG_CODE_ERROR; 2865 break; 2866 } 2867 2868 if (resp != FW_MSG_CODE_OK && 2869 resp != FW_MSG_CODE_NVM_OK && 2870 resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) { 2871 DP_NOTICE(p_dev, false, 2872 "nvm write failed, resp = 0x%08x\n", resp); 2873 rc = ECORE_UNKNOWN_ERROR; 2874 break; 2875 } 2876 2877 /* This can be a lengthy process,
and the scheduler may not be 2878 * preemptible. Sleep a bit to prevent CPU hogging. 2879 */ 2880 if (buf_idx % 0x1000 > 2881 (buf_idx + buf_size) % 0x1000) 2882 OSAL_MSLEEP(1); 2883 2884 buf_idx += buf_size; 2885 } 2886 2887 p_dev->mcp_nvm_resp = resp; 2888 out: 2889 ecore_ptt_release(p_hwfn, p_ptt); 2890 2891 return rc; 2892 } 2893 2894 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd, 2895 u32 addr, u8 *p_buf, u32 len) 2896 { 2897 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2898 struct ecore_ptt *p_ptt; 2899 u32 resp, param, nvm_cmd; 2900 enum _ecore_status_t rc; 2901 2902 p_ptt = ecore_ptt_acquire(p_hwfn); 2903 if (!p_ptt) 2904 return ECORE_BUSY; 2905 2906 nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE : 2907 DRV_MSG_CODE_PHY_RAW_WRITE; 2908 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr, 2909 &resp, &param, len, (u32 *)p_buf); 2910 if (rc != ECORE_SUCCESS) 2911 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); 2912 p_dev->mcp_nvm_resp = resp; 2913 ecore_ptt_release(p_hwfn, p_ptt); 2914 2915 return rc; 2916 } 2917 2918 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev, 2919 u32 addr) 2920 { 2921 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2922 struct ecore_ptt *p_ptt; 2923 u32 resp, param; 2924 enum _ecore_status_t rc; 2925 2926 p_ptt = ecore_ptt_acquire(p_hwfn); 2927 if (!p_ptt) 2928 return ECORE_BUSY; 2929 2930 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr, 2931 &resp, &param); 2932 p_dev->mcp_nvm_resp = resp; 2933 ecore_ptt_release(p_hwfn, p_ptt); 2934 2935 return rc; 2936 } 2937 2938 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn, 2939 struct ecore_ptt *p_ptt, 2940 u32 port, u32 addr, u32 offset, 2941 u32 len, u8 *p_buf) 2942 { 2943 u32 bytes_left, bytes_to_copy, buf_size, nvm_offset; 2944 u32 resp, param; 2945 enum _ecore_status_t rc; 2946 2947 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) | 2948 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET); 2949 addr = offset; 2950 offset = 0; 2951 bytes_left = len; 2952 while (bytes_left > 0) { 2953 bytes_to_copy = OSAL_MIN_T(u32, bytes_left, 2954 MAX_I2C_TRANSACTION_SIZE); 2955 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | 2956 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); 2957 nvm_offset |= ((addr + offset) << 2958 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET); 2959 nvm_offset |= (bytes_to_copy << 2960 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET); 2961 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, 2962 DRV_MSG_CODE_TRANSCEIVER_READ, 2963 nvm_offset, &resp, &param, &buf_size, 2964 (u32 *)(p_buf + offset)); 2965 if (rc != ECORE_SUCCESS) { 2966 DP_NOTICE(p_hwfn, false, 2967 "Failed to send a transceiver read command to the MFW.
rc = %d.\n", 2968 rc); 2969 return rc; 2970 } 2971 2972 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) 2973 return ECORE_NODEV; 2974 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) 2975 return ECORE_UNKNOWN_ERROR; 2976 2977 offset += buf_size; 2978 bytes_left -= buf_size; 2979 } 2980 2981 return ECORE_SUCCESS; 2982 } 2983 2984 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn, 2985 struct ecore_ptt *p_ptt, 2986 u32 port, u32 addr, u32 offset, 2987 u32 len, u8 *p_buf) 2988 { 2989 u32 buf_idx, buf_size, nvm_offset, resp, param; 2990 enum _ecore_status_t rc; 2991 2992 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) | 2993 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET); 2994 buf_idx = 0; 2995 while (buf_idx < len) { 2996 buf_size = OSAL_MIN_T(u32, (len - buf_idx), 2997 MAX_I2C_TRANSACTION_SIZE); 2998 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | 2999 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); 3000 nvm_offset |= ((offset + buf_idx) << 3001 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET); 3002 nvm_offset |= (buf_size << 3003 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET); 3004 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, 3005 DRV_MSG_CODE_TRANSCEIVER_WRITE, 3006 nvm_offset, &resp, ¶m, buf_size, 3007 (u32 *)&p_buf[buf_idx]); 3008 if (rc != ECORE_SUCCESS) { 3009 DP_NOTICE(p_hwfn, false, 3010 "Failed to send a transceiver write command to the MFW. rc = %d.\n", 3011 rc); 3012 return rc; 3013 } 3014 3015 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) 3016 return ECORE_NODEV; 3017 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) 3018 return ECORE_UNKNOWN_ERROR; 3019 3020 buf_idx += buf_size; 3021 } 3022 3023 return ECORE_SUCCESS; 3024 } 3025 3026 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn, 3027 struct ecore_ptt *p_ptt, 3028 u16 gpio, u32 *gpio_val) 3029 { 3030 enum _ecore_status_t rc = ECORE_SUCCESS; 3031 u32 drv_mb_param = 0, rsp; 3032 3033 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET); 3034 3035 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ, 3036 drv_mb_param, &rsp, gpio_val); 3037 3038 if (rc != ECORE_SUCCESS) 3039 return rc; 3040 3041 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) 3042 return ECORE_UNKNOWN_ERROR; 3043 3044 return ECORE_SUCCESS; 3045 } 3046 3047 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn, 3048 struct ecore_ptt *p_ptt, 3049 u16 gpio, u16 gpio_val) 3050 { 3051 enum _ecore_status_t rc = ECORE_SUCCESS; 3052 u32 drv_mb_param = 0, param, rsp; 3053 3054 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) | 3055 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET); 3056 3057 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE, 3058 drv_mb_param, &rsp, ¶m); 3059 3060 if (rc != ECORE_SUCCESS) 3061 return rc; 3062 3063 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) 3064 return ECORE_UNKNOWN_ERROR; 3065 3066 return ECORE_SUCCESS; 3067 } 3068 3069 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn, 3070 struct ecore_ptt *p_ptt, 3071 u16 gpio, u32 *gpio_direction, 3072 u32 *gpio_ctrl) 3073 { 3074 u32 drv_mb_param = 0, rsp, val = 0; 3075 enum _ecore_status_t rc = ECORE_SUCCESS; 3076 3077 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET; 3078 3079 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO, 3080 drv_mb_param, &rsp, &val); 3081 if (rc != ECORE_SUCCESS) 3082 return rc; 3083 3084 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >> 3085 DRV_MB_PARAM_GPIO_DIRECTION_OFFSET; 3086 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >> 3087 
DRV_MB_PARAM_GPIO_CTRL_OFFSET; 3088 3089 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) 3090 return ECORE_UNKNOWN_ERROR; 3091 3092 return ECORE_SUCCESS; 3093 } 3094 3095 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn, 3096 struct ecore_ptt *p_ptt) 3097 { 3098 u32 drv_mb_param = 0, rsp, param; 3099 enum _ecore_status_t rc = ECORE_SUCCESS; 3100 3101 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST << 3102 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); 3103 3104 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 3105 drv_mb_param, &rsp, &param); 3106 3107 if (rc != ECORE_SUCCESS) 3108 return rc; 3109 3110 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || 3111 (param != DRV_MB_PARAM_BIST_RC_PASSED)) 3112 rc = ECORE_UNKNOWN_ERROR; 3113 3114 return rc; 3115 } 3116 3117 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn, 3118 struct ecore_ptt *p_ptt) 3119 { 3120 u32 drv_mb_param, rsp, param; 3121 enum _ecore_status_t rc = ECORE_SUCCESS; 3122 3123 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST << 3124 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); 3125 3126 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 3127 drv_mb_param, &rsp, &param); 3128 3129 if (rc != ECORE_SUCCESS) 3130 return rc; 3131 3132 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || 3133 (param != DRV_MB_PARAM_BIST_RC_PASSED)) 3134 rc = ECORE_UNKNOWN_ERROR; 3135 3136 return rc; 3137 } 3138 3139 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images( 3140 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images) 3141 { 3142 u32 drv_mb_param = 0, rsp; 3143 enum _ecore_status_t rc = ECORE_SUCCESS; 3144 3145 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES << 3146 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); 3147 3148 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 3149 drv_mb_param, &rsp, num_images); 3150 3151 if (rc != ECORE_SUCCESS) 3152 return rc; 3153 3154 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) 3155 rc = ECORE_UNKNOWN_ERROR; 3156 3157 return rc; 3158 } 3159 3160 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att( 3161 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3162 struct bist_nvm_image_att *p_image_att, u32 image_index) 3163 { 3164 u32 buf_size, nvm_offset, resp, param; 3165 enum _ecore_status_t rc; 3166 3167 nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX << 3168 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); 3169 nvm_offset |= (image_index << 3170 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET); 3171 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 3172 nvm_offset, &resp, &param, &buf_size, 3173 (u32 *)p_image_att); 3174 if (rc != ECORE_SUCCESS) 3175 return rc; 3176 3177 if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || 3178 (p_image_att->return_code != 1)) 3179 rc = ECORE_UNKNOWN_ERROR; 3180 3181 return rc; 3182 } 3183 3184 enum _ecore_status_t 3185 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn, 3186 struct ecore_ptt *p_ptt, 3187 struct ecore_temperature_info *p_temp_info) 3188 { 3189 struct ecore_temperature_sensor *p_temp_sensor; 3190 struct temperature_status_stc mfw_temp_info; 3191 struct ecore_mcp_mb_params mb_params; 3192 u32 val; 3193 enum _ecore_status_t rc; 3194 u8 i; 3195 3196 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); 3197 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE; 3198 mb_params.p_data_dst = &mfw_temp_info; 3199 mb_params.data_dst_size = sizeof(mfw_temp_info); 3200 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 3201 if (rc != ECORE_SUCCESS) 3202 return rc; 3203 3204
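/* Each sensor dword packs the sensor location, the high and critical thresholds and the current temperature; unpack it with the masks and offsets below. */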
OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS); 3205 p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors, 3206 ECORE_MAX_NUM_OF_SENSORS); 3207 for (i = 0; i < p_temp_info->num_sensors; i++) { 3208 val = mfw_temp_info.sensor[i]; 3209 p_temp_sensor = &p_temp_info->sensors[i]; 3210 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >> 3211 SENSOR_LOCATION_OFFSET; 3212 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >> 3213 THRESHOLD_HIGH_OFFSET; 3214 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >> 3215 CRITICAL_TEMPERATURE_OFFSET; 3216 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >> 3217 CURRENT_TEMP_OFFSET; 3218 } 3219 3220 return ECORE_SUCCESS; 3221 } 3222 3223 enum _ecore_status_t ecore_mcp_get_mba_versions( 3224 struct ecore_hwfn *p_hwfn, 3225 struct ecore_ptt *p_ptt, 3226 struct ecore_mba_vers *p_mba_vers) 3227 { 3228 u32 buf_size, resp, param; 3229 enum _ecore_status_t rc; 3230 3231 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION, 3232 0, &resp, &param, &buf_size, 3233 &p_mba_vers->mba_vers[0]); 3234 3235 if (rc != ECORE_SUCCESS) 3236 return rc; 3237 3238 if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) 3239 rc = ECORE_UNKNOWN_ERROR; 3240 3241 if (buf_size != MCP_DRV_NVM_BUF_LEN) 3242 rc = ECORE_UNKNOWN_ERROR; 3243 3244 return rc; 3245 } 3246 3247 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn, 3248 struct ecore_ptt *p_ptt, 3249 u64 *num_events) 3250 { 3251 u32 rsp; 3252 3253 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS, 3254 0, &rsp, (u32 *)num_events); 3255 } 3256 3257 static enum resource_id_enum 3258 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id) 3259 { 3260 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID; 3261 3262 switch (res_id) { 3263 case ECORE_SB: 3264 mfw_res_id = RESOURCE_NUM_SB_E; 3265 break; 3266 case ECORE_L2_QUEUE: 3267 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E; 3268 break; 3269 case ECORE_VPORT: 3270 mfw_res_id = RESOURCE_NUM_VPORT_E; 3271 break; 3272 case ECORE_RSS_ENG: 3273 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E; 3274 break; 3275 case ECORE_PQ: 3276 mfw_res_id = RESOURCE_NUM_PQ_E; 3277 break; 3278 case ECORE_RL: 3279 mfw_res_id = RESOURCE_NUM_RL_E; 3280 break; 3281 case ECORE_MAC: 3282 case ECORE_VLAN: 3283 /* Each VFC resource can accommodate both a MAC and a VLAN */ 3284 mfw_res_id = RESOURCE_VFC_FILTER_E; 3285 break; 3286 case ECORE_ILT: 3287 mfw_res_id = RESOURCE_ILT_E; 3288 break; 3289 case ECORE_LL2_QUEUE: 3290 mfw_res_id = RESOURCE_LL2_QUEUE_E; 3291 break; 3292 case ECORE_RDMA_CNQ_RAM: 3293 case ECORE_CMDQS_CQS: 3294 /* CNQ/CMDQS are the same resource */ 3295 mfw_res_id = RESOURCE_CQS_E; 3296 break; 3297 case ECORE_RDMA_STATS_QUEUE: 3298 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E; 3299 break; 3300 case ECORE_BDQ: 3301 mfw_res_id = RESOURCE_BDQ_E; 3302 break; 3303 default: 3304 break; 3305 } 3306 3307 return mfw_res_id; 3308 } 3309 3310 #define ECORE_RESC_ALLOC_VERSION_MAJOR 2 3311 #define ECORE_RESC_ALLOC_VERSION_MINOR 0 3312 #define ECORE_RESC_ALLOC_VERSION \ 3313 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \ 3314 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \ 3315 (ECORE_RESC_ALLOC_VERSION_MINOR << \ 3316 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET)) 3317 3318 struct ecore_resc_alloc_in_params { 3319 u32 cmd; 3320 enum ecore_resources res_id; 3321 u32 resc_max_val; 3322 }; 3323 3324 struct ecore_resc_alloc_out_params { 3325 u32 mcp_resp; 3326 u32 mcp_param; 3327 u32 resc_num; 3328 u32
resc_start; 3329 u32 vf_resc_num; 3330 u32 vf_resc_start; 3331 u32 flags; 3332 }; 3333 3334 #define ECORE_RECOVERY_PROLOG_SLEEP_MS 100 3335 3336 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev) 3337 { 3338 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 3339 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 3340 enum _ecore_status_t rc; 3341 3342 /* Allow ongoing PCIe transactions to complete */ 3343 OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS); 3344 3345 /* Clear the PF's internal FID_enable in the PXP */ 3346 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); 3347 if (rc != ECORE_SUCCESS) 3348 DP_NOTICE(p_hwfn, false, 3349 "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n", 3350 rc); 3351 3352 return rc; 3353 } 3354 3355 static enum _ecore_status_t 3356 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn, 3357 struct ecore_ptt *p_ptt, 3358 struct ecore_resc_alloc_in_params *p_in_params, 3359 struct ecore_resc_alloc_out_params *p_out_params) 3360 { 3361 struct ecore_mcp_mb_params mb_params; 3362 struct resource_info mfw_resc_info; 3363 enum _ecore_status_t rc; 3364 3365 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info)); 3366 3367 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id); 3368 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) { 3369 DP_ERR(p_hwfn, 3370 "Failed to match resource %d [%s] with the MFW resources\n", 3371 p_in_params->res_id, 3372 ecore_hw_get_resc_name(p_in_params->res_id)); 3373 return ECORE_INVAL; 3374 } 3375 3376 switch (p_in_params->cmd) { 3377 case DRV_MSG_SET_RESOURCE_VALUE_MSG: 3378 mfw_resc_info.size = p_in_params->resc_max_val; 3379 /* Fallthrough */ 3380 case DRV_MSG_GET_RESOURCE_ALLOC_MSG: 3381 break; 3382 default: 3383 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n", 3384 p_in_params->cmd); 3385 return ECORE_INVAL; 3386 } 3387 3388 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); 3389 mb_params.cmd = p_in_params->cmd; 3390 mb_params.param = ECORE_RESC_ALLOC_VERSION; 3391 mb_params.p_data_src = &mfw_resc_info; 3392 mb_params.data_src_size = sizeof(mfw_resc_info); 3393 mb_params.p_data_dst = mb_params.p_data_src; 3394 mb_params.data_dst_size = mb_params.data_src_size; 3395 3396 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3397 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n", 3398 p_in_params->cmd, p_in_params->res_id, 3399 ecore_hw_get_resc_name(p_in_params->res_id), 3400 GET_MFW_FIELD(mb_params.param, 3401 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), 3402 GET_MFW_FIELD(mb_params.param, 3403 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), 3404 p_in_params->resc_max_val); 3405 3406 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 3407 if (rc != ECORE_SUCCESS) 3408 return rc; 3409 3410 p_out_params->mcp_resp = mb_params.mcp_resp; 3411 p_out_params->mcp_param = mb_params.mcp_param; 3412 p_out_params->resc_num = mfw_resc_info.size; 3413 p_out_params->resc_start = mfw_resc_info.offset; 3414 p_out_params->vf_resc_num = mfw_resc_info.vf_size; 3415 p_out_params->vf_resc_start = mfw_resc_info.vf_offset; 3416 p_out_params->flags = mfw_resc_info.flags; 3417 3418 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3419 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n", 3420 GET_MFW_FIELD(p_out_params->mcp_param, 3421 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), 3422 GET_MFW_FIELD(p_out_params->mcp_param, 3423 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), 3424 p_out_params->resc_num, p_out_params->resc_start, 3425 
p_out_params->vf_resc_num, p_out_params->vf_resc_start, 3426 p_out_params->flags); 3427 3428 return ECORE_SUCCESS; 3429 } 3430 3431 enum _ecore_status_t 3432 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3433 enum ecore_resources res_id, u32 resc_max_val, 3434 u32 *p_mcp_resp) 3435 { 3436 struct ecore_resc_alloc_out_params out_params; 3437 struct ecore_resc_alloc_in_params in_params; 3438 enum _ecore_status_t rc; 3439 3440 OSAL_MEM_ZERO(&in_params, sizeof(in_params)); 3441 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG; 3442 in_params.res_id = res_id; 3443 in_params.resc_max_val = resc_max_val; 3444 OSAL_MEM_ZERO(&out_params, sizeof(out_params)); 3445 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, 3446 &out_params); 3447 if (rc != ECORE_SUCCESS) 3448 return rc; 3449 3450 *p_mcp_resp = out_params.mcp_resp; 3451 3452 return ECORE_SUCCESS; 3453 } 3454 3455 enum _ecore_status_t 3456 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3457 enum ecore_resources res_id, u32 *p_mcp_resp, 3458 u32 *p_resc_num, u32 *p_resc_start) 3459 { 3460 struct ecore_resc_alloc_out_params out_params; 3461 struct ecore_resc_alloc_in_params in_params; 3462 enum _ecore_status_t rc; 3463 3464 OSAL_MEM_ZERO(&in_params, sizeof(in_params)); 3465 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG; 3466 in_params.res_id = res_id; 3467 OSAL_MEM_ZERO(&out_params, sizeof(out_params)); 3468 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, 3469 &out_params); 3470 if (rc != ECORE_SUCCESS) 3471 return rc; 3472 3473 *p_mcp_resp = out_params.mcp_resp; 3474 3475 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) { 3476 *p_resc_num = out_params.resc_num; 3477 *p_resc_start = out_params.resc_start; 3478 } 3479 3480 return ECORE_SUCCESS; 3481 } 3482 3483 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn, 3484 struct ecore_ptt *p_ptt) 3485 { 3486 u32 mcp_resp, mcp_param; 3487 3488 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0, 3489 &mcp_resp, &mcp_param); 3490 } 3491 3492 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn, 3493 struct ecore_ptt *p_ptt, 3494 u32 param, u32 *p_mcp_resp, 3495 u32 *p_mcp_param) 3496 { 3497 enum _ecore_status_t rc; 3498 3499 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param, 3500 p_mcp_resp, p_mcp_param); 3501 if (rc != ECORE_SUCCESS) 3502 return rc; 3503 3504 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) { 3505 DP_INFO(p_hwfn, 3506 "The resource command is unsupported by the MFW\n"); 3507 return ECORE_NOTIMPL; 3508 } 3509 3510 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) { 3511 u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE); 3512 3513 DP_NOTICE(p_hwfn, false, 3514 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n", 3515 param, opcode); 3516 return ECORE_INVAL; 3517 } 3518 3519 return rc; 3520 } 3521 3522 enum _ecore_status_t 3523 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3524 struct ecore_resc_lock_params *p_params) 3525 { 3526 u32 param = 0, mcp_resp, mcp_param; 3527 u8 opcode; 3528 enum _ecore_status_t rc; 3529 3530 switch (p_params->timeout) { 3531 case ECORE_MCP_RESC_LOCK_TO_DEFAULT: 3532 opcode = RESOURCE_OPCODE_REQ; 3533 p_params->timeout = 0; 3534 break; 3535 case ECORE_MCP_RESC_LOCK_TO_NONE: 3536 opcode = RESOURCE_OPCODE_REQ_WO_AGING; 3537 p_params->timeout = 0; 3538 break; 3539 default: 3540 opcode = RESOURCE_OPCODE_REQ_W_AGING; 3541 break; 3542 } 3543 3544 
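/* Pack the resource id, the opcode and the aging timeout into the request param */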
SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); 3545 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); 3546 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout); 3547 3548 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3549 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n", 3550 param, p_params->timeout, opcode, p_params->resource); 3551 3552 /* Attempt to acquire the resource */ 3553 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, 3554 &mcp_param); 3555 if (rc != ECORE_SUCCESS) 3556 return rc; 3557 3558 /* Analyze the response */ 3559 p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER); 3560 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); 3561 3562 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3563 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n", 3564 mcp_param, opcode, p_params->owner); 3565 3566 switch (opcode) { 3567 case RESOURCE_OPCODE_GNT: 3568 p_params->b_granted = true; 3569 break; 3570 case RESOURCE_OPCODE_BUSY: 3571 p_params->b_granted = false; 3572 break; 3573 default: 3574 DP_NOTICE(p_hwfn, false, 3575 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n", 3576 mcp_param, opcode); 3577 return ECORE_INVAL; 3578 } 3579 3580 return ECORE_SUCCESS; 3581 } 3582 3583 enum _ecore_status_t 3584 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3585 struct ecore_resc_lock_params *p_params) 3586 { 3587 u32 retry_cnt = 0; 3588 enum _ecore_status_t rc; 3589 3590 do { 3591 /* No need for an interval before the first iteration */ 3592 if (retry_cnt) { 3593 if (p_params->sleep_b4_retry) { 3594 u16 retry_interval_in_ms = 3595 DIV_ROUND_UP(p_params->retry_interval, 3596 1000); 3597 3598 OSAL_MSLEEP(retry_interval_in_ms); 3599 } else { 3600 OSAL_UDELAY(p_params->retry_interval); 3601 } 3602 } 3603 3604 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params); 3605 if (rc != ECORE_SUCCESS) 3606 return rc; 3607 3608 if (p_params->b_granted) 3609 break; 3610 } while (retry_cnt++ < p_params->retry_num); 3611 3612 return ECORE_SUCCESS; 3613 } 3614 3615 void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock, 3616 struct ecore_resc_unlock_params *p_unlock, 3617 enum ecore_resc_lock resource, 3618 bool b_is_permanent) 3619 { 3620 if (p_lock != OSAL_NULL) { 3621 OSAL_MEM_ZERO(p_lock, sizeof(*p_lock)); 3622 3623 /* Permanent resources don't require aging, and there's no 3624 * point in trying to acquire them more than once since it's 3625 * unexpected another entity would release them. 3626 */ 3627 if (b_is_permanent) { 3628 p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE; 3629 } else { 3630 p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT; 3631 p_lock->retry_interval = 3632 ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT; 3633 p_lock->sleep_b4_retry = true; 3634 } 3635 3636 p_lock->resource = resource; 3637 } 3638 3639 if (p_unlock != OSAL_NULL) { 3640 OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock)); 3641 p_unlock->resource = resource; 3642 } 3643 } 3644 3645 enum _ecore_status_t 3646 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3647 struct ecore_resc_unlock_params *p_params) 3648 { 3649 u32 param = 0, mcp_resp, mcp_param; 3650 u8 opcode; 3651 enum _ecore_status_t rc; 3652 3653 opcode = p_params->b_force ? 
RESOURCE_OPCODE_FORCE_RELEASE 3654 : RESOURCE_OPCODE_RELEASE; 3655 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); 3656 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); 3657 3658 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3659 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n", 3660 param, opcode, p_params->resource); 3661 3662 /* Attempt to release the resource */ 3663 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, 3664 &mcp_param); 3665 if (rc != ECORE_SUCCESS) 3666 return rc; 3667 3668 /* Analyze the response */ 3669 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); 3670 3671 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3672 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n", 3673 mcp_param, opcode); 3674 3675 switch (opcode) { 3676 case RESOURCE_OPCODE_RELEASED_PREVIOUS: 3677 DP_INFO(p_hwfn, 3678 "Resource unlock request for an already released resource [%d]\n", 3679 p_params->resource); 3680 /* Fallthrough */ 3681 case RESOURCE_OPCODE_RELEASED: 3682 p_params->b_released = true; 3683 break; 3684 case RESOURCE_OPCODE_WRONG_OWNER: 3685 p_params->b_released = false; 3686 break; 3687 default: 3688 DP_NOTICE(p_hwfn, false, 3689 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n", 3690 mcp_param, opcode); 3691 return ECORE_INVAL; 3692 } 3693 3694 return ECORE_SUCCESS; 3695 } 3696 3697 bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn) 3698 { 3699 return !!(p_hwfn->mcp_info->capabilities & 3700 FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ); 3701 } 3702 3703 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn, 3704 struct ecore_ptt *p_ptt) 3705 { 3706 u32 mcp_resp; 3707 enum _ecore_status_t rc; 3708 3709 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT, 3710 0, &mcp_resp, &p_hwfn->mcp_info->capabilities); 3711 if (rc == ECORE_SUCCESS) 3712 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE), 3713 "MFW supported features: %08x\n", 3714 p_hwfn->mcp_info->capabilities); 3715 3716 return rc; 3717 } 3718 3719 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn, 3720 struct ecore_ptt *p_ptt) 3721 { 3722 u32 mcp_resp, mcp_param, features; 3723 3724 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ | 3725 DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE | 3726 DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK; 3727 3728 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, 3729 features, &mcp_resp, &mcp_param); 3730 } 3731 3732 enum _ecore_status_t 3733 ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3734 struct ecore_mcp_drv_attr *p_drv_attr) 3735 { 3736 struct attribute_cmd_write_stc attr_cmd_write; 3737 enum _attribute_commands_e mfw_attr_cmd; 3738 struct ecore_mcp_mb_params mb_params; 3739 enum _ecore_status_t rc; 3740 3741 switch (p_drv_attr->attr_cmd) { 3742 case ECORE_MCP_DRV_ATTR_CMD_READ: 3743 mfw_attr_cmd = ATTRIBUTE_CMD_READ; 3744 break; 3745 case ECORE_MCP_DRV_ATTR_CMD_WRITE: 3746 mfw_attr_cmd = ATTRIBUTE_CMD_WRITE; 3747 break; 3748 case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR: 3749 mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR; 3750 break; 3751 case ECORE_MCP_DRV_ATTR_CMD_CLEAR: 3752 mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR; 3753 break; 3754 default: 3755 DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n", 3756 p_drv_attr->attr_cmd); 3757 return ECORE_INVAL; 3758 } 3759 3760 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); 3761 mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE; 3762 SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY, 
3763 p_drv_attr->attr_num); 3764 SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD, 3765 mfw_attr_cmd); 3766 if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) { 3767 OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write)); 3768 attr_cmd_write.val = p_drv_attr->val; 3769 attr_cmd_write.mask = p_drv_attr->mask; 3770 attr_cmd_write.offset = p_drv_attr->offset; 3771 3772 mb_params.p_data_src = &attr_cmd_write; 3773 mb_params.data_src_size = sizeof(attr_cmd_write); 3774 } 3775 3776 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 3777 if (rc != ECORE_SUCCESS) 3778 return rc; 3779 3780 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { 3781 DP_INFO(p_hwfn, 3782 "The attribute command is not supported by the MFW\n"); 3783 return ECORE_NOTIMPL; 3784 } else if (mb_params.mcp_resp != FW_MSG_CODE_OK) { 3785 DP_INFO(p_hwfn, 3786 "Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n", 3787 mb_params.mcp_resp, p_drv_attr->attr_cmd, 3788 p_drv_attr->attr_num); 3789 return ECORE_INVAL; 3790 } 3791 3792 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3793 "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n", 3794 p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num, 3795 p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset, 3796 mb_params.mcp_param); 3797 3798 if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ || 3799 p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR) 3800 p_drv_attr->val = mb_params.mcp_param; 3801 3802 return ECORE_SUCCESS; 3803 } 3804 3805 void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3806 u32 offset, u32 val) 3807 { 3808 struct ecore_mcp_mb_params mb_params = {0}; 3809 enum _ecore_status_t rc; 3810 u32 dword = val; 3811 3812 mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG; 3813 mb_params.param = offset; 3814 mb_params.p_data_src = &dword; 3815 mb_params.data_src_size = sizeof(dword); 3816 3817 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 3818 if (rc != ECORE_SUCCESS) { 3819 DP_NOTICE(p_hwfn, false, 3820 "Failed to send the WoL write request, rc = %d\n", rc); 3821 return; 3822 } 3823 3824 if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) 3825 DP_NOTICE(p_hwfn, false, 3826 "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n", 3827 val, offset, mb_params.mcp_resp); 3828 } 3829 3830
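/* Usage sketch (illustrative only, not part of the driver): how a caller
 * is expected to combine the resource-lock helpers above. Note that
 * ecore_mcp_resc_lock() can return ECORE_SUCCESS while b_granted is still
 * false, so both must be checked. "resc_id" stands for any
 * enum ecore_resc_lock value.
 *
 *	struct ecore_resc_lock_params lock;
 *	struct ecore_resc_unlock_params unlock;
 *
 *	ecore_mcp_resc_lock_default_init(&lock, &unlock, resc_id, false);
 *	if (ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock) != ECORE_SUCCESS ||
 *	    !lock.b_granted)
 *		return ECORE_BUSY;
 *	... access the MFW-arbitrated resource ...
 *	ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 */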