/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "ecore_sp_commands.h"
#include "ecore_cxt.h"

#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)	/* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_OFFSET)

#define MCP_BYTES_PER_MBIT_OFFSET 17

#ifndef ASIC_ONLY
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
#endif

bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	OSAL_BE32 tmp;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;
#endif

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
		    OSAL_BE32_TO_CPU(tmp);
	}
}

struct ecore_mcp_cmd_elem {
	osal_list_entry_t list;
	struct ecore_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};
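/* The MFW mailbox allows a single outstanding command at a time. Each command
 * in flight is tracked by an ecore_mcp_cmd_elem, keyed by the sequence number
 * written into drv_mb_header. A minimal sketch of the lifecycle, performed
 * under cmd_lock (names mirror the helpers that follow):
 *
 *	seq = ++p_hwfn->mcp_info->drv_mb_seq;
 *	p_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq);
 *	... command is written to the mailbox ...
 *	... fw_mb_header eventually echoes `seq' -> b_is_completed = true ...
 *	ecore_mcp_cmd_del_elem(p_hwfn, p_elem);
 */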
/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
		       struct ecore_mcp_mb_params *p_mb_params,
		       u16 expected_seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
				 sizeof(*p_cmd_elem));
	if (!p_cmd_elem) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
		goto out;
	}

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
				   struct ecore_mcp_cmd_elem *p_cmd_elem)
{
	OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
	OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
				 struct ecore_mcp_cmd_elem) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return OSAL_NULL;
}

enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
					      &p_hwfn->mcp_info->cmd_list, list,
					      struct ecore_mcp_cmd_elem) {
			ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
#ifdef CONFIG_ECORE_LOCK_ALLOC
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
	}

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;
		return ECORE_INVAL;
	}
#endif

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x"
		   " mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
	    DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
	    DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}
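/* The offsize arithmetic above relies on the SECTION_* macros from
 * mcp_public.h. A hedged sketch of the assumed encoding (the authoritative
 * definitions live in mcp_public.h): an "offsize" dword packs a section
 * offset and a per-instance section size, and SECTION_ADDR() resolves an
 * instance as, conceptually:
 *
 *	addr = base + SECTION_OFFSET(offsize) +
 *	       SECTION_SIZE(offsize) * instance_id;
 */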
enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
#endif
	OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
	OSAL_SPIN_LOCK_INIT(&p_info->link_lock);

	OSAL_LIST_INIT(&p_info->cmd_list);

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}

static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between init
	 * time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
#endif

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return ECORE_ABORTED;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the MFW up to 500 msec (50*1000*10usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}

/* Must be called while cmd_lock is acquired */
static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
						   struct ecore_mcp_cmd_elem,
						   list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}

/* Must be called while cmd_lock is acquired */
static enum _ecore_status_t
ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params *p_mb_params;
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return ECORE_AGAIN;

	p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return ECORE_UNKNOWN_ERROR;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != OSAL_NULL &&
	    p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      OFFSETOF(struct public_drv_mb,
					       union_data);
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return ECORE_SUCCESS;
}
/* Must be called while cmd_lock is acquired */
static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_mb_params *p_mb_params,
				      u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);
	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
			    p_mb_params->data_src_size);
	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
			sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}

static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
				       bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}

void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;

	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn, false,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}
static enum _ecore_status_t
_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mcp_mb_params *p_mb_params,
			 u32 max_retries, u32 delay)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 cnt = 0;
	u16 seq_num;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (!ecore_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_UDELAY(delay);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_AGAIN;
	}

	/* Send the mailbox command */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = ECORE_NOMEM;
		goto err;
	}

	__ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		OSAL_UDELAY(delay);
		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		ecore_mcp_print_cpu_info(p_hwfn, p_ptt);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

		ecore_mcp_cmd_set_blocking(p_hwfn, true);
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
		return ECORE_AGAIN;
	}

	ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp, p_mb_params->mcp_param,
		   (cnt * delay) / 1000, (cnt * delay) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return ECORE_SUCCESS;

err:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}
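/* Retry budget for the wait loops above: with ASIC timings this amounts to
 * ECORE_DRV_MB_MAX_RETRIES * CHIP_MCP_RESP_ITER_US = 500000 * 10us = 5 sec;
 * ecore_mcp_cmd_and_union() below substitutes the emulation delay and scales
 * the retry count down on FPGA.
 */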
static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct ecore_mcp_mb_params *p_mb_params)
{
	osal_size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 delay = CHIP_MCP_RESP_ITER_US;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		max_retries /= 10;
#endif

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size, p_mb_params->data_dst_size,
		       union_data_size);
		return ECORE_INVAL;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_ABORTED;
	}

	return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
					delay);
}

enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
			loaded--;
			loaded_port[p_hwfn->port_id]--;
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
				   loaded);
		}
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size, u32 *i_buf)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size, u32 *o_buf)
{
	struct ecore_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	/* @DPDK */
	OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));

	return ECORE_SUCCESS;
}

#ifndef ASIC_ONLY
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
				    u32 *p_load_code)
{
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	if (!loaded)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	else if (!loaded_port[p_hwfn->port_id])
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	else
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	/* On CMT, always tell that it's engine */
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}
#endif
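/* Typical use of ecore_mcp_cmd() above, in sketch form (LOAD_DONE is just one
 * example of the DRV_MSG_CODE_* family; see ecore_mcp_load_done() below for
 * the real call site):
 *
 *	u32 resp = 0, param = 0;
 *
 *	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0,
 *			   &resp, &param);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	// resp holds the FW_MSG_CODE_* response, param the payload dword
 */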
static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
			 enum ecore_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}

static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			   &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

#define CONFIG_ECORE_L2_BITMAP_IDX	(0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX	(0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX	(0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX	(0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX	(0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX	(0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX	(0x1 << 6)

static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}

struct ecore_load_req_in_params {
	u8 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
#define ECORE_LOAD_REQ_HSI_VER_1	1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};
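/* In sketch form, a load-request exchange as driven by ecore_mcp_load_req()
 * below: fill ecore_load_req_in_params with the driver's role/versions, call
 * __ecore_mcp_load_req(), and branch on out_params.load_code --
 * FW_MSG_CODE_DRV_LOAD_{ENGINE,PORT,FUNCTION} on success, or one of the
 * FW_MSG_CODE_DRV_LOAD_REFUSED_* codes that trigger a revised request.
 */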
static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
		      p_in_params->timeout_val);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
		      p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
		    GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
		    GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
		    GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
		    LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}

static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
				   u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case ECORE_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case ECORE_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	}
}

enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};
static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
				    u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case ECORE_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case ECORE_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case ECORE_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}

enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role = 0, mfw_force_cmd;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role,
					     p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
						&mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}
	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
		return rc;
	}

	/* Check if there is a DID mismatch between nvm-cfg/efuse */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		DP_NOTICE(p_hwfn, false,
			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	u32 wol_param, mcp_resp, mcp_param;

	/* @DPDK */
	wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
			     &mcp_resp, &mcp_param);
}

enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params mb_params;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x],"
		   " path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}
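/* disabled_vfs above (and vfs_to_ack below) are bitmaps of VF_MAX_STATIC bits
 * packed into u32s. A hedged sketch of walking such a bitmap (the real walk
 * is performed inside ecore_iov_mark_vf_flr()):
 *
 *	for (i = 0; i < VF_MAX_STATIC; i++)
 *		if (disabled_vfs[i / 32] & (1 << (i % 32)))
 *			; // VF `i' was FLR-ed
 */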
enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
				     &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}

static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port,
					      transceiver_data));

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw"
		   " [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    OFFSETOF(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_MFW_FIELD(transceiver_state,
					  ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");

	OSAL_TRANSCEIVER_UPDATE(p_hwfn);
}

static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			      OFFSETOF(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
}
static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct public_func *p_data,
				    int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	OSAL_MEM_ZERO(p_data, sizeof(*p_data));

	size = OSAL_MIN_T(u32, sizeof(*p_data),
			  SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
					      func_addr + (i << 2));

	return size;
}
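/* Typical use of ecore_mcp_get_shmem_func() above, a minimal sketch (this is
 * the pattern used by the link/bandwidth handlers below):
 *
 *	struct public_func shmem_info;
 *
 *	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
 *				 MCP_PF_ID(p_hwfn));
 *	// inspect shmem_info.config / shmem_info.status fields
 */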
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw"
			   " [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					 OFFSETOF(struct public_port,
						  link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init) {
		/* Link indication with modern MFW arrives as per-PF
		 * indication.
		 */
		if (p_hwfn->mcp_info->capabilities &
		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
			struct public_func shmem_info;

			ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
						 MCP_PF_ID(p_hwfn));
			p_link->link_up = !!(shmem_info.status &
					     FUNC_STATUS_VIRTUAL_LINK_UP);
		} else {
			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
		}
	} else {
		p_link->link_up = false;
	}

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* We never store the total line speed here, since p_link->speed is
	 * later adjusted according to bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
	    !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
	    !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	OSAL_LINK_UPDATE(p_hwfn, p_ptt);
out:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}
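/* A sketch of how the entry point below is typically driven (link_input is
 * expected to be populated by the caller beforehand; the field shown here is
 * illustrative only):
 *
 *	p_hwfn->mcp_info->link_input.speed.autoneg = true;
 *	rc = ecore_mcp_set_link(p_hwfn, p_ptt, true);	// b_up = true
 *	// later, ecore_mcp_set_link(p_hwfn, p_ptt, false) resets the link
 */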
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	/* Set the shmem configuration according to params */
	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by ecore, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
	    params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
					EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return ECORE_SUCCESS;
}

u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
	    PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}
static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery"
			  " process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}

static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
{
	enum ecore_mcp_protocol_type stats_type;
	union ecore_mcp_protocol_stats stats;
	struct ecore_mcp_mb_params mb_params;
	u32 hsi_param;
	enum _ecore_status_t rc;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	default:
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Invalid protocol type %d\n", type);
		return;
	}

	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}

static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
				    struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until MFW/qlediag enforce those limitations, assume there is
	 * always a limit, and correct the value to min `1' and max `100'
	 * if it isn't in range.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
	    FUNC_MF_CFG_MIN_BW_OFFSET;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
	    FUNC_MF_CFG_MAX_BW_OFFSET;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}
static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		      &param);
}

static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	DP_NOTICE(p_hwfn, false,
		  "Fan failure was detected on the network interface card"
		  " and it's going to be shut down.\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}

struct ecore_mdump_cmd_params {
	u32 cmd;
	void *p_data_src;
	u8 data_src_size;
	void *p_data_dst;
	u8 data_dst_size;
	u32 mcp_resp;
};

static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    struct ecore_mdump_cmd_params *p_mdump_cmd_params)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
	mb_params.param = p_mdump_cmd_params->cmd;
	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;

	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
		DP_INFO(p_hwfn,
			"The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
			p_mdump_cmd_params->cmd);
		rc = ECORE_NOTIMPL;
	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The mdump command is not supported by the MFW\n");
		rc = ECORE_NOTIMPL;
	}

	return rc;
}

static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 epoch)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
	mdump_cmd_params.p_data_src = &epoch;
	mdump_cmd_params.data_src_size = sizeof(epoch);

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
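/* The mdump helpers compose into the following typical flow (a sketch; the
 * epoch value is whatever timestamp the OS layer supplies):
 *
 *	ecore_mcp_mdump_set_values(p_hwfn, p_ptt, epoch);
 *	ecore_mcp_mdump_trigger(p_hwfn, p_ptt);
 *	// on a later boot, ecore_mcp_mdump_get_info() reports reason/logs
 */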
enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct mdump_config_stc *p_mdump_config)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
	mdump_cmd_params.p_data_dst = p_mdump_config;
	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);

	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
			mdump_cmd_params.mcp_resp);
		rc = ECORE_UNKNOWN_ERROR;
	}

	return rc;
}

enum _ecore_status_t
ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mdump_info *p_mdump_info)
{
	u32 addr, global_offsize, global_addr;
	struct mdump_config_stc mdump_config;
	enum _ecore_status_t rc;

	OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));

	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
				    PUBLIC_GLOBAL);
	global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	global_addr = SECTION_ADDR(global_offsize, 0);
	p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
					global_addr +
					OFFSETOF(struct public_global,
						 mdump_reason));

	if (p_mdump_info->reason) {
		rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_mdump_info->version = mdump_config.version;
		p_mdump_info->config = mdump_config.config;
		p_mdump_info->epoch = mdump_config.epoc;
		p_mdump_info->num_of_logs = mdump_config.num_of_logs;
		p_mdump_info->valid_logs = mdump_config.valid_logs;

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
			   p_mdump_info->reason, p_mdump_info->version,
			   p_mdump_info->config, p_mdump_info->epoch,
			   p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d\n", p_mdump_info->reason);
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
1831 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN; 1832 mdump_cmd_params.p_data_dst = &mfw_mdump_retain; 1833 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain); 1834 1835 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1836 if (rc != ECORE_SUCCESS) 1837 return rc; 1838 1839 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) { 1840 DP_INFO(p_hwfn, 1841 "Failed to get the mdump retained data [mcp_resp 0x%x]\n", 1842 mdump_cmd_params.mcp_resp); 1843 return ECORE_UNKNOWN_ERROR; 1844 } 1845 1846 p_mdump_retain->valid = mfw_mdump_retain.valid; 1847 p_mdump_retain->epoch = mfw_mdump_retain.epoch; 1848 p_mdump_retain->pf = mfw_mdump_retain.pf; 1849 p_mdump_retain->status = mfw_mdump_retain.status; 1850 1851 return ECORE_SUCCESS; 1852 } 1853 1854 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn, 1855 struct ecore_ptt *p_ptt) 1856 { 1857 struct ecore_mdump_cmd_params mdump_cmd_params; 1858 1859 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); 1860 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN; 1861 1862 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1863 } 1864 1865 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn, 1866 struct ecore_ptt *p_ptt) 1867 { 1868 struct ecore_mdump_retain_data mdump_retain; 1869 enum _ecore_status_t rc; 1870 1871 /* In CMT mode - no need for more than a single acknowledgment to the 1872 * MFW, and no more than a single notification to the upper driver. 1873 */ 1874 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev)) 1875 return; 1876 1877 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain); 1878 if (rc == ECORE_SUCCESS && mdump_retain.valid) { 1879 DP_NOTICE(p_hwfn, false, 1880 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n", 1881 mdump_retain.epoch, mdump_retain.pf, 1882 mdump_retain.status); 1883 } else { 1884 DP_NOTICE(p_hwfn, false, 1885 "The MFW notified that a critical error occurred in the device\n"); 1886 } 1887 1888 if (p_hwfn->p_dev->allow_mdump) { 1889 DP_NOTICE(p_hwfn, false, 1890 "Not acknowledging the notification to allow the MFW crash dump\n"); 1891 return; 1892 } 1893 1894 DP_NOTICE(p_hwfn, false, 1895 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n"); 1896 ecore_mcp_mdump_ack(p_hwfn, p_ptt); 1897 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); 1898 } 1899 1900 void 1901 ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 1902 { 1903 struct public_func shmem_info; 1904 u32 port_cfg, val; 1905 1906 if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) 1907 return; 1908 1909 OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info)); 1910 port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + 1911 OFFSETOF(struct public_port, oem_cfg_port)); 1912 val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE); 1913 if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) 1914 DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n", 1915 val); 1916 1917 val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE); 1918 if (val == OEM_CFG_SCHED_TYPE_ETS) 1919 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS; 1920 else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) 1921 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW; 1922 else 1923 DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n", 1924 val); 1925 1926 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, 1927 MCP_PF_ID(p_hwfn)); 1928 val = 
GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC); 1929 p_hwfn->ufp_info.tc = (u8)val; 1930 val = GET_MFW_FIELD(shmem_info.oem_cfg_func, 1931 OEM_CFG_FUNC_HOST_PRI_CTRL); 1932 if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) 1933 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC; 1934 else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) 1935 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS; 1936 else 1937 DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n", 1938 val); 1939 1940 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, 1941 "UFP shmem config: mode = %d tc = %d pri_type = %d\n", 1942 p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc, 1943 p_hwfn->ufp_info.pri_type); 1944 } 1945 1946 static enum _ecore_status_t 1947 ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 1948 { 1949 ecore_mcp_read_ufp_config(p_hwfn, p_ptt); 1950 1951 if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) { 1952 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; 1953 p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc; 1954 1955 ecore_qm_reconf(p_hwfn, p_ptt); 1956 } else { 1957 /* Merge UFP TC with the dcbx TC data */ 1958 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 1959 ECORE_DCBX_OPERATIONAL_MIB); 1960 } 1961 1962 /* update storm FW with negotiation results */ 1963 ecore_sp_pf_update_ufp(p_hwfn); 1964 1965 return ECORE_SUCCESS; 1966 } 1967 1968 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn, 1969 struct ecore_ptt *p_ptt) 1970 { 1971 struct ecore_mcp_info *info = p_hwfn->mcp_info; 1972 enum _ecore_status_t rc = ECORE_SUCCESS; 1973 bool found = false; 1974 u16 i; 1975 1976 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n"); 1977 1978 /* Read Messages from MFW */ 1979 ecore_mcp_read_mb(p_hwfn, p_ptt); 1980 1981 /* Compare current messages to old ones */ 1982 for (i = 0; i < info->mfw_mb_length; i++) { 1983 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i]) 1984 continue; 1985 1986 found = true; 1987 1988 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 1989 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", 1990 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); 1991 1992 switch (i) { 1993 case MFW_DRV_MSG_LINK_CHANGE: 1994 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false); 1995 break; 1996 case MFW_DRV_MSG_VF_DISABLED: 1997 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt); 1998 break; 1999 case MFW_DRV_MSG_LLDP_DATA_UPDATED: 2000 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2001 ECORE_DCBX_REMOTE_LLDP_MIB); 2002 break; 2003 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED: 2004 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2005 ECORE_DCBX_REMOTE_MIB); 2006 break; 2007 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: 2008 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2009 ECORE_DCBX_OPERATIONAL_MIB); 2010 /* clear the user-config cache */ 2011 OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0, 2012 sizeof(struct ecore_dcbx_set)); 2013 break; 2014 case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED: 2015 ecore_lldp_mib_update_event(p_hwfn, p_ptt); 2016 break; 2017 case MFW_DRV_MSG_OEM_CFG_UPDATE: 2018 ecore_mcp_handle_ufp_event(p_hwfn, p_ptt); 2019 break; 2020 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: 2021 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt); 2022 break; 2023 case MFW_DRV_MSG_ERROR_RECOVERY: 2024 ecore_mcp_handle_process_kill(p_hwfn, p_ptt); 2025 break; 2026 case MFW_DRV_MSG_GET_LAN_STATS: 2027 case MFW_DRV_MSG_GET_FCOE_STATS: 2028 case MFW_DRV_MSG_GET_ISCSI_STATS: 2029 case MFW_DRV_MSG_GET_RDMA_STATS: 2030 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i); 2031 break; 2032 case MFW_DRV_MSG_BW_UPDATE: 2033 
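			/* The MFW reported updated min/max bandwidth values
			 * for this function.
			 */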
ecore_mcp_update_bw(p_hwfn, p_ptt); 2034 break; 2035 case MFW_DRV_MSG_FAILURE_DETECTED: 2036 ecore_mcp_handle_fan_failure(p_hwfn); 2037 break; 2038 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED: 2039 ecore_mcp_handle_critical_error(p_hwfn, p_ptt); 2040 break; 2041 default: 2042 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i); 2043 rc = ECORE_INVAL; 2044 } 2045 } 2046 2047 /* ACK everything */ 2048 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) { 2049 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]); 2050 2051 /* MFW expect answer in BE, so we force write in that format */ 2052 ecore_wr(p_hwfn, p_ptt, 2053 info->mfw_mb_addr + sizeof(u32) + 2054 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) * 2055 sizeof(u32) + i * sizeof(u32), val); 2056 } 2057 2058 if (!found) { 2059 DP_NOTICE(p_hwfn, false, 2060 "Received an MFW message indication but no" 2061 " new message!\n"); 2062 rc = ECORE_INVAL; 2063 } 2064 2065 /* Copy the new mfw messages into the shadow */ 2066 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length); 2067 2068 return rc; 2069 } 2070 2071 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn, 2072 struct ecore_ptt *p_ptt, 2073 u32 *p_mfw_ver, 2074 u32 *p_running_bundle_id) 2075 { 2076 u32 global_offsize; 2077 2078 #ifndef ASIC_ONLY 2079 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 2080 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n"); 2081 return ECORE_SUCCESS; 2082 } 2083 #endif 2084 2085 if (IS_VF(p_hwfn->p_dev)) { 2086 if (p_hwfn->vf_iov_info) { 2087 struct pfvf_acquire_resp_tlv *p_resp; 2088 2089 p_resp = &p_hwfn->vf_iov_info->acquire_resp; 2090 *p_mfw_ver = p_resp->pfdev_info.mfw_ver; 2091 return ECORE_SUCCESS; 2092 } else { 2093 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2094 "VF requested MFW version prior to ACQUIRE\n"); 2095 return ECORE_INVAL; 2096 } 2097 } 2098 2099 global_offsize = ecore_rd(p_hwfn, p_ptt, 2100 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info-> 2101 public_base, 2102 PUBLIC_GLOBAL)); 2103 *p_mfw_ver = 2104 ecore_rd(p_hwfn, p_ptt, 2105 SECTION_ADDR(global_offsize, 2106 0) + OFFSETOF(struct public_global, mfw_ver)); 2107 2108 if (p_running_bundle_id != OSAL_NULL) { 2109 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt, 2110 SECTION_ADDR(global_offsize, 2111 0) + 2112 OFFSETOF(struct public_global, 2113 running_bundle_id)); 2114 } 2115 2116 return ECORE_SUCCESS; 2117 } 2118 2119 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, 2120 struct ecore_ptt *p_ptt, 2121 u32 *p_media_type) 2122 { 2123 2124 /* TODO - Add support for VFs */ 2125 if (IS_VF(p_hwfn->p_dev)) 2126 return ECORE_INVAL; 2127 2128 if (!ecore_mcp_is_init(p_hwfn)) { 2129 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n"); 2130 return ECORE_BUSY; 2131 } 2132 2133 if (!p_ptt) { 2134 *p_media_type = MEDIA_UNSPECIFIED; 2135 return ECORE_INVAL; 2136 } else { 2137 *p_media_type = ecore_rd(p_hwfn, p_ptt, 2138 p_hwfn->mcp_info->port_addr + 2139 OFFSETOF(struct public_port, 2140 media_type)); 2141 } 2142 2143 return ECORE_SUCCESS; 2144 } 2145 2146 /* @DPDK */ 2147 /* Old MFW has a global configuration for all PFs regarding RDMA support */ 2148 static void 2149 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn, 2150 enum ecore_pci_personality *p_proto) 2151 { 2152 *p_proto = ECORE_PCI_ETH; 2153 2154 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 2155 "According to Legacy capabilities, L2 personality is %08x\n", 2156 (u32)*p_proto); 2157 } 2158 2159 /* @DPDK */ 2160 static enum _ecore_status_t 2161 ecore_mcp_get_shmem_proto_mfw(struct 
ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      enum ecore_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;

	/* @DPDK - the MFW capability query is omitted in this base driver;
	 * assume the L2-only personality, matching the legacy path above.
	 */
	*p_proto = ECORE_PCI_ETH;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32)*p_proto, resp, param);
	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
			  struct public_func *p_info,
			  struct ecore_ptt *p_ptt,
			  enum ecore_pci_personality *p_proto)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
		    ECORE_SUCCESS)
			ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	default:
		rc = ECORE_INVAL;
	}

	return rc;
}

enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *info;
	struct public_func shmem_info;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				      &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return ECORE_INVAL;
	}

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		/* TODO - are there protocols for which there's no MAC? */
		DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
	}

	/* TODO - are these calculations true for BE machine?
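	 * Note that the 'upper' dword lands in the low 32 bits and the
	 * 'lower' dword in the high 32 bits below, so the result depends on
	 * how the MFW packs the WWN halves in shmem.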
	 */
	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	if (info->mtu == 0)
		info->mtu = 1500;

	DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x"
		   " protocol %02x BW [%02x - %02x]"
		   " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
		   " node %lx ovlan %04x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   (unsigned long)info->wwn_port,
		   (unsigned long)info->wwn_node, info->ovlan);

	return ECORE_SUCCESS;
}

struct ecore_mcp_link_params
*ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct ecore_mcp_link_state
*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
		p_hwfn->mcp_info->link_output.link_up = true;
	}
#endif

	return &p_hwfn->mcp_info->link_output;
}

struct ecore_mcp_link_capabilities
*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt,
			   DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	OSAL_MSLEEP(1020);

	return rc;
}

const struct ecore_mcp_function_info
*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->func_info;
}

int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt, u32 personalities)
{
	enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
	struct public_func shmem_info;
	int i, count = 0, num_pfs;

	num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);

	for (i = 0; i < num_pfs; i++) {
		ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
					 MCP_PF_ID_BY_REL(p_hwfn, i));
		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
					      &protocol) !=
		    ECORE_SUCCESS)
			continue;

		if ((1 << ((u32)protocol)) & personalities)
			count++;
	}

	return count;
}

enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u32 *p_flash_size)
{
	u32 flash_size;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - can't get flash
size\n"); 2349 return ECORE_INVAL; 2350 } 2351 #endif 2352 2353 if (IS_VF(p_hwfn->p_dev)) 2354 return ECORE_INVAL; 2355 2356 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); 2357 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> 2358 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; 2359 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET)); 2360 2361 *p_flash_size = flash_size; 2362 2363 return ECORE_SUCCESS; 2364 } 2365 2366 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn, 2367 struct ecore_ptt *p_ptt) 2368 { 2369 struct ecore_dev *p_dev = p_hwfn->p_dev; 2370 2371 if (p_dev->recov_in_prog) { 2372 DP_NOTICE(p_hwfn, false, 2373 "Avoid triggering a recovery since such a process" 2374 " is already in progress\n"); 2375 return ECORE_AGAIN; 2376 } 2377 2378 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n"); 2379 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1); 2380 2381 return ECORE_SUCCESS; 2382 } 2383 2384 static enum _ecore_status_t 2385 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn, 2386 struct ecore_ptt *p_ptt, 2387 u8 vf_id, u8 num) 2388 { 2389 u32 resp = 0, param = 0, rc_param = 0; 2390 enum _ecore_status_t rc; 2391 2392 /* Only Leader can configure MSIX, and need to take CMT into account */ 2393 2394 if (!IS_LEAD_HWFN(p_hwfn)) 2395 return ECORE_SUCCESS; 2396 num *= p_hwfn->p_dev->num_hwfns; 2397 2398 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) & 2399 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK; 2400 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) & 2401 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK; 2402 2403 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param, 2404 &resp, &rc_param); 2405 2406 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) { 2407 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n", 2408 vf_id); 2409 rc = ECORE_INVAL; 2410 } else { 2411 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2412 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n", 2413 num, vf_id); 2414 } 2415 2416 return rc; 2417 } 2418 2419 static enum _ecore_status_t 2420 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn, 2421 struct ecore_ptt *p_ptt, 2422 u8 num) 2423 { 2424 u32 resp = 0, param = num, rc_param = 0; 2425 enum _ecore_status_t rc; 2426 2427 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX, 2428 param, &resp, &rc_param); 2429 2430 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) { 2431 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n"); 2432 rc = ECORE_INVAL; 2433 } else { 2434 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2435 "Requested 0x%02x MSI-x interrupts for VFs\n", 2436 num); 2437 } 2438 2439 return rc; 2440 } 2441 2442 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn, 2443 struct ecore_ptt *p_ptt, 2444 u8 vf_id, u8 num) 2445 { 2446 if (ECORE_IS_BB(p_hwfn->p_dev)) 2447 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num); 2448 else 2449 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num); 2450 } 2451 2452 enum _ecore_status_t 2453 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 2454 struct ecore_mcp_drv_version *p_ver) 2455 { 2456 struct ecore_mcp_mb_params mb_params; 2457 struct drv_version_stc drv_version; 2458 u32 num_words, i; 2459 void *p_name; 2460 OSAL_BE32 val; 2461 enum _ecore_status_t rc; 2462 2463 #ifndef ASIC_ONLY 2464 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) 2465 return ECORE_SUCCESS; 2466 #endif 2467 2468 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version)); 2469 drv_version.version = p_ver->version; 2470 num_words = 
(MCP_DRV_VER_STR_SIZE - 4) / 4;
	for (i = 0; i < num_words; i++) {
		/* The driver name is expected to be in a big-endian format */
		p_name = &p_ver->name[i * sizeof(u32)];
		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
		*(u32 *)&drv_version.name[i * sizeof(u32)] = val;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &drv_version;
	mb_params.data_src_size = sizeof(drv_version);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

/* Wait up to 100 msec in total for the MCP to halt */
#define ECORE_MCP_HALT_SLEEP_MS		10
#define ECORE_MCP_HALT_MAX_RETRIES	10

enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0, cpu_state, cnt = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	do {
		OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
		cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
			break;
	} while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);

	if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
		return ECORE_BUSY;
	}

	ecore_mcp_cmd_set_blocking(p_hwfn, true);

	return ECORE_SUCCESS;
}

#define ECORE_MCP_RESUME_SLEEP_MS	10

enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state;

	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);

	OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);

	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  cpu_mode, cpu_state);
		return ECORE_BUSY;
	}

	ecore_mcp_cmd_set_blocking(p_hwfn, false);

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   enum ecore_ov_client client)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	enum _ecore_status_t rc;

	switch (client) {
	case ECORE_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case ECORE_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case ECORE_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

enum _ecore_status_t
ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 enum ecore_ov_driver_state drv_state)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	enum _ecore_status_t rc;

	switch (drv_state) {
	case ECORE_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case ECORE_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case ECORE_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}

/* @DPDK - not implemented in this base driver; report success so callers
 * can proceed.
 */
enum _ecore_status_t
ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_fc_npiv_tbl *p_table)
{
	return ECORE_SUCCESS;
}

/* @DPDK - not implemented in this base driver; report success so callers
 * can proceed.
 */
enum _ecore_status_t
ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u16 mtu)
{
	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       enum ecore_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	enum _ecore_status_t rc;

	switch (mode) {
	case ECORE_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case ECORE_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case ECORE_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 mask_parities)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			   mask_parities, &resp, &param);

	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not ack mask parity request. Old MFW?\n");
Old MFW?\n"); 2677 rc = ECORE_INVAL; 2678 } 2679 2680 return rc; 2681 } 2682 2683 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr, 2684 u8 *p_buf, u32 len) 2685 { 2686 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2687 u32 bytes_left, offset, bytes_to_copy, buf_size; 2688 u32 nvm_offset, resp, param; 2689 struct ecore_ptt *p_ptt; 2690 enum _ecore_status_t rc = ECORE_SUCCESS; 2691 2692 p_ptt = ecore_ptt_acquire(p_hwfn); 2693 if (!p_ptt) 2694 return ECORE_BUSY; 2695 2696 bytes_left = len; 2697 offset = 0; 2698 while (bytes_left > 0) { 2699 bytes_to_copy = OSAL_MIN_T(u32, bytes_left, 2700 MCP_DRV_NVM_BUF_LEN); 2701 nvm_offset = (addr + offset) | (bytes_to_copy << 2702 DRV_MB_PARAM_NVM_LEN_OFFSET); 2703 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, 2704 DRV_MSG_CODE_NVM_READ_NVRAM, 2705 nvm_offset, &resp, ¶m, &buf_size, 2706 (u32 *)(p_buf + offset)); 2707 if (rc != ECORE_SUCCESS) { 2708 DP_NOTICE(p_dev, false, 2709 "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n", 2710 rc); 2711 resp = FW_MSG_CODE_ERROR; 2712 break; 2713 } 2714 2715 if (resp != FW_MSG_CODE_NVM_OK) { 2716 DP_NOTICE(p_dev, false, 2717 "nvm read failed, resp = 0x%08x\n", resp); 2718 rc = ECORE_UNKNOWN_ERROR; 2719 break; 2720 } 2721 2722 /* This can be a lengthy process, and it's possible scheduler 2723 * isn't preemptible. Sleep a bit to prevent CPU hogging. 2724 */ 2725 if (bytes_left % 0x1000 < 2726 (bytes_left - buf_size) % 0x1000) 2727 OSAL_MSLEEP(1); 2728 2729 offset += buf_size; 2730 bytes_left -= buf_size; 2731 } 2732 2733 p_dev->mcp_nvm_resp = resp; 2734 ecore_ptt_release(p_hwfn, p_ptt); 2735 2736 return rc; 2737 } 2738 2739 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd, 2740 u32 addr, u8 *p_buf, u32 len) 2741 { 2742 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 2743 struct ecore_ptt *p_ptt; 2744 u32 resp, param; 2745 enum _ecore_status_t rc; 2746 2747 p_ptt = ecore_ptt_acquire(p_hwfn); 2748 if (!p_ptt) 2749 return ECORE_BUSY; 2750 2751 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, 2752 (cmd == ECORE_PHY_CORE_READ) ? 
				  DRV_MSG_CODE_PHY_CORE_READ :
				  DRV_MSG_CODE_PHY_RAW_READ,
				  addr, &resp, &param, &len, (u32 *)p_buf);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);

	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
	ecore_ptt_release(p_hwfn, p_ptt);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp, param;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;
	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
			   &resp, &param);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
						  u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp, param;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;
	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
			   &resp, &param);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

/* rc is initialized to ECORE_INVAL since the write loop below is never
 * entered when len is 0
 */
enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_ptt *p_ptt;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	switch (cmd) {
	case ECORE_PUT_FILE_DATA:
		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
		break;
	case ECORE_NVM_WRITE_NVRAM:
		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
		break;
	case ECORE_EXT_PHY_FW_UPGRADE:
		nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
			  cmd);
		rc = ECORE_INVAL;
		goto out;
	}

	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MCP_DRV_NVM_BUF_LEN);
		nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
			      addr) +
			     buf_idx;
		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
					  &resp, &param, buf_size,
					  (u32 *)&p_buf[buf_idx]);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_dev, false,
				  "ecore_mcp_nvm_write() failed, rc = %d\n",
				  rc);
			resp = FW_MSG_CODE_ERROR;
			break;
		}

		if (resp != FW_MSG_CODE_OK &&
		    resp != FW_MSG_CODE_NVM_OK &&
		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
			DP_NOTICE(p_dev, false,
				  "nvm write failed, resp = 0x%08x\n", resp);
			rc = ECORE_UNKNOWN_ERROR;
			break;
		}

		/* This can be a lengthy process, and it's possible the
		 * scheduler isn't preemptible. Sleep a bit to prevent
		 * CPU hogging.
		 */
		if (buf_idx % 0x1000 >
		    (buf_idx + buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		buf_idx += buf_size;
	}

	p_dev->mcp_nvm_resp = resp;
out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp, param, nvm_cmd;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
		  DRV_MSG_CODE_PHY_RAW_WRITE;
	rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
				  &resp, &param, len, (u32 *)p_buf);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
						   u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp, param;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
			   &resp, &param);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u32 port, u32 addr, u32 offset,
					    u32 len, u8 *p_buf)
{
	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
	u32 resp, param;
	enum _ecore_status_t rc;

	nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
		     (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
	addr = offset;
	offset = 0;
	bytes_left = len;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MAX_I2C_TRANSACTION_SIZE);
		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		nvm_offset |= ((addr + offset) <<
			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
		nvm_offset |= (bytes_to_copy <<
			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					  DRV_MSG_CODE_TRANSCEIVER_READ,
					  nvm_offset, &resp, &param, &buf_size,
					  (u32 *)(p_buf + offset));
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
				  rc);
rc = %d.\n", 2967 rc); 2968 return rc; 2969 } 2970 2971 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) 2972 return ECORE_NODEV; 2973 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) 2974 return ECORE_UNKNOWN_ERROR; 2975 2976 offset += buf_size; 2977 bytes_left -= buf_size; 2978 } 2979 2980 return ECORE_SUCCESS; 2981 } 2982 2983 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn, 2984 struct ecore_ptt *p_ptt, 2985 u32 port, u32 addr, u32 offset, 2986 u32 len, u8 *p_buf) 2987 { 2988 u32 buf_idx, buf_size, nvm_offset, resp, param; 2989 enum _ecore_status_t rc; 2990 2991 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) | 2992 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET); 2993 buf_idx = 0; 2994 while (buf_idx < len) { 2995 buf_size = OSAL_MIN_T(u32, (len - buf_idx), 2996 MAX_I2C_TRANSACTION_SIZE); 2997 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | 2998 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); 2999 nvm_offset |= ((offset + buf_idx) << 3000 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET); 3001 nvm_offset |= (buf_size << 3002 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET); 3003 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, 3004 DRV_MSG_CODE_TRANSCEIVER_WRITE, 3005 nvm_offset, &resp, ¶m, buf_size, 3006 (u32 *)&p_buf[buf_idx]); 3007 if (rc != ECORE_SUCCESS) { 3008 DP_NOTICE(p_hwfn, false, 3009 "Failed to send a transceiver write command to the MFW. rc = %d.\n", 3010 rc); 3011 return rc; 3012 } 3013 3014 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) 3015 return ECORE_NODEV; 3016 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) 3017 return ECORE_UNKNOWN_ERROR; 3018 3019 buf_idx += buf_size; 3020 } 3021 3022 return ECORE_SUCCESS; 3023 } 3024 3025 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn, 3026 struct ecore_ptt *p_ptt, 3027 u16 gpio, u32 *gpio_val) 3028 { 3029 enum _ecore_status_t rc = ECORE_SUCCESS; 3030 u32 drv_mb_param = 0, rsp; 3031 3032 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET); 3033 3034 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ, 3035 drv_mb_param, &rsp, gpio_val); 3036 3037 if (rc != ECORE_SUCCESS) 3038 return rc; 3039 3040 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) 3041 return ECORE_UNKNOWN_ERROR; 3042 3043 return ECORE_SUCCESS; 3044 } 3045 3046 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn, 3047 struct ecore_ptt *p_ptt, 3048 u16 gpio, u16 gpio_val) 3049 { 3050 enum _ecore_status_t rc = ECORE_SUCCESS; 3051 u32 drv_mb_param = 0, param, rsp; 3052 3053 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) | 3054 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET); 3055 3056 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE, 3057 drv_mb_param, &rsp, ¶m); 3058 3059 if (rc != ECORE_SUCCESS) 3060 return rc; 3061 3062 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) 3063 return ECORE_UNKNOWN_ERROR; 3064 3065 return ECORE_SUCCESS; 3066 } 3067 3068 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn, 3069 struct ecore_ptt *p_ptt, 3070 u16 gpio, u32 *gpio_direction, 3071 u32 *gpio_ctrl) 3072 { 3073 u32 drv_mb_param = 0, rsp, val = 0; 3074 enum _ecore_status_t rc = ECORE_SUCCESS; 3075 3076 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET; 3077 3078 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO, 3079 drv_mb_param, &rsp, &val); 3080 if (rc != ECORE_SUCCESS) 3081 return rc; 3082 3083 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >> 3084 DRV_MB_PARAM_GPIO_DIRECTION_OFFSET; 3085 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >> 3086 

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, num_images);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
	struct bist_nvm_image_att *p_image_att, u32 image_index)
{
	u32 buf_size, nvm_offset, resp, param;
	enum _ecore_status_t rc;

	nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
		      DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
	nvm_offset |= (image_index <<
		       DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
				  nvm_offset, &resp, &param, &buf_size,
				  (u32 *)p_image_att);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t
ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_temperature_info *p_temp_info)
{
	struct ecore_temperature_sensor *p_temp_sensor;
	struct temperature_status_stc mfw_temp_info;
	struct ecore_mcp_mb_params mb_params;
	u32 val;
	enum _ecore_status_t rc;
	u8 i;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
	mb_params.p_data_dst = &mfw_temp_info;
	mb_params.data_dst_size = sizeof(mfw_temp_info);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

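	/* Each 32-bit sensor word packs the sensor location, the high and
	 * critical thresholds, and the current temperature as bit fields;
	 * the loop below extracts them with the corresponding MASK and
	 * OFFSET pairs from the MFW HSI.
	 */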
	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
	p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
					      ECORE_MAX_NUM_OF_SENSORS);
	for (i = 0; i < p_temp_info->num_sensors; i++) {
		val = mfw_temp_info.sensor[i];
		p_temp_sensor = &p_temp_info->sensors[i];
		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
						 SENSOR_LOCATION_OFFSET;
		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
						THRESHOLD_HIGH_OFFSET;
		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
					  CRITICAL_TEMPERATURE_OFFSET;
		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
					      CURRENT_TEMP_OFFSET;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_get_mba_versions(
	struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	struct ecore_mba_vers *p_mba_vers)
{
	u32 buf_size, resp, param;
	enum _ecore_status_t rc;

	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
				  0, &resp, &param, &buf_size,
				  &p_mba_vers->mba_vers[0]);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
		rc = ECORE_UNKNOWN_ERROR;

	if (buf_size != MCP_DRV_NVM_BUF_LEN)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u64 *num_events)
{
	u32 rsp;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
			     0, &rsp, (u32 *)num_events);
}

static enum resource_id_enum
ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
{
	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

	switch (res_id) {
	case ECORE_SB:
		mfw_res_id = RESOURCE_NUM_SB_E;
		break;
	case ECORE_L2_QUEUE:
		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
		break;
	case ECORE_VPORT:
		mfw_res_id = RESOURCE_NUM_VPORT_E;
		break;
	case ECORE_RSS_ENG:
		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
		break;
	case ECORE_PQ:
		mfw_res_id = RESOURCE_NUM_PQ_E;
		break;
	case ECORE_RL:
		mfw_res_id = RESOURCE_NUM_RL_E;
		break;
	case ECORE_MAC:
	case ECORE_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		mfw_res_id = RESOURCE_VFC_FILTER_E;
		break;
	case ECORE_ILT:
		mfw_res_id = RESOURCE_ILT_E;
		break;
	case ECORE_LL2_QUEUE:
		mfw_res_id = RESOURCE_LL2_QUEUE_E;
		break;
	case ECORE_RDMA_CNQ_RAM:
	case ECORE_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		mfw_res_id = RESOURCE_CQS_E;
		break;
	case ECORE_RDMA_STATS_QUEUE:
		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
		break;
	case ECORE_BDQ:
		mfw_res_id = RESOURCE_BDQ_E;
		break;
	default:
		break;
	}

	return mfw_res_id;
}

#define ECORE_RESC_ALLOC_VERSION_MAJOR	2
#define ECORE_RESC_ALLOC_VERSION_MINOR	0
#define ECORE_RESC_ALLOC_VERSION				\
	((ECORE_RESC_ALLOC_VERSION_MAJOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) |	\
	 (ECORE_RESC_ALLOC_VERSION_MINOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))

struct ecore_resc_alloc_in_params {
	u32 cmd;
	enum ecore_resources res_id;
	u32 resc_max_val;
};

struct ecore_resc_alloc_out_params {
	u32 mcp_resp;
	u32 mcp_param;
	u32 resc_num;
	u32
resc_start; 3328 u32 vf_resc_num; 3329 u32 vf_resc_start; 3330 u32 flags; 3331 }; 3332 3333 #define ECORE_RECOVERY_PROLOG_SLEEP_MS 100 3334 3335 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev) 3336 { 3337 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 3338 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 3339 enum _ecore_status_t rc; 3340 3341 /* Allow ongoing PCIe transactions to complete */ 3342 OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS); 3343 3344 /* Clear the PF's internal FID_enable in the PXP */ 3345 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); 3346 if (rc != ECORE_SUCCESS) 3347 DP_NOTICE(p_hwfn, false, 3348 "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n", 3349 rc); 3350 3351 return rc; 3352 } 3353 3354 static enum _ecore_status_t 3355 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn, 3356 struct ecore_ptt *p_ptt, 3357 struct ecore_resc_alloc_in_params *p_in_params, 3358 struct ecore_resc_alloc_out_params *p_out_params) 3359 { 3360 struct ecore_mcp_mb_params mb_params; 3361 struct resource_info mfw_resc_info; 3362 enum _ecore_status_t rc; 3363 3364 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info)); 3365 3366 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id); 3367 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) { 3368 DP_ERR(p_hwfn, 3369 "Failed to match resource %d [%s] with the MFW resources\n", 3370 p_in_params->res_id, 3371 ecore_hw_get_resc_name(p_in_params->res_id)); 3372 return ECORE_INVAL; 3373 } 3374 3375 switch (p_in_params->cmd) { 3376 case DRV_MSG_SET_RESOURCE_VALUE_MSG: 3377 mfw_resc_info.size = p_in_params->resc_max_val; 3378 /* Fallthrough */ 3379 case DRV_MSG_GET_RESOURCE_ALLOC_MSG: 3380 break; 3381 default: 3382 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n", 3383 p_in_params->cmd); 3384 return ECORE_INVAL; 3385 } 3386 3387 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); 3388 mb_params.cmd = p_in_params->cmd; 3389 mb_params.param = ECORE_RESC_ALLOC_VERSION; 3390 mb_params.p_data_src = &mfw_resc_info; 3391 mb_params.data_src_size = sizeof(mfw_resc_info); 3392 mb_params.p_data_dst = mb_params.p_data_src; 3393 mb_params.data_dst_size = mb_params.data_src_size; 3394 3395 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3396 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n", 3397 p_in_params->cmd, p_in_params->res_id, 3398 ecore_hw_get_resc_name(p_in_params->res_id), 3399 GET_MFW_FIELD(mb_params.param, 3400 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), 3401 GET_MFW_FIELD(mb_params.param, 3402 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), 3403 p_in_params->resc_max_val); 3404 3405 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 3406 if (rc != ECORE_SUCCESS) 3407 return rc; 3408 3409 p_out_params->mcp_resp = mb_params.mcp_resp; 3410 p_out_params->mcp_param = mb_params.mcp_param; 3411 p_out_params->resc_num = mfw_resc_info.size; 3412 p_out_params->resc_start = mfw_resc_info.offset; 3413 p_out_params->vf_resc_num = mfw_resc_info.vf_size; 3414 p_out_params->vf_resc_start = mfw_resc_info.vf_offset; 3415 p_out_params->flags = mfw_resc_info.flags; 3416 3417 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3418 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n", 3419 GET_MFW_FIELD(p_out_params->mcp_param, 3420 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), 3421 GET_MFW_FIELD(p_out_params->mcp_param, 3422 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), 3423 p_out_params->resc_num, p_out_params->resc_start, 3424 
p_out_params->vf_resc_num, p_out_params->vf_resc_start, 3425 p_out_params->flags); 3426 3427 return ECORE_SUCCESS; 3428 } 3429 3430 enum _ecore_status_t 3431 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3432 enum ecore_resources res_id, u32 resc_max_val, 3433 u32 *p_mcp_resp) 3434 { 3435 struct ecore_resc_alloc_out_params out_params; 3436 struct ecore_resc_alloc_in_params in_params; 3437 enum _ecore_status_t rc; 3438 3439 OSAL_MEM_ZERO(&in_params, sizeof(in_params)); 3440 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG; 3441 in_params.res_id = res_id; 3442 in_params.resc_max_val = resc_max_val; 3443 OSAL_MEM_ZERO(&out_params, sizeof(out_params)); 3444 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, 3445 &out_params); 3446 if (rc != ECORE_SUCCESS) 3447 return rc; 3448 3449 *p_mcp_resp = out_params.mcp_resp; 3450 3451 return ECORE_SUCCESS; 3452 } 3453 3454 enum _ecore_status_t 3455 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3456 enum ecore_resources res_id, u32 *p_mcp_resp, 3457 u32 *p_resc_num, u32 *p_resc_start) 3458 { 3459 struct ecore_resc_alloc_out_params out_params; 3460 struct ecore_resc_alloc_in_params in_params; 3461 enum _ecore_status_t rc; 3462 3463 OSAL_MEM_ZERO(&in_params, sizeof(in_params)); 3464 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG; 3465 in_params.res_id = res_id; 3466 OSAL_MEM_ZERO(&out_params, sizeof(out_params)); 3467 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, 3468 &out_params); 3469 if (rc != ECORE_SUCCESS) 3470 return rc; 3471 3472 *p_mcp_resp = out_params.mcp_resp; 3473 3474 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) { 3475 *p_resc_num = out_params.resc_num; 3476 *p_resc_start = out_params.resc_start; 3477 } 3478 3479 return ECORE_SUCCESS; 3480 } 3481 3482 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn, 3483 struct ecore_ptt *p_ptt) 3484 { 3485 u32 mcp_resp, mcp_param; 3486 3487 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0, 3488 &mcp_resp, &mcp_param); 3489 } 3490 3491 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn, 3492 struct ecore_ptt *p_ptt, 3493 u32 param, u32 *p_mcp_resp, 3494 u32 *p_mcp_param) 3495 { 3496 enum _ecore_status_t rc; 3497 3498 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param, 3499 p_mcp_resp, p_mcp_param); 3500 if (rc != ECORE_SUCCESS) 3501 return rc; 3502 3503 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) { 3504 DP_INFO(p_hwfn, 3505 "The resource command is unsupported by the MFW\n"); 3506 return ECORE_NOTIMPL; 3507 } 3508 3509 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) { 3510 u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE); 3511 3512 DP_NOTICE(p_hwfn, false, 3513 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n", 3514 param, opcode); 3515 return ECORE_INVAL; 3516 } 3517 3518 return rc; 3519 } 3520 3521 enum _ecore_status_t 3522 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3523 struct ecore_resc_lock_params *p_params) 3524 { 3525 u32 param = 0, mcp_resp, mcp_param; 3526 u8 opcode; 3527 enum _ecore_status_t rc; 3528 3529 switch (p_params->timeout) { 3530 case ECORE_MCP_RESC_LOCK_TO_DEFAULT: 3531 opcode = RESOURCE_OPCODE_REQ; 3532 p_params->timeout = 0; 3533 break; 3534 case ECORE_MCP_RESC_LOCK_TO_NONE: 3535 opcode = RESOURCE_OPCODE_REQ_WO_AGING; 3536 p_params->timeout = 0; 3537 break; 3538 default: 3539 opcode = RESOURCE_OPCODE_REQ_W_AGING; 3540 break; 3541 } 3542 3543 
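	/* The entire lock request (resource id, opcode, and aging timeout)
	 * is packed into a single 32-bit mailbox parameter.
	 */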
SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); 3544 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); 3545 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout); 3546 3547 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3548 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n", 3549 param, p_params->timeout, opcode, p_params->resource); 3550 3551 /* Attempt to acquire the resource */ 3552 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, 3553 &mcp_param); 3554 if (rc != ECORE_SUCCESS) 3555 return rc; 3556 3557 /* Analyze the response */ 3558 p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER); 3559 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); 3560 3561 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3562 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n", 3563 mcp_param, opcode, p_params->owner); 3564 3565 switch (opcode) { 3566 case RESOURCE_OPCODE_GNT: 3567 p_params->b_granted = true; 3568 break; 3569 case RESOURCE_OPCODE_BUSY: 3570 p_params->b_granted = false; 3571 break; 3572 default: 3573 DP_NOTICE(p_hwfn, false, 3574 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n", 3575 mcp_param, opcode); 3576 return ECORE_INVAL; 3577 } 3578 3579 return ECORE_SUCCESS; 3580 } 3581 3582 enum _ecore_status_t 3583 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3584 struct ecore_resc_lock_params *p_params) 3585 { 3586 u32 retry_cnt = 0; 3587 enum _ecore_status_t rc; 3588 3589 do { 3590 /* No need for an interval before the first iteration */ 3591 if (retry_cnt) { 3592 if (p_params->sleep_b4_retry) { 3593 u16 retry_interval_in_ms = 3594 DIV_ROUND_UP(p_params->retry_interval, 3595 1000); 3596 3597 OSAL_MSLEEP(retry_interval_in_ms); 3598 } else { 3599 OSAL_UDELAY(p_params->retry_interval); 3600 } 3601 } 3602 3603 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params); 3604 if (rc != ECORE_SUCCESS) 3605 return rc; 3606 3607 if (p_params->b_granted) 3608 break; 3609 } while (retry_cnt++ < p_params->retry_num); 3610 3611 return ECORE_SUCCESS; 3612 } 3613 3614 void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock, 3615 struct ecore_resc_unlock_params *p_unlock, 3616 enum ecore_resc_lock resource, 3617 bool b_is_permanent) 3618 { 3619 if (p_lock != OSAL_NULL) { 3620 OSAL_MEM_ZERO(p_lock, sizeof(*p_lock)); 3621 3622 /* Permanent resources don't require aging, and there's no 3623 * point in trying to acquire them more than once since it's 3624 * unexpected another entity would release them. 3625 */ 3626 if (b_is_permanent) { 3627 p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE; 3628 } else { 3629 p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT; 3630 p_lock->retry_interval = 3631 ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT; 3632 p_lock->sleep_b4_retry = true; 3633 } 3634 3635 p_lock->resource = resource; 3636 } 3637 3638 if (p_unlock != OSAL_NULL) { 3639 OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock)); 3640 p_unlock->resource = resource; 3641 } 3642 } 3643 3644 enum _ecore_status_t 3645 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3646 struct ecore_resc_unlock_params *p_params) 3647 { 3648 u32 param = 0, mcp_resp, mcp_param; 3649 u8 opcode; 3650 enum _ecore_status_t rc; 3651 3652 opcode = p_params->b_force ? 
RESOURCE_OPCODE_FORCE_RELEASE 3653 : RESOURCE_OPCODE_RELEASE; 3654 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); 3655 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); 3656 3657 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3658 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n", 3659 param, opcode, p_params->resource); 3660 3661 /* Attempt to release the resource */ 3662 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, 3663 &mcp_param); 3664 if (rc != ECORE_SUCCESS) 3665 return rc; 3666 3667 /* Analyze the response */ 3668 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); 3669 3670 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 3671 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n", 3672 mcp_param, opcode); 3673 3674 switch (opcode) { 3675 case RESOURCE_OPCODE_RELEASED_PREVIOUS: 3676 DP_INFO(p_hwfn, 3677 "Resource unlock request for an already released resource [%d]\n", 3678 p_params->resource); 3679 /* Fallthrough */ 3680 case RESOURCE_OPCODE_RELEASED: 3681 p_params->b_released = true; 3682 break; 3683 case RESOURCE_OPCODE_WRONG_OWNER: 3684 p_params->b_released = false; 3685 break; 3686 default: 3687 DP_NOTICE(p_hwfn, false, 3688 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n", 3689 mcp_param, opcode); 3690 return ECORE_INVAL; 3691 } 3692 3693 return ECORE_SUCCESS; 3694 } 3695 3696 bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn) 3697 { 3698 return !!(p_hwfn->mcp_info->capabilities & 3699 FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ); 3700 } 3701 3702 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn, 3703 struct ecore_ptt *p_ptt) 3704 { 3705 u32 mcp_resp; 3706 enum _ecore_status_t rc; 3707 3708 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT, 3709 0, &mcp_resp, &p_hwfn->mcp_info->capabilities); 3710 if (rc == ECORE_SUCCESS) 3711 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE), 3712 "MFW supported features: %08x\n", 3713 p_hwfn->mcp_info->capabilities); 3714 3715 return rc; 3716 } 3717 3718 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn, 3719 struct ecore_ptt *p_ptt) 3720 { 3721 u32 mcp_resp, mcp_param, features; 3722 3723 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ | 3724 DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE | 3725 DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK; 3726 3727 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, 3728 features, &mcp_resp, &mcp_param); 3729 } 3730 3731 enum _ecore_status_t 3732 ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3733 struct ecore_mcp_drv_attr *p_drv_attr) 3734 { 3735 struct attribute_cmd_write_stc attr_cmd_write; 3736 enum _attribute_commands_e mfw_attr_cmd; 3737 struct ecore_mcp_mb_params mb_params; 3738 enum _ecore_status_t rc; 3739 3740 switch (p_drv_attr->attr_cmd) { 3741 case ECORE_MCP_DRV_ATTR_CMD_READ: 3742 mfw_attr_cmd = ATTRIBUTE_CMD_READ; 3743 break; 3744 case ECORE_MCP_DRV_ATTR_CMD_WRITE: 3745 mfw_attr_cmd = ATTRIBUTE_CMD_WRITE; 3746 break; 3747 case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR: 3748 mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR; 3749 break; 3750 case ECORE_MCP_DRV_ATTR_CMD_CLEAR: 3751 mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR; 3752 break; 3753 default: 3754 DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n", 3755 p_drv_attr->attr_cmd); 3756 return ECORE_INVAL; 3757 } 3758 3759 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); 3760 mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE; 3761 SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY, 
		      p_drv_attr->attr_num);
	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
		      mfw_attr_cmd);
	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
		OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
		attr_cmd_write.val = p_drv_attr->val;
		attr_cmd_write.mask = p_drv_attr->mask;
		attr_cmd_write.offset = p_drv_attr->offset;

		mb_params.p_data_src = &attr_cmd_write;
		mb_params.data_src_size = sizeof(attr_cmd_write);
	}

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The attribute command is not supported by the MFW\n");
		return ECORE_NOTIMPL;
	} else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
			mb_params.mcp_resp, p_drv_attr->attr_cmd,
			p_drv_attr->attr_num);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
		   p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
		   p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
		   mb_params.mcp_param);

	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
	    p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
		p_drv_attr->val = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      u32 offset, u32 val)
{
	struct ecore_mcp_mb_params mb_params = {0};
	u32 dword = val;
	enum _ecore_status_t rc;

	mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
	mb_params.param = offset;
	mb_params.p_data_src = &dword;
	mb_params.data_src_size = sizeof(dword);

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send the WoL write request, rc = %d\n",
			  rc);
		return;
	}

	if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK)
		DP_NOTICE(p_hwfn, false,
			  "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
			  val, offset, mb_params.mcp_resp);
}
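/* Usage sketch (illustrative, not part of the driver): a hypothetical caller
 * that serializes access to a shared resource with the MFW-arbitrated
 * lock/unlock helpers above, using the default (non-permanent) parameters.
 * The resource id is an example; error handling is abbreviated.
 *
 *	struct ecore_resc_lock_params lock;
 *	struct ecore_resc_unlock_params unlock;
 *
 *	ecore_mcp_resc_lock_default_init(&lock, &unlock,
 *					 ECORE_RESC_LOCK_RESC_ALLOC, false);
 *	if (ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock) == ECORE_SUCCESS &&
 *	    lock.b_granted) {
 *		... access the shared resource ...
 *		ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 *	}
 */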