/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "nvm_cfg.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "ecore_sp_commands.h"
#include "ecore_cxt.h"

#define GRCBASE_MCP			0xe00000

#define ECORE_MCP_RESP_ITER_US		10
#define ECORE_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES		(50 * 1000)	/* Account for 500 msec */

#ifndef ASIC_ONLY
/* Non-ASIC:
 * The waiting interval is multiplied by 100 to reduce the impact of the
 * built-in delay of 100usec in each ecore_rd().
 * In addition, a factor of 4 comparing to ASIC is applied.
 */
#define ECORE_EMUL_MCP_RESP_ITER_US	(ECORE_MCP_RESP_ITER_US * 100)
#define ECORE_EMUL_DRV_MB_MAX_RETRIES	((ECORE_DRV_MB_MAX_RETRIES / 100) * 4)
#define ECORE_EMUL_MCP_RESET_RETRIES	((ECORE_MCP_RESET_RETRIES / 100) * 4)
#endif

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_OFFSET)

#define MCP_BYTES_PER_MBIT_OFFSET	17

#ifndef ASIC_ONLY
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
#endif

bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	OSAL_BE32 tmp;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;
#endif

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			OSAL_BE32_TO_CPU(tmp);
	}
}

struct ecore_mcp_cmd_elem {
	osal_list_entry_t list;
	struct ecore_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};
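/* Illustrative sketch (not part of the driver): how the DRV_MB_* accessors
 * above resolve to absolute SHMEM addresses. The guard macro
 * ECORE_MCP_EXAMPLES is hypothetical and never defined in this codebase.
 */
#ifdef ECORE_MCP_EXAMPLES
static u32 example_read_drv_mb_header(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
	/* Equivalent to DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header): the driver
	 * mailbox base held in mcp_info is added to the field offset within
	 * 'struct public_drv_mb'.
	 */
	u32 addr = p_hwfn->mcp_info->drv_mb_addr +
		   OFFSETOF(struct public_drv_mb, drv_mb_header);

	return ecore_rd(p_hwfn, p_ptt, addr);
}
#endif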
/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
		       struct ecore_mcp_mb_params *p_mb_params,
		       u16 expected_seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
				 sizeof(*p_cmd_elem));
	if (!p_cmd_elem) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
		goto out;
	}

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
				   struct ecore_mcp_cmd_elem *p_cmd_elem)
{
	OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
	OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
				 struct ecore_mcp_cmd_elem) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return OSAL_NULL;
}

enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;

		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
					      &p_hwfn->mcp_info->cmd_list, list,
					      struct ecore_mcp_cmd_elem) {
			ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

#ifdef CONFIG_ECORE_LOCK_ALLOC
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
	}

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);

	return ECORE_SUCCESS;
}

/* Maximum of 1 sec to wait for the SHMEM ready indication */
#define ECORE_MCP_SHMEM_RDY_MAX_RETRIES	20
#define ECORE_MCP_SHMEM_RDY_ITER_MS	50

static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize, val;
	u8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES;
	u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	val = ecore_rd(p_hwfn, p_ptt, MCP_REG_CACHE_PAGING_ENABLE);
	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base) {
		DP_NOTICE(p_hwfn, false,
			  "The address of the MCP scratch-pad is not configured\n");
#ifndef ASIC_ONLY
		/* Zeroed "public_base" implies no MFW */
		if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
			DP_INFO(p_hwfn, "Emulation: Assume no MFW\n");
#endif
		return ECORE_INVAL;
	}

	p_info->public_base |= GRCBASE_MCP;

	/* Get the MFW MB address and number of supported messages */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);
	/* @@@TBD:
	 * The driver can notify that there was an MCP reset, and read the
	 * SHMEM values before the MFW has completed initializing them.
	 * As a temporary solution, the "sup_msgs" field is used as a data
	 * ready indication.
	 * This should be replaced with an actual indication when it is
	 * provided by the MFW.
	 */
	while (!p_info->mfw_mb_length && cnt--) {
		OSAL_MSLEEP(msec);
		p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
						      p_info->mfw_mb_addr);
	}

	/* Check the value rather than the (u8) retry counter, which wraps to
	 * 0xff on exhaustion and would never report the timeout.
	 */
	if (!p_info->mfw_mb_length) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to get the SHMEM ready notification after %d msec\n",
			  ECORE_MCP_SHMEM_RDY_MAX_RETRIES * msec);
		return ECORE_TIMEOUT;
	}

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x, mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
		return ECORE_NOMEM;
	}
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
		return ECORE_NOMEM;
	}
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
		OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
		return ECORE_NOMEM;
	}
#endif
	OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
	OSAL_SPIN_LOCK_INIT(&p_info->link_lock);

	OSAL_LIST_INIT(&p_info->cmd_list);

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since "public_base" indicates
		 * that the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}
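/* Illustrative sketch (not part of the driver): the expected MCP bring-up
 * order for a HW function. Error handling is elided; the guard macro
 * ECORE_MCP_EXAMPLES is hypothetical.
 */
#ifdef ECORE_MCP_EXAMPLES
static void example_mcp_bringup(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt)
{
	/* Allocate mcp_info and read the SHMEM offsets (mailboxes, seq) */
	if (ecore_mcp_cmd_init(p_hwfn, p_ptt) != ECORE_SUCCESS)
		return;

	/* Resolve the per-port SHMEM section used for link/port data */
	ecore_mcp_cmd_port_init(p_hwfn, p_ptt);

	/* From here on, MFW messages can be read into mfw_mb_cur */
	if (ecore_mcp_is_init(p_hwfn))
		ecore_mcp_read_mb(p_hwfn, p_ptt);
}
#endif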
static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}

enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 prev_generic_por_0, seq, delay = ECORE_MCP_RESP_ITER_US, cnt = 0;
	u32 retries = ECORE_MCP_RESET_RETRIES;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		delay = ECORE_EMUL_MCP_RESP_ITER_US;
		retries = ECORE_EMUL_MCP_RESET_RETRIES;
	}
#endif
	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return ECORE_ABORTED;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

	prev_generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	/* Give the MFW up to 500 msec (50 * 1000 * 10 usec) to resume */
	do {
		OSAL_UDELAY(delay);

		if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) !=
		    prev_generic_por_0)
			break;
	} while (cnt++ < retries);

	if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) !=
	    prev_generic_por_0) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}

#ifndef ASIC_ONLY
static void ecore_emul_mcp_load_req(struct ecore_hwfn *p_hwfn,
				    struct ecore_mcp_mb_params *p_mb_params)
{
	if (GET_MFW_FIELD(p_mb_params->param, DRV_ID_MCP_HSI_VER) !=
	    1 /* ECORE_LOAD_REQ_HSI_VER_1 */) {
		p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1;
		return;
	}

	if (!loaded)
		p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE;
	else if (!loaded_port[p_hwfn->port_id])
		p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_PORT;
	else
		p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	/* On CMT, always tell that it's engine */
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE;

	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: 0x%08x load cnt: 0x%x port id=%d port_load=%d\n",
		   p_mb_params->mcp_resp, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}

static void ecore_emul_mcp_unload_req(struct ecore_hwfn *p_hwfn)
{
	loaded--;
	loaded_port[p_hwfn->port_id]--;
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n", loaded);
}
static enum _ecore_status_t
ecore_emul_mcp_cmd(struct ecore_hwfn *p_hwfn,
		   struct ecore_mcp_mb_params *p_mb_params)
{
	if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_INVAL;

	switch (p_mb_params->cmd) {
	case DRV_MSG_CODE_LOAD_REQ:
		ecore_emul_mcp_load_req(p_hwfn, p_mb_params);
		break;
	case DRV_MSG_CODE_UNLOAD_REQ:
		ecore_emul_mcp_unload_req(p_hwfn);
		break;
	case DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT:
	case DRV_MSG_CODE_RESOURCE_CMD:
	case DRV_MSG_CODE_MDUMP_CMD:
	case DRV_MSG_CODE_GET_ENGINE_CONFIG:
	case DRV_MSG_CODE_GET_PPFID_BITMAP:
		return ECORE_NOTIMPL;
	default:
		break;
	}

	return ECORE_SUCCESS;
}
#endif

/* Must be called while cmd_lock is acquired */
static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
						   struct ecore_mcp_cmd_elem,
						   list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}

/* Must be called while cmd_lock is acquired */
static enum _ecore_status_t
ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params *p_mb_params;
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return ECORE_AGAIN;

	p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return ECORE_UNKNOWN_ERROR;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != OSAL_NULL &&
	    p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      OFFSETOF(struct public_drv_mb,
					       union_data);
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return ECORE_SUCCESS;
}

/* Must be called while cmd_lock is acquired */
static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_mb_params *p_mb_params,
				      u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);
	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
			    p_mb_params->data_src_size);
	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
			sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}
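/* Illustrative sketch (not part of the driver): how a response in
 * fw_mb_header is matched to the pending command. The low bits of the
 * header carry the sequence number that was sent with the command; the
 * guard macro ECORE_MCP_EXAMPLES is hypothetical.
 */
#ifdef ECORE_MCP_EXAMPLES
static bool example_resp_matches_cmd(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 fw_header = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	u16 resp_seq = (u16)(fw_header & FW_MSG_SEQ_NUMBER_MASK);

	/* The driver sent (cmd | drv_mb_seq); the MFW echoes the sequence
	 * number back, which is how ecore_mcp_update_pending_cmd() pairs a
	 * response with its ecore_mcp_cmd_elem.
	 */
	return resp_seq == p_hwfn->mcp_info->drv_mb_seq;
}
#endif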
static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
				       bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}

void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
	u32 delay = ECORE_MCP_RESP_ITER_US;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = ECORE_EMUL_MCP_RESP_ITER_US;
#endif
	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(delay);
	cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(delay);
	cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn, false,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}

static enum _ecore_status_t
_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mcp_mb_params *p_mb_params,
			 u32 max_retries, u32 delay)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 cnt = 0;
	u16 seq_num;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (!ecore_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_UDELAY(delay);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_AGAIN;
	}

	/* Send the mailbox command */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = ECORE_NOMEM;
		goto err;
	}

	__ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if
		 * the command is completed during this iteration.
		 * The spinlock stays locked until the list element is
		 * removed.
		 */

		OSAL_UDELAY(delay);
		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		ecore_mcp_print_cpu_info(p_hwfn, p_ptt);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

		ecore_mcp_cmd_set_blocking(p_hwfn, true);
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
		return ECORE_AGAIN;
	}

	ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp, p_mb_params->mcp_param,
		   (cnt * delay) / 1000, (cnt * delay) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return ECORE_SUCCESS;

err:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}
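/* Illustrative sketch (not part of the driver): a minimal caller of the
 * mailbox machinery above, via the public ecore_mcp_cmd() wrapper declared
 * in ecore_mcp.h. The command choice is arbitrary and the guard macro
 * ECORE_MCP_EXAMPLES is hypothetical.
 */
#ifdef ECORE_MCP_EXAMPLES
static enum _ecore_status_t example_send_mb_cmd(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	/* Blocks until the MFW answers or the retry budget is exhausted */
	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
			   0, &resp, &param);

	/* On success, resp holds the FW_MSG_CODE_* response (sequence bits
	 * already cleared) and param the response payload.
	 */
	return rc;
}
#endif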
static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct ecore_mcp_mb_params *p_mb_params)
{
	osal_size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 usecs = ECORE_MCP_RESP_ITER_US;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn))
		return ecore_emul_mcp_cmd(p_hwfn, p_mb_params);

	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		max_retries = ECORE_EMUL_DRV_MB_MAX_RETRIES;
		usecs = ECORE_EMUL_MCP_RESP_ITER_US;
	}
#endif
	if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
		max_retries = DIV_ROUND_UP(max_retries, 1000);
		usecs *= 1000;
	}

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size, p_mb_params->data_dst_size,
		       union_data_size);
		return ECORE_INVAL;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_ABORTED;
	}

	return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
					usecs);
}

enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size, u32 *i_buf)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size, u32 *o_buf)
{
	struct ecore_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	/* @DPDK */
	OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));

	return ECORE_SUCCESS;
}

static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
			 enum ecore_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}

static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			   &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}
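/* Illustrative sketch (not part of the driver): reading one transaction
 * through ecore_mcp_nvm_rd_cmd(). The MFW returns at most
 * MCP_DRV_NVM_BUF_LEN bytes per transaction and reports the actual size in
 * the response param. The command/offset arguments and the guard macro
 * ECORE_MCP_EXAMPLES are hypothetical placeholders.
 */
#ifdef ECORE_MCP_EXAMPLES
static enum _ecore_status_t example_nvm_read_chunk(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u32 cmd, u32 offset,
						   u32 *p_len)
{
	u32 buf[MCP_DRV_NVM_BUF_LEN / sizeof(u32)];
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, cmd, offset,
				  &resp, &param, p_len, buf);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* resp carries the FW_MSG_CODE_* status; *p_len the byte count
	 * actually copied into buf.
	 */
	return ECORE_SUCCESS;
}
#endif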
#define CONFIG_ECORE_L2_BITMAP_IDX	(0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX	(0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX	(0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX	(0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX	(0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX	(0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX	(0x1 << 6)

static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}

struct ecore_load_req_in_params {
	u8 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
#define ECORE_LOAD_REQ_HSI_VER_1	1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
		      p_in_params->timeout_val);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
		      p_in_params->avoid_eng_reset);
	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}

static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
				   u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case ECORE_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case ECORE_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	}
}

enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};

static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
				    u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case ECORE_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case ECORE_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case ECORE_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role = 0, mfw_force_cmd;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role,
					     p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
						&mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}
	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
		return rc;
	}

	/* Check if there is a DID mismatch between nvm-cfg/efuse */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		DP_NOTICE(p_hwfn, false,
			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	u32 wol_param, mcp_resp, mcp_param;

	/* @DPDK */
	wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
			     &mcp_resp, &mcp_param);
}

enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params mb_params;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
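/* Illustrative sketch (not part of the driver): the load/unload handshake
 * with the MFW as used around hardware init. Real callers also run the
 * hardware init sequence between the two load steps; the guard macro
 * ECORE_MCP_EXAMPLES is hypothetical.
 */
#ifdef ECORE_MCP_EXAMPLES
static enum _ecore_status_t example_load_handshake(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt)
{
	struct ecore_load_req_params params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&params, sizeof(params));
	params.drv_role = ECORE_DRV_ROLE_OS;

	/* Negotiate the load phase (engine/port/function) with the MFW */
	rc = ecore_mcp_load_req(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* ... hardware init according to params.load_code ... */

	/* Tell the MFW that initialization completed */
	return ecore_mcp_load_done(p_hwfn, p_ptt);
}
#endif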
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
	int i;

	OSAL_MEM_ZERO(disabled_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}

enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;
	u16 i;

	for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = (u8)VF_BITMAP_SIZE_IN_BYTES;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	return rc;
}

static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port,
					      transceiver_data));

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    OFFSETOF(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_MFW_FIELD(transceiver_state,
					  ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");

	OSAL_TRANSCEIVER_UPDATE(p_hwfn);
}

static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			      OFFSETOF(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
}

static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct public_func *p_data,
				    int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	OSAL_MEM_ZERO(p_data, sizeof(*p_data));

	size = OSAL_MIN_T(u32, sizeof(*p_data),
			  SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
					      func_addr + (i << 2));

	return size;
}

static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					 OFFSETOF(struct public_port,
						  link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init) {
		/* Link indication with modern MFW arrives as per-PF
		 * indication.
		 */
		if (p_hwfn->mcp_info->capabilities &
		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
			struct public_func shmem_info;

			ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
						 MCP_PF_ID(p_hwfn));
			p_link->link_up = !!(shmem_info.status &
					     FUNC_STATUS_VIRTUAL_LINK_UP);
		} else {
			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
		}
	} else {
		p_link->link_up = false;
	}

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* We never store the total line speed, since p_link->speed changes
	 * again according to bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	OSAL_LINK_UPDATE(p_hwfn);
out:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (b_up)
			OSAL_LINK_UPDATE(p_hwfn);
		return ECORE_SUCCESS;
	}
#endif

	/* Set the shmem configuration according to params */
	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by ecore, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
	    params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
				   EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return ECORE_SUCCESS;
}
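/* Illustrative sketch (not part of the driver): bringing the link up with
 * pause autonegotiation, based on the link_input parameters consumed by
 * ecore_mcp_set_link() above. The guard macro ECORE_MCP_EXAMPLES is
 * hypothetical.
 */
#ifdef ECORE_MCP_EXAMPLES
static enum _ecore_status_t example_link_up(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;

	/* Autonegotiate speed and pause instead of forcing them */
	params->speed.autoneg = true;
	params->pause.autoneg = true;

	/* Writes an eth_phy_cfg to SHMEM and sends DRV_MSG_CODE_INIT_PHY */
	return ecore_mcp_set_link(p_hwfn, p_ptt, true);
}
#endif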
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
			PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}

static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}

static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
{
	enum ecore_mcp_protocol_type stats_type;
	union ecore_mcp_protocol_stats stats;
	struct ecore_mcp_mb_params mb_params;
	u32 hsi_param;
	enum _ecore_status_t rc;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	default:
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Invalid protocol type %d\n", type);
		return;
	}

	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}

static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
				    struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until the MFW/qlediag enforce those limitations, assume there is
	 * always a limit, and correct the value to min 1 and max 100 if it
	 * is out of range.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_OFFSET;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}
	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_OFFSET;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		      &param);
}

static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
				 MCP_PF_ID(p_hwfn));

	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
					    FUNC_MF_CFG_OV_STAG_MASK;
	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
	if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) {
		if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) {
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
				 p_hwfn->hw_info.ovlan);
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);

			/* Configure DB to add external vlan to EDPM packets */
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID,
				 p_hwfn->hw_info.ovlan);
		} else {
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);

			/* Configure DB to add external vlan to EDPM packets */
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID, 0);
		}

		ecore_sp_pf_update_stag(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
	OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
		      &resp, &param);
}

static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	DP_NOTICE(p_hwfn, false,
		  "Fan failure was detected on the network interface card and it's going to be shut down.\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}

struct ecore_mdump_cmd_params {
	u32 cmd;
	void *p_data_src;
	u8 data_src_size;
	void *p_data_dst;
	u8 data_dst_size;
	u32 mcp_resp;
};
mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst; 1801 mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size; 1802 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 1803 if (rc != ECORE_SUCCESS) 1804 return rc; 1805 1806 p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp; 1807 1808 if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) { 1809 DP_INFO(p_hwfn, 1810 "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n", 1811 p_mdump_cmd_params->cmd); 1812 rc = ECORE_NOTIMPL; 1813 } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) { 1814 DP_INFO(p_hwfn, 1815 "The mdump command is not supported by the MFW\n"); 1816 rc = ECORE_NOTIMPL; 1817 } 1818 1819 return rc; 1820 } 1821 1822 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn, 1823 struct ecore_ptt *p_ptt) 1824 { 1825 struct ecore_mdump_cmd_params mdump_cmd_params; 1826 1827 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); 1828 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK; 1829 1830 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1831 } 1832 1833 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn, 1834 struct ecore_ptt *p_ptt, 1835 u32 epoch) 1836 { 1837 struct ecore_mdump_cmd_params mdump_cmd_params; 1838 1839 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); 1840 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES; 1841 mdump_cmd_params.p_data_src = &epoch; 1842 mdump_cmd_params.data_src_size = sizeof(epoch); 1843 1844 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1845 } 1846 1847 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn, 1848 struct ecore_ptt *p_ptt) 1849 { 1850 struct ecore_mdump_cmd_params mdump_cmd_params; 1851 1852 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); 1853 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER; 1854 1855 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1856 } 1857 1858 static enum _ecore_status_t 1859 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 1860 struct mdump_config_stc *p_mdump_config) 1861 { 1862 struct ecore_mdump_cmd_params mdump_cmd_params; 1863 enum _ecore_status_t rc; 1864 1865 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); 1866 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG; 1867 mdump_cmd_params.p_data_dst = p_mdump_config; 1868 mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config); 1869 1870 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1871 if (rc != ECORE_SUCCESS) 1872 return rc; 1873 1874 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) { 1875 DP_INFO(p_hwfn, 1876 "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n", 1877 mdump_cmd_params.mcp_resp); 1878 rc = ECORE_UNKNOWN_ERROR; 1879 } 1880 1881 return rc; 1882 } 1883 1884 enum _ecore_status_t 1885 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 1886 struct ecore_mdump_info *p_mdump_info) 1887 { 1888 u32 addr, global_offsize, global_addr; 1889 struct mdump_config_stc mdump_config; 1890 enum _ecore_status_t rc; 1891 1892 #ifndef ASIC_ONLY 1893 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) { 1894 DP_INFO(p_hwfn, "Emulation: Can't get mdump info\n"); 1895 return ECORE_NOTIMPL; 1896 } 1897 #endif 1898 1899 OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info)); 1900 1901 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 1902 PUBLIC_GLOBAL); 1903 global_offsize = ecore_rd(p_hwfn, p_ptt, addr); 
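/* The offsize dword just read packs both the offset and the size of the
 * PUBLIC_GLOBAL section; SECTION_ADDR() below converts it into an absolute
 * address, using instance 0 since the global section has a single instance
 * (it is not replicated per PF).
 */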
1904 global_addr = SECTION_ADDR(global_offsize, 0); 1905 p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt, 1906 global_addr + 1907 OFFSETOF(struct public_global, 1908 mdump_reason)); 1909 1910 if (p_mdump_info->reason) { 1911 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config); 1912 if (rc != ECORE_SUCCESS) 1913 return rc; 1914 1915 p_mdump_info->version = mdump_config.version; 1916 p_mdump_info->config = mdump_config.config; 1917 p_mdump_info->epoch = mdump_config.epoc; 1918 p_mdump_info->num_of_logs = mdump_config.num_of_logs; 1919 p_mdump_info->valid_logs = mdump_config.valid_logs; 1920 1921 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 1922 "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n", 1923 p_mdump_info->reason, p_mdump_info->version, 1924 p_mdump_info->config, p_mdump_info->epoch, 1925 p_mdump_info->num_of_logs, p_mdump_info->valid_logs); 1926 } else { 1927 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 1928 "MFW mdump info: reason %d\n", p_mdump_info->reason); 1929 } 1930 1931 return ECORE_SUCCESS; 1932 } 1933 1934 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn, 1935 struct ecore_ptt *p_ptt) 1936 { 1937 struct ecore_mdump_cmd_params mdump_cmd_params; 1938 1939 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); 1940 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS; 1941 1942 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1943 } 1944 1945 enum _ecore_status_t 1946 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 1947 struct ecore_mdump_retain_data *p_mdump_retain) 1948 { 1949 struct ecore_mdump_cmd_params mdump_cmd_params; 1950 struct mdump_retain_data_stc mfw_mdump_retain; 1951 enum _ecore_status_t rc; 1952 1953 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); 1954 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN; 1955 mdump_cmd_params.p_data_dst = &mfw_mdump_retain; 1956 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain); 1957 1958 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1959 if (rc != ECORE_SUCCESS) 1960 return rc; 1961 1962 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) { 1963 DP_INFO(p_hwfn, 1964 "Failed to get the mdump retained data [mcp_resp 0x%x]\n", 1965 mdump_cmd_params.mcp_resp); 1966 return ECORE_UNKNOWN_ERROR; 1967 } 1968 1969 p_mdump_retain->valid = mfw_mdump_retain.valid; 1970 p_mdump_retain->epoch = mfw_mdump_retain.epoch; 1971 p_mdump_retain->pf = mfw_mdump_retain.pf; 1972 p_mdump_retain->status = mfw_mdump_retain.status; 1973 1974 return ECORE_SUCCESS; 1975 } 1976 1977 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn, 1978 struct ecore_ptt *p_ptt) 1979 { 1980 struct ecore_mdump_cmd_params mdump_cmd_params; 1981 1982 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); 1983 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN; 1984 1985 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1986 } 1987 1988 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn, 1989 struct ecore_ptt *p_ptt) 1990 { 1991 struct ecore_mdump_retain_data mdump_retain; 1992 enum _ecore_status_t rc; 1993 1994 /* In CMT mode - no need for more than a single acknowledgment to the 1995 * MFW, and no more than a single notification to the upper driver. 
1996 */ 1997 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev)) 1998 return; 1999 2000 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain); 2001 if (rc == ECORE_SUCCESS && mdump_retain.valid) { 2002 DP_NOTICE(p_hwfn, false, 2003 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n", 2004 mdump_retain.epoch, mdump_retain.pf, 2005 mdump_retain.status); 2006 } else { 2007 DP_NOTICE(p_hwfn, false, 2008 "The MFW notified that a critical error occurred in the device\n"); 2009 } 2010 2011 if (p_hwfn->p_dev->allow_mdump) { 2012 DP_NOTICE(p_hwfn, false, 2013 "Not acknowledging the notification to allow the MFW crash dump\n"); 2014 return; 2015 } 2016 2017 DP_NOTICE(p_hwfn, false, 2018 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n"); 2019 ecore_mcp_mdump_ack(p_hwfn, p_ptt); 2020 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); 2021 } 2022 2023 void 2024 ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 2025 { 2026 struct public_func shmem_info; 2027 u32 port_cfg, val; 2028 2029 if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) 2030 return; 2031 2032 OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info)); 2033 port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + 2034 OFFSETOF(struct public_port, oem_cfg_port)); 2035 val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE); 2036 if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) 2037 DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n", 2038 val); 2039 2040 val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE); 2041 if (val == OEM_CFG_SCHED_TYPE_ETS) 2042 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS; 2043 else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) 2044 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW; 2045 else 2046 DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n", 2047 val); 2048 2049 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, 2050 MCP_PF_ID(p_hwfn)); 2051 val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC); 2052 p_hwfn->ufp_info.tc = (u8)val; 2053 val = GET_MFW_FIELD(shmem_info.oem_cfg_func, 2054 OEM_CFG_FUNC_HOST_PRI_CTRL); 2055 if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) 2056 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC; 2057 else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) 2058 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS; 2059 else 2060 DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n", 2061 val); 2062 2063 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2064 "UFP shmem config: mode = %d tc = %d pri_type = %d\n", 2065 p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc, 2066 p_hwfn->ufp_info.pri_type); 2067 } 2068 2069 static enum _ecore_status_t 2070 ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 2071 { 2072 ecore_mcp_read_ufp_config(p_hwfn, p_ptt); 2073 2074 if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) { 2075 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; 2076 p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc; 2077 2078 ecore_qm_reconf(p_hwfn, p_ptt); 2079 } else { 2080 /* Merge UFP TC with the dcbx TC data */ 2081 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2082 ECORE_DCBX_OPERATIONAL_MIB); 2083 } 2084 2085 /* update storm FW with negotiation results */ 2086 ecore_sp_pf_update_ufp(p_hwfn); 2087 2088 /* update stag pcp value */ 2089 ecore_sp_pf_update_stag(p_hwfn); 2090 2091 return ECORE_SUCCESS; 2092 } 2093 2094 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn, 2095 struct ecore_ptt *p_ptt) 2096 
{ 2097 struct ecore_mcp_info *info = p_hwfn->mcp_info; 2098 enum _ecore_status_t rc = ECORE_SUCCESS; 2099 bool found = false; 2100 u16 i; 2101 2102 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n"); 2103 2104 /* Read Messages from MFW */ 2105 ecore_mcp_read_mb(p_hwfn, p_ptt); 2106 2107 /* Compare current messages to old ones */ 2108 for (i = 0; i < info->mfw_mb_length; i++) { 2109 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i]) 2110 continue; 2111 2112 found = true; 2113 2114 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, 2115 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", 2116 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); 2117 2118 switch (i) { 2119 case MFW_DRV_MSG_LINK_CHANGE: 2120 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false); 2121 break; 2122 case MFW_DRV_MSG_VF_DISABLED: 2123 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt); 2124 break; 2125 case MFW_DRV_MSG_LLDP_DATA_UPDATED: 2126 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2127 ECORE_DCBX_REMOTE_LLDP_MIB); 2128 break; 2129 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED: 2130 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2131 ECORE_DCBX_REMOTE_MIB); 2132 break; 2133 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: 2134 ecore_dcbx_mib_update_event(p_hwfn, p_ptt, 2135 ECORE_DCBX_OPERATIONAL_MIB); 2136 /* clear the user-config cache */ 2137 OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0, 2138 sizeof(struct ecore_dcbx_set)); 2139 break; 2140 case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED: 2141 ecore_lldp_mib_update_event(p_hwfn, p_ptt); 2142 break; 2143 case MFW_DRV_MSG_OEM_CFG_UPDATE: 2144 ecore_mcp_handle_ufp_event(p_hwfn, p_ptt); 2145 break; 2146 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: 2147 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt); 2148 break; 2149 case MFW_DRV_MSG_ERROR_RECOVERY: 2150 ecore_mcp_handle_process_kill(p_hwfn, p_ptt); 2151 break; 2152 case MFW_DRV_MSG_GET_LAN_STATS: 2153 case MFW_DRV_MSG_GET_FCOE_STATS: 2154 case MFW_DRV_MSG_GET_ISCSI_STATS: 2155 case MFW_DRV_MSG_GET_RDMA_STATS: 2156 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i); 2157 break; 2158 case MFW_DRV_MSG_BW_UPDATE: 2159 ecore_mcp_update_bw(p_hwfn, p_ptt); 2160 break; 2161 case MFW_DRV_MSG_S_TAG_UPDATE: 2162 ecore_mcp_update_stag(p_hwfn, p_ptt); 2163 break; 2164 case MFW_DRV_MSG_FAILURE_DETECTED: 2165 ecore_mcp_handle_fan_failure(p_hwfn); 2166 break; 2167 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED: 2168 ecore_mcp_handle_critical_error(p_hwfn, p_ptt); 2169 break; 2170 default: 2171 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i); 2172 rc = ECORE_INVAL; 2173 } 2174 } 2175 2176 /* ACK everything */ 2177 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) { 2178 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]); 2179 2180 /* The MFW expects the answer in BE, so we force the write in that format */ 2181 ecore_wr(p_hwfn, p_ptt, 2182 info->mfw_mb_addr + sizeof(u32) + 2183 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) * 2184 sizeof(u32) + i * sizeof(u32), val); 2185 } 2186 2187 if (!found) { 2188 DP_NOTICE(p_hwfn, false, 2189 "Received an MFW message indication but no" 2190 " new message!\n"); 2191 rc = ECORE_INVAL; 2192 } 2193 2194 /* Copy the new mfw messages into the shadow */ 2195 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length); 2196 2197 return rc; 2198 } 2199 2200 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn, 2201 struct ecore_ptt *p_ptt, 2202 u32 *p_mfw_ver, 2203 u32 *p_running_bundle_id) 2204 { 2205 u32 global_offsize; 2206 2207 #ifndef ASIC_ONLY 2208 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && 
!ecore_mcp_is_init(p_hwfn)) { 2209 DP_INFO(p_hwfn, "Emulation: Can't get MFW version\n"); 2210 return ECORE_NOTIMPL; 2211 } 2212 #endif 2213 2214 if (IS_VF(p_hwfn->p_dev)) { 2215 if (p_hwfn->vf_iov_info) { 2216 struct pfvf_acquire_resp_tlv *p_resp; 2217 2218 p_resp = &p_hwfn->vf_iov_info->acquire_resp; 2219 *p_mfw_ver = p_resp->pfdev_info.mfw_ver; 2220 return ECORE_SUCCESS; 2221 } else { 2222 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2223 "VF requested MFW version prior to ACQUIRE\n"); 2224 return ECORE_INVAL; 2225 } 2226 } 2227 2228 global_offsize = ecore_rd(p_hwfn, p_ptt, 2229 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info-> 2230 public_base, 2231 PUBLIC_GLOBAL)); 2232 *p_mfw_ver = 2233 ecore_rd(p_hwfn, p_ptt, 2234 SECTION_ADDR(global_offsize, 2235 0) + OFFSETOF(struct public_global, mfw_ver)); 2236 2237 if (p_running_bundle_id != OSAL_NULL) { 2238 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt, 2239 SECTION_ADDR(global_offsize, 2240 0) + 2241 OFFSETOF(struct public_global, 2242 running_bundle_id)); 2243 } 2244 2245 return ECORE_SUCCESS; 2246 } 2247 2248 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, 2249 struct ecore_ptt *p_ptt, 2250 u32 *p_media_type) 2251 { 2252 *p_media_type = MEDIA_UNSPECIFIED; 2253 2254 /* TODO - Add support for VFs */ 2255 if (IS_VF(p_hwfn->p_dev)) 2256 return ECORE_INVAL; 2257 2258 if (!ecore_mcp_is_init(p_hwfn)) { 2259 #ifndef ASIC_ONLY 2260 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 2261 DP_INFO(p_hwfn, "Emulation: Can't get media type\n"); 2262 return ECORE_NOTIMPL; 2263 } 2264 #endif 2265 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); 2266 return ECORE_BUSY; 2267 } 2268 2269 if (!p_ptt) 2270 return ECORE_INVAL; 2271 2272 *p_media_type = ecore_rd(p_hwfn, p_ptt, 2273 p_hwfn->mcp_info->port_addr + 2274 OFFSETOF(struct public_port, media_type)); 2275 2276 return ECORE_SUCCESS; 2277 } 2278 2279 enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn, 2280 struct ecore_ptt *p_ptt, 2281 u32 *p_transceiver_state, 2282 u32 *p_transceiver_type) 2283 { 2284 u32 transceiver_info; 2285 enum _ecore_status_t rc = ECORE_SUCCESS; 2286 2287 /* TODO - Add support for VFs */ 2288 if (IS_VF(p_hwfn->p_dev)) 2289 return ECORE_INVAL; 2290 2291 if (!ecore_mcp_is_init(p_hwfn)) { 2292 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); 2293 return ECORE_BUSY; 2294 } 2295 2296 *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE; 2297 *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING; 2298 2299 transceiver_info = ecore_rd(p_hwfn, p_ptt, 2300 p_hwfn->mcp_info->port_addr + 2301 offsetof(struct public_port, 2302 transceiver_data)); 2303 2304 *p_transceiver_state = GET_MFW_FIELD(transceiver_info, 2305 ETH_TRANSCEIVER_STATE); 2306 2307 if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) { 2308 *p_transceiver_type = GET_MFW_FIELD(transceiver_info, 2309 ETH_TRANSCEIVER_TYPE); 2310 } else { 2311 *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN; 2312 } 2313 2314 return rc; 2315 } 2316 2317 static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type) 2318 { 2319 if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) && 2320 ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) && 2321 (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE)) 2322 return 1; 2323 2324 return 0; 2325 } 2326 2327 enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn, 2328 struct ecore_ptt *p_ptt, 2329 u32 *p_speed_mask) 2330 { 2331 u32 transceiver_type = ETH_TRANSCEIVER_TYPE_NONE, transceiver_state; 2332 2333 
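/* Translate the transceiver type reported by the MFW into an NVM-style
 * speed capability mask. A module that is absent or still updating fails
 * the readiness check below (ECORE_INVAL), and an unrecognized type falls
 * back to the permissive 0xff mask in the default case.
 */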
ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state, 2334 &transceiver_type); 2335 2336 2337 if (is_transceiver_ready(transceiver_state, transceiver_type) == 0) 2338 return ECORE_INVAL; 2339 2340 switch (transceiver_type) { 2341 case ETH_TRANSCEIVER_TYPE_1G_LX: 2342 case ETH_TRANSCEIVER_TYPE_1G_SX: 2343 case ETH_TRANSCEIVER_TYPE_1G_PCC: 2344 case ETH_TRANSCEIVER_TYPE_1G_ACC: 2345 case ETH_TRANSCEIVER_TYPE_1000BASET: 2346 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 2347 break; 2348 2349 case ETH_TRANSCEIVER_TYPE_10G_SR: 2350 case ETH_TRANSCEIVER_TYPE_10G_LR: 2351 case ETH_TRANSCEIVER_TYPE_10G_LRM: 2352 case ETH_TRANSCEIVER_TYPE_10G_ER: 2353 case ETH_TRANSCEIVER_TYPE_10G_PCC: 2354 case ETH_TRANSCEIVER_TYPE_10G_ACC: 2355 case ETH_TRANSCEIVER_TYPE_4x10G: 2356 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 2357 break; 2358 2359 case ETH_TRANSCEIVER_TYPE_40G_LR4: 2360 case ETH_TRANSCEIVER_TYPE_40G_SR4: 2361 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: 2362 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: 2363 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | 2364 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 2365 break; 2366 2367 case ETH_TRANSCEIVER_TYPE_100G_AOC: 2368 case ETH_TRANSCEIVER_TYPE_100G_SR4: 2369 case ETH_TRANSCEIVER_TYPE_100G_LR4: 2370 case ETH_TRANSCEIVER_TYPE_100G_ER4: 2371 case ETH_TRANSCEIVER_TYPE_100G_ACC: 2372 *p_speed_mask = 2373 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | 2374 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; 2375 break; 2376 2377 case ETH_TRANSCEIVER_TYPE_25G_SR: 2378 case ETH_TRANSCEIVER_TYPE_25G_LR: 2379 case ETH_TRANSCEIVER_TYPE_25G_AOC: 2380 case ETH_TRANSCEIVER_TYPE_25G_ACC_S: 2381 case ETH_TRANSCEIVER_TYPE_25G_ACC_M: 2382 case ETH_TRANSCEIVER_TYPE_25G_ACC_L: 2383 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; 2384 break; 2385 2386 case ETH_TRANSCEIVER_TYPE_25G_CA_N: 2387 case ETH_TRANSCEIVER_TYPE_25G_CA_S: 2388 case ETH_TRANSCEIVER_TYPE_25G_CA_L: 2389 case ETH_TRANSCEIVER_TYPE_4x25G_CR: 2390 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | 2391 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | 2392 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 2393 break; 2394 2395 case ETH_TRANSCEIVER_TYPE_40G_CR4: 2396 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: 2397 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | 2398 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | 2399 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 2400 break; 2401 2402 case ETH_TRANSCEIVER_TYPE_100G_CR4: 2403 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: 2404 *p_speed_mask = 2405 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | 2406 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G | 2407 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | 2408 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | 2409 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G | 2410 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | 2411 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 2412 break; 2413 2414 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: 2415 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: 2416 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC: 2417 *p_speed_mask = 2418 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | 2419 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | 2420 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | 2421 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 2422 break; 2423 2424 case ETH_TRANSCEIVER_TYPE_XLPPI: 2425 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; 2426 break; 2427 2428 case 
ETH_TRANSCEIVER_TYPE_10G_BASET: 2429 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | 2430 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 2431 break; 2432 2433 default: 2434 DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n", 2435 transceiver_type); 2436 *p_speed_mask = 0xff; 2437 break; 2438 } 2439 2440 return ECORE_SUCCESS; 2441 } 2442 2443 enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn, 2444 struct ecore_ptt *p_ptt, 2445 u32 *p_board_config) 2446 { 2447 u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr; 2448 enum _ecore_status_t rc = ECORE_SUCCESS; 2449 2450 /* TODO - Add support for VFs */ 2451 if (IS_VF(p_hwfn->p_dev)) 2452 return ECORE_INVAL; 2453 2454 if (!ecore_mcp_is_init(p_hwfn)) { 2455 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); 2456 return ECORE_BUSY; 2457 } 2458 if (!p_ptt) { 2459 *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; 2460 rc = ECORE_INVAL; 2461 } else { 2462 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, 2463 MISC_REG_GEN_PURP_CR0); 2464 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, 2465 nvm_cfg_addr + 4); 2466 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 2467 offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 2468 *p_board_config = ecore_rd(p_hwfn, p_ptt, 2469 port_cfg_addr + 2470 offsetof(struct nvm_cfg1_port, 2471 board_cfg)); 2472 } 2473 2474 return rc; 2475 } 2476 2477 /* @DPDK */ 2478 /* Old MFW has a global configuration for all PFs regarding RDMA support */ 2479 static void 2480 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn, 2481 enum ecore_pci_personality *p_proto) 2482 { 2483 *p_proto = ECORE_PCI_ETH; 2484 2485 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 2486 "According to Legacy capabilities, L2 personality is %08x\n", 2487 (u32)*p_proto); 2488 } 2489 2490 /* @DPDK */ 2491 static enum _ecore_status_t 2492 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn, 2493 struct ecore_ptt *p_ptt, 2494 enum ecore_pci_personality *p_proto) 2495 { 2496 u32 resp = 0, param = 0; 2497 enum _ecore_status_t rc; 2498 2499 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 2500 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n", 2501 (u32)*p_proto, resp, param); 2502 return ECORE_SUCCESS; 2503 } 2504 2505 static enum _ecore_status_t 2506 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn, 2507 struct public_func *p_info, 2508 struct ecore_ptt *p_ptt, 2509 enum ecore_pci_personality *p_proto) 2510 { 2511 enum _ecore_status_t rc = ECORE_SUCCESS; 2512 2513 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { 2514 case FUNC_MF_CFG_PROTOCOL_ETHERNET: 2515 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) != 2516 ECORE_SUCCESS) 2517 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto); 2518 break; 2519 default: 2520 rc = ECORE_INVAL; 2521 } 2522 2523 return rc; 2524 } 2525 2526 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn, 2527 struct ecore_ptt *p_ptt) 2528 { 2529 struct ecore_mcp_function_info *info; 2530 struct public_func shmem_info; 2531 2532 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); 2533 info = &p_hwfn->mcp_info->func_info; 2534 2535 info->pause_on_host = (shmem_info.config & 2536 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 
1 : 0; 2537 2538 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt, 2539 &info->protocol)) { 2540 DP_ERR(p_hwfn, "Unknown personality %08x\n", 2541 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); 2542 return ECORE_INVAL; 2543 } 2544 2545 ecore_read_pf_bandwidth(p_hwfn, &shmem_info); 2546 2547 if (shmem_info.mac_upper || shmem_info.mac_lower) { 2548 info->mac[0] = (u8)(shmem_info.mac_upper >> 8); 2549 info->mac[1] = (u8)(shmem_info.mac_upper); 2550 info->mac[2] = (u8)(shmem_info.mac_lower >> 24); 2551 info->mac[3] = (u8)(shmem_info.mac_lower >> 16); 2552 info->mac[4] = (u8)(shmem_info.mac_lower >> 8); 2553 info->mac[5] = (u8)(shmem_info.mac_lower); 2554 } else { 2555 /* TODO - are there protocols for which there's no MAC? */ 2556 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n"); 2557 } 2558 2559 /* TODO - are these calculations true for BE machine? */ 2560 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper | 2561 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32); 2562 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper | 2563 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32); 2564 2565 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK); 2566 2567 info->mtu = (u16)shmem_info.mtu_size; 2568 2569 if (info->mtu == 0) 2570 info->mtu = 1500; 2571 2572 2573 2574 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP), 2575 "Read configuration from shmem: pause_on_host %02x" 2576 " protocol %02x BW [%02x - %02x]" 2577 " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx" 2578 " node %lx ovlan %04x\n", 2579 info->pause_on_host, info->protocol, 2580 info->bandwidth_min, info->bandwidth_max, 2581 info->mac[0], info->mac[1], info->mac[2], 2582 info->mac[3], info->mac[4], info->mac[5], 2583 (unsigned long)info->wwn_port, 2584 (unsigned long)info->wwn_node, info->ovlan); 2585 2586 return ECORE_SUCCESS; 2587 } 2588 2589 struct ecore_mcp_link_params 2590 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn) 2591 { 2592 if (!p_hwfn || !p_hwfn->mcp_info) 2593 return OSAL_NULL; 2594 return &p_hwfn->mcp_info->link_input; 2595 } 2596 2597 struct ecore_mcp_link_state 2598 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn) 2599 { 2600 if (!p_hwfn || !p_hwfn->mcp_info) 2601 return OSAL_NULL; 2602 2603 #ifndef ASIC_ONLY 2604 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 2605 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n"); 2606 p_hwfn->mcp_info->link_output.link_up = true; 2607 } 2608 #endif 2609 2610 return &p_hwfn->mcp_info->link_output; 2611 } 2612 2613 struct ecore_mcp_link_capabilities 2614 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn) 2615 { 2616 if (!p_hwfn || !p_hwfn->mcp_info) 2617 return OSAL_NULL; 2618 return &p_hwfn->mcp_info->link_capabilities; 2619 } 2620 2621 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn, 2622 struct ecore_ptt *p_ptt) 2623 { 2624 u32 resp = 0, param = 0; 2625 enum _ecore_status_t rc; 2626 2627 rc = ecore_mcp_cmd(p_hwfn, p_ptt, 2628 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param); 2629 2630 /* Wait for the drain to complete before returning */ 2631 OSAL_MSLEEP(1020); 2632 2633 return rc; 2634 } 2635 2636 const struct ecore_mcp_function_info 2637 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn) 2638 { 2639 if (!p_hwfn || !p_hwfn->mcp_info) 2640 return OSAL_NULL; 2641 return &p_hwfn->mcp_info->func_info; 2642 } 2643 2644 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn, 2645 struct ecore_ptt *p_ptt, u32 personalities) 2646 { 2647 enum ecore_pci_personality 
protocol = ECORE_PCI_DEFAULT; 2648 struct public_func shmem_info; 2649 int i, count = 0, num_pfs; 2650 2651 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev); 2652 2653 for (i = 0; i < num_pfs; i++) { 2654 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, 2655 MCP_PF_ID_BY_REL(p_hwfn, i)); 2656 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE) 2657 continue; 2658 2659 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt, 2660 &protocol) != 2661 ECORE_SUCCESS) 2662 continue; 2663 2664 if ((1 << ((u32)protocol)) & personalities) 2665 count++; 2666 } 2667 2668 return count; 2669 } 2670 2671 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn, 2672 struct ecore_ptt *p_ptt, 2673 u32 *p_flash_size) 2674 { 2675 u32 flash_size; 2676 2677 #ifndef ASIC_ONLY 2678 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) { 2679 DP_INFO(p_hwfn, "Emulation: Can't get flash size\n"); 2680 return ECORE_NOTIMPL; 2681 } 2682 #endif 2683 2684 if (IS_VF(p_hwfn->p_dev)) 2685 return ECORE_INVAL; 2686 2687 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); 2688 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> 2689 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; 2690 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET)); 2691 2692 *p_flash_size = flash_size; 2693 2694 return ECORE_SUCCESS; 2695 } 2696 2697 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn, 2698 struct ecore_ptt *p_ptt) 2699 { 2700 struct ecore_dev *p_dev = p_hwfn->p_dev; 2701 2702 if (p_dev->recov_in_prog) { 2703 DP_NOTICE(p_hwfn, false, 2704 "Avoid triggering a recovery since such a process" 2705 " is already in progress\n"); 2706 return ECORE_AGAIN; 2707 } 2708 2709 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n"); 2710 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1); 2711 2712 return ECORE_SUCCESS; 2713 } 2714 2715 static enum _ecore_status_t 2716 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn, 2717 struct ecore_ptt *p_ptt, 2718 u8 vf_id, u8 num) 2719 { 2720 u32 resp = 0, param = 0, rc_param = 0; 2721 enum _ecore_status_t rc; 2722 2723 /* Only the leader can configure MSIX, and needs to take CMT into account */ 2724 2725 if (!IS_LEAD_HWFN(p_hwfn)) 2726 return ECORE_SUCCESS; 2727 num *= p_hwfn->p_dev->num_hwfns; 2728 2729 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) & 2730 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK; 2731 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) & 2732 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK; 2733 2734 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param, 2735 &resp, &rc_param); 2736 2737 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) { 2738 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n", 2739 vf_id); 2740 rc = ECORE_INVAL; 2741 } else { 2742 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2743 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n", 2744 num, vf_id); 2745 } 2746 2747 return rc; 2748 } 2749 2750 static enum _ecore_status_t 2751 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn, 2752 struct ecore_ptt *p_ptt, 2753 u8 num) 2754 { 2755 u32 resp = 0, param = num, rc_param = 0; 2756 enum _ecore_status_t rc; 2757 2758 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX, 2759 param, &resp, &rc_param); 2760 2761 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) { 2762 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n"); 2763 rc = ECORE_INVAL; 2764 } else { 2765 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2766 "Requested 0x%02x MSI-x interrupts for VFs\n", 2767 num); 2768 } 2769 2770 return rc; 2771 } 
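/* BB configures MSI-X per VF (the lead HWFN scales the SB count by the
 * number of HW functions to cover CMT), while AH passes a single value
 * for all of the PF's VFs via DRV_MSG_CODE_CFG_PF_VFS_MSIX;
 * ecore_mcp_config_vf_msix() below dispatches to the right variant.
 * Illustrative call (hypothetical values): request 4 status blocks for
 * VF 3 with ecore_mcp_config_vf_msix(p_hwfn, p_ptt, 3, 4).
 */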
2772 2773 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn, 2774 struct ecore_ptt *p_ptt, 2775 u8 vf_id, u8 num) 2776 { 2777 #ifndef ASIC_ONLY 2778 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) { 2779 DP_INFO(p_hwfn, 2780 "Emulation: Avoid sending the %s mailbox command\n", 2781 ECORE_IS_BB(p_hwfn->p_dev) ? "CFG_VF_MSIX" : 2782 "CFG_PF_VFS_MSIX"); 2783 return ECORE_SUCCESS; 2784 } 2785 #endif 2786 2787 if (ECORE_IS_BB(p_hwfn->p_dev)) 2788 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num); 2789 else 2790 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num); 2791 } 2792 2793 enum _ecore_status_t 2794 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 2795 struct ecore_mcp_drv_version *p_ver) 2796 { 2797 struct ecore_mcp_mb_params mb_params; 2798 struct drv_version_stc drv_version; 2799 u32 num_words, i; 2800 void *p_name; 2801 OSAL_BE32 val; 2802 enum _ecore_status_t rc; 2803 2804 #ifndef ASIC_ONLY 2805 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) 2806 return ECORE_SUCCESS; 2807 #endif 2808 2809 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version)); 2810 drv_version.version = p_ver->version; 2811 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4; 2812 for (i = 0; i < num_words; i++) { 2813 /* The driver name is expected to be in a big-endian format */ 2814 p_name = &p_ver->name[i * sizeof(u32)]; 2815 val = OSAL_CPU_TO_BE32(*(u32 *)p_name); 2816 *(u32 *)&drv_version.name[i * sizeof(u32)] = val; 2817 } 2818 2819 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); 2820 mb_params.cmd = DRV_MSG_CODE_SET_VERSION; 2821 mb_params.p_data_src = &drv_version; 2822 mb_params.data_src_size = sizeof(drv_version); 2823 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 2824 if (rc != ECORE_SUCCESS) 2825 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 2826 2827 return rc; 2828 } 2829 2830 /* A maximum of 100 msec waiting time for the MCP to halt */ 2831 #define ECORE_MCP_HALT_SLEEP_MS 10 2832 #define ECORE_MCP_HALT_MAX_RETRIES 10 2833 2834 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn, 2835 struct ecore_ptt *p_ptt) 2836 { 2837 u32 resp = 0, param = 0, cpu_state, cnt = 0; 2838 enum _ecore_status_t rc; 2839 2840 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, 2841 &param); 2842 if (rc != ECORE_SUCCESS) { 2843 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 2844 return rc; 2845 } 2846 2847 do { 2848 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS); 2849 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); 2850 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) 2851 break; 2852 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES); 2853 2854 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) { 2855 DP_NOTICE(p_hwfn, false, 2856 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", 2857 ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state); 2858 return ECORE_BUSY; 2859 } 2860 2861 ecore_mcp_cmd_set_blocking(p_hwfn, true); 2862 2863 return ECORE_SUCCESS; 2864 } 2865 2866 #define ECORE_MCP_RESUME_SLEEP_MS 10 2867 2868 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn, 2869 struct ecore_ptt *p_ptt) 2870 { 2871 u32 cpu_mode, cpu_state; 2872 2873 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); 2874 2875 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); 2876 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT; 2877 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode); 2878 2879 OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS); 2880 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); 2881 2882 if (cpu_state & 
MCP_REG_CPU_STATE_SOFT_HALTED) { 2883 DP_NOTICE(p_hwfn, false, 2884 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", 2885 cpu_mode, cpu_state); 2886 return ECORE_BUSY; 2887 } 2888 2889 ecore_mcp_cmd_set_blocking(p_hwfn, false); 2890 2891 return ECORE_SUCCESS; 2892 } 2893 2894 enum _ecore_status_t 2895 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn, 2896 struct ecore_ptt *p_ptt, 2897 enum ecore_ov_client client) 2898 { 2899 u32 resp = 0, param = 0; 2900 u32 drv_mb_param; 2901 enum _ecore_status_t rc; 2902 2903 switch (client) { 2904 case ECORE_OV_CLIENT_DRV: 2905 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS; 2906 break; 2907 case ECORE_OV_CLIENT_USER: 2908 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER; 2909 break; 2910 case ECORE_OV_CLIENT_VENDOR_SPEC: 2911 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC; 2912 break; 2913 default: 2914 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client); 2915 return ECORE_INVAL; 2916 } 2917 2918 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG, 2919 drv_mb_param, &resp, &param); 2920 if (rc != ECORE_SUCCESS) 2921 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 2922 2923 return rc; 2924 } 2925 2926 enum _ecore_status_t 2927 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn, 2928 struct ecore_ptt *p_ptt, 2929 enum ecore_ov_driver_state drv_state) 2930 { 2931 u32 resp = 0, param = 0; 2932 u32 drv_mb_param; 2933 enum _ecore_status_t rc; 2934 2935 switch (drv_state) { 2936 case ECORE_OV_DRIVER_STATE_NOT_LOADED: 2937 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED; 2938 break; 2939 case ECORE_OV_DRIVER_STATE_DISABLED: 2940 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED; 2941 break; 2942 case ECORE_OV_DRIVER_STATE_ACTIVE: 2943 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE; 2944 break; 2945 default: 2946 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state); 2947 return ECORE_INVAL; 2948 } 2949 2950 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE, 2951 drv_mb_param, &resp, &param); 2952 if (rc != ECORE_SUCCESS) 2953 DP_ERR(p_hwfn, "Failed to send driver state\n"); 2954 2955 return rc; 2956 } 2957 2958 enum _ecore_status_t 2959 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 2960 struct ecore_fc_npiv_tbl *p_table) 2961 { 2962 return 0; 2963 } 2964 2965 enum _ecore_status_t 2966 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 2967 u16 mtu) 2968 { 2969 u32 resp = 0, param = 0, drv_mb_param = 0; 2970 enum _ecore_status_t rc; 2971 2972 SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_OV_MTU_SIZE, (u32)mtu); 2973 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU, 2974 drv_mb_param, &resp, &param); 2975 if (rc != ECORE_SUCCESS) 2976 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc); 2977 2978 return rc; 2979 } 2980 2981 enum _ecore_status_t 2982 ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 2983 u8 *mac) 2984 { 2985 struct ecore_mcp_mb_params mb_params; 2986 union drv_union_data union_data; 2987 enum _ecore_status_t rc; 2988 2989 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); 2990 mb_params.cmd = DRV_MSG_CODE_SET_VMAC; 2991 SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_VMAC_TYPE, 2992 DRV_MSG_CODE_VMAC_TYPE_MAC); 2993 mb_params.param |= MCP_PF_ID(p_hwfn); 2994 OSAL_MEMCPY(&union_data.raw_data, mac, ETH_ALEN); 2995 mb_params.p_data_src = &union_data; 2996 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 2997 if (rc != ECORE_SUCCESS) 2998 
DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc); 2999 3000 return rc; 3001 } 3002 3003 enum _ecore_status_t 3004 ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3005 enum ecore_ov_eswitch eswitch) 3006 { 3007 enum _ecore_status_t rc; 3008 u32 resp = 0, param = 0; 3009 u32 drv_mb_param; 3010 3011 switch (eswitch) { 3012 case ECORE_OV_ESWITCH_NONE: 3013 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE; 3014 break; 3015 case ECORE_OV_ESWITCH_VEB: 3016 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB; 3017 break; 3018 case ECORE_OV_ESWITCH_VEPA: 3019 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA; 3020 break; 3021 default: 3022 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch); 3023 return ECORE_INVAL; 3024 } 3025 3026 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE, 3027 drv_mb_param, &resp, &param); 3028 if (rc != ECORE_SUCCESS) 3029 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc); 3030 3031 return rc; 3032 } 3033 3034 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn, 3035 struct ecore_ptt *p_ptt, 3036 enum ecore_led_mode mode) 3037 { 3038 u32 resp = 0, param = 0, drv_mb_param; 3039 enum _ecore_status_t rc; 3040 3041 switch (mode) { 3042 case ECORE_LED_MODE_ON: 3043 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON; 3044 break; 3045 case ECORE_LED_MODE_OFF: 3046 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF; 3047 break; 3048 case ECORE_LED_MODE_RESTORE: 3049 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER; 3050 break; 3051 default: 3052 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode); 3053 return ECORE_INVAL; 3054 } 3055 3056 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE, 3057 drv_mb_param, &resp, &param); 3058 if (rc != ECORE_SUCCESS) 3059 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 3060 3061 return rc; 3062 } 3063 3064 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn, 3065 struct ecore_ptt *p_ptt, 3066 u32 mask_parities) 3067 { 3068 u32 resp = 0, param = 0; 3069 enum _ecore_status_t rc; 3070 3071 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES, 3072 mask_parities, &resp, &param); 3073 3074 if (rc != ECORE_SUCCESS) { 3075 DP_ERR(p_hwfn, 3076 "MCP response failure for mask parities, aborting\n"); 3077 } else if (resp != FW_MSG_CODE_OK) { 3078 DP_ERR(p_hwfn, 3079 "MCP did not ack mask parity request. 
Old MFW?\n"); 3080 rc = ECORE_INVAL; 3081 } 3082 3083 return rc; 3084 } 3085 3086 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr, 3087 u8 *p_buf, u32 len) 3088 { 3089 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 3090 u32 bytes_left, offset, bytes_to_copy, buf_size; 3091 u32 nvm_offset, resp, param; 3092 struct ecore_ptt *p_ptt; 3093 enum _ecore_status_t rc = ECORE_SUCCESS; 3094 3095 p_ptt = ecore_ptt_acquire(p_hwfn); 3096 if (!p_ptt) 3097 return ECORE_BUSY; 3098 3099 bytes_left = len; 3100 offset = 0; 3101 while (bytes_left > 0) { 3102 bytes_to_copy = OSAL_MIN_T(u32, bytes_left, 3103 MCP_DRV_NVM_BUF_LEN); 3104 nvm_offset = (addr + offset) | (bytes_to_copy << 3105 DRV_MB_PARAM_NVM_LEN_OFFSET); 3106 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, 3107 DRV_MSG_CODE_NVM_READ_NVRAM, 3108 nvm_offset, &resp, &param, &buf_size, 3109 (u32 *)(p_buf + offset)); 3110 if (rc != ECORE_SUCCESS) { 3111 DP_NOTICE(p_dev, false, 3112 "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n", 3113 rc); 3114 resp = FW_MSG_CODE_ERROR; 3115 break; 3116 } 3117 3118 if (resp != FW_MSG_CODE_NVM_OK) { 3119 DP_NOTICE(p_dev, false, 3120 "nvm read failed, resp = 0x%08x\n", resp); 3121 rc = ECORE_UNKNOWN_ERROR; 3122 break; 3123 } 3124 3125 /* This can be a lengthy process, and it's possible the scheduler 3126 * isn't preemptible. Sleep a bit to prevent CPU hogging. 3127 */ 3128 if (bytes_left % 0x1000 < 3129 (bytes_left - buf_size) % 0x1000) 3130 OSAL_MSLEEP(1); 3131 3132 offset += buf_size; 3133 bytes_left -= buf_size; 3134 } 3135 3136 p_dev->mcp_nvm_resp = resp; 3137 ecore_ptt_release(p_hwfn, p_ptt); 3138 3139 return rc; 3140 } 3141 3142 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd, 3143 u32 addr, u8 *p_buf, u32 *p_len) 3144 { 3145 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 3146 struct ecore_ptt *p_ptt; 3147 u32 resp = 0, param; 3148 enum _ecore_status_t rc; 3149 3150 p_ptt = ecore_ptt_acquire(p_hwfn); 3151 if (!p_ptt) 3152 return ECORE_BUSY; 3153 3154 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, 3155 (cmd == ECORE_PHY_CORE_READ) ? 
3156 DRV_MSG_CODE_PHY_CORE_READ : 3157 DRV_MSG_CODE_PHY_RAW_READ, 3158 addr, &resp, &param, p_len, (u32 *)p_buf); 3159 if (rc != ECORE_SUCCESS) 3160 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); 3161 3162 p_dev->mcp_nvm_resp = resp; 3163 ecore_ptt_release(p_hwfn, p_ptt); 3164 3165 return rc; 3166 } 3167 3168 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf) 3169 { 3170 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 3171 struct ecore_ptt *p_ptt; 3172 3173 p_ptt = ecore_ptt_acquire(p_hwfn); 3174 if (!p_ptt) 3175 return ECORE_BUSY; 3176 3177 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp)); 3178 ecore_ptt_release(p_hwfn, p_ptt); 3179 3180 return ECORE_SUCCESS; 3181 } 3182 3183 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr) 3184 { 3185 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 3186 struct ecore_ptt *p_ptt; 3187 u32 resp = 0, param; 3188 enum _ecore_status_t rc; 3189 3190 p_ptt = ecore_ptt_acquire(p_hwfn); 3191 if (!p_ptt) 3192 return ECORE_BUSY; 3193 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr, 3194 &resp, &param); 3195 p_dev->mcp_nvm_resp = resp; 3196 ecore_ptt_release(p_hwfn, p_ptt); 3197 3198 return rc; 3199 } 3200 3201 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev, 3202 u32 addr) 3203 { 3204 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 3205 struct ecore_ptt *p_ptt; 3206 u32 resp = 0, param; 3207 enum _ecore_status_t rc; 3208 3209 p_ptt = ecore_ptt_acquire(p_hwfn); 3210 if (!p_ptt) 3211 return ECORE_BUSY; 3212 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr, 3213 &resp, &param); 3214 p_dev->mcp_nvm_resp = resp; 3215 ecore_ptt_release(p_hwfn, p_ptt); 3216 3217 return rc; 3218 } 3219 3220 /* rc defaults to ECORE_INVAL since the while loop below 3221 * might not be entered if len is 0 3222 */ 3223 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd, 3224 u32 addr, u8 *p_buf, u32 len) 3225 { 3226 u32 buf_idx, buf_size, nvm_cmd, nvm_offset; 3227 u32 resp = FW_MSG_CODE_ERROR, param; 3228 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 3229 enum _ecore_status_t rc = ECORE_INVAL; 3230 struct ecore_ptt *p_ptt; 3231 3232 p_ptt = ecore_ptt_acquire(p_hwfn); 3233 if (!p_ptt) 3234 return ECORE_BUSY; 3235 3236 switch (cmd) { 3237 case ECORE_PUT_FILE_DATA: 3238 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA; 3239 break; 3240 case ECORE_NVM_WRITE_NVRAM: 3241 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM; 3242 break; 3243 case ECORE_EXT_PHY_FW_UPGRADE: 3244 nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE; 3245 break; 3246 default: 3247 DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n", 3248 cmd); 3249 rc = ECORE_INVAL; 3250 goto out; 3251 } 3252 3253 buf_idx = 0; 3254 while (buf_idx < len) { 3255 buf_size = OSAL_MIN_T(u32, (len - buf_idx), 3256 MCP_DRV_NVM_BUF_LEN); 3257 nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) | 3258 addr) + 3259 buf_idx; 3260 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset, 3261 &resp, &param, buf_size, 3262 (u32 *)&p_buf[buf_idx]); 3263 if (rc != ECORE_SUCCESS) { 3264 DP_NOTICE(p_dev, false, 3265 "ecore_mcp_nvm_write() failed, rc = %d\n", 3266 rc); 3267 resp = FW_MSG_CODE_ERROR; 3268 break; 3269 } 3270 3271 if (resp != FW_MSG_CODE_OK && 3272 resp != FW_MSG_CODE_NVM_OK && 3273 resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) { 3274 DP_NOTICE(p_dev, false, 3275 "nvm write failed, resp = 0x%08x\n", resp); 3276 rc = ECORE_UNKNOWN_ERROR; 3277 break; 3278 } 3279 
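/* The 4KB modulo comparison below evaluates true only when the write
 * pointer crosses a 0x1000-byte boundary, so the 1 msec sleep fires
 * roughly once per 4KB of data rather than after every chunk.
 */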
3280 /* This can be a lengthy process, and it's possible the scheduler 3281 * isn't preemptible. Sleep a bit to prevent CPU hogging. 3282 */ 3283 if (buf_idx % 0x1000 > 3284 (buf_idx + buf_size) % 0x1000) 3285 OSAL_MSLEEP(1); 3286 3287 buf_idx += buf_size; 3288 } 3289 3290 p_dev->mcp_nvm_resp = resp; 3291 out: 3292 ecore_ptt_release(p_hwfn, p_ptt); 3293 3294 return rc; 3295 } 3296 3297 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd, 3298 u32 addr, u8 *p_buf, u32 len) 3299 { 3300 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 3301 u32 resp = 0, param, nvm_cmd; 3302 struct ecore_ptt *p_ptt; 3303 enum _ecore_status_t rc; 3304 3305 p_ptt = ecore_ptt_acquire(p_hwfn); 3306 if (!p_ptt) 3307 return ECORE_BUSY; 3308 3309 nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE : 3310 DRV_MSG_CODE_PHY_RAW_WRITE; 3311 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr, 3312 &resp, &param, len, (u32 *)p_buf); 3313 if (rc != ECORE_SUCCESS) 3314 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); 3315 p_dev->mcp_nvm_resp = resp; 3316 ecore_ptt_release(p_hwfn, p_ptt); 3317 3318 return rc; 3319 } 3320 3321 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev, 3322 u32 addr) 3323 { 3324 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); 3325 struct ecore_ptt *p_ptt; 3326 u32 resp = 0, param; 3327 enum _ecore_status_t rc; 3328 3329 p_ptt = ecore_ptt_acquire(p_hwfn); 3330 if (!p_ptt) 3331 return ECORE_BUSY; 3332 3333 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr, 3334 &resp, &param); 3335 p_dev->mcp_nvm_resp = resp; 3336 ecore_ptt_release(p_hwfn, p_ptt); 3337 3338 return rc; 3339 } 3340 3341 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn, 3342 struct ecore_ptt *p_ptt, 3343 u32 port, u32 addr, u32 offset, 3344 u32 len, u8 *p_buf) 3345 { 3346 u32 bytes_left, bytes_to_copy, buf_size, nvm_offset; 3347 u32 resp, param; 3348 enum _ecore_status_t rc; 3349 3350 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) | 3351 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET); 3352 addr = offset; 3353 offset = 0; 3354 bytes_left = len; 3355 while (bytes_left > 0) { 3356 bytes_to_copy = OSAL_MIN_T(u32, bytes_left, 3357 MAX_I2C_TRANSACTION_SIZE); 3358 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | 3359 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); 3360 nvm_offset |= ((addr + offset) << 3361 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET); 3362 nvm_offset |= (bytes_to_copy << 3363 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET); 3364 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, 3365 DRV_MSG_CODE_TRANSCEIVER_READ, 3366 nvm_offset, &resp, &param, &buf_size, 3367 (u32 *)(p_buf + offset)); 3368 if (rc != ECORE_SUCCESS) { 3369 DP_NOTICE(p_hwfn, false, 3370 "Failed to send a transceiver read command to the MFW. 
rc = %d.\n", 3371 rc); 3372 return rc; 3373 } 3374 3375 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) 3376 return ECORE_NODEV; 3377 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) 3378 return ECORE_UNKNOWN_ERROR; 3379 3380 offset += buf_size; 3381 bytes_left -= buf_size; 3382 } 3383 3384 return ECORE_SUCCESS; 3385 } 3386 3387 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn, 3388 struct ecore_ptt *p_ptt, 3389 u32 port, u32 addr, u32 offset, 3390 u32 len, u8 *p_buf) 3391 { 3392 u32 buf_idx, buf_size, nvm_offset, resp, param; 3393 enum _ecore_status_t rc; 3394 3395 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) | 3396 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET); 3397 buf_idx = 0; 3398 while (buf_idx < len) { 3399 buf_size = OSAL_MIN_T(u32, (len - buf_idx), 3400 MAX_I2C_TRANSACTION_SIZE); 3401 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | 3402 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); 3403 nvm_offset |= ((offset + buf_idx) << 3404 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET); 3405 nvm_offset |= (buf_size << 3406 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET); 3407 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, 3408 DRV_MSG_CODE_TRANSCEIVER_WRITE, 3409 nvm_offset, &resp, &param, buf_size, 3410 (u32 *)&p_buf[buf_idx]); 3411 if (rc != ECORE_SUCCESS) { 3412 DP_NOTICE(p_hwfn, false, 3413 "Failed to send a transceiver write command to the MFW. rc = %d.\n", 3414 rc); 3415 return rc; 3416 } 3417 3418 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) 3419 return ECORE_NODEV; 3420 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) 3421 return ECORE_UNKNOWN_ERROR; 3422 3423 buf_idx += buf_size; 3424 } 3425 3426 return ECORE_SUCCESS; 3427 } 3428 3429 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn, 3430 struct ecore_ptt *p_ptt, 3431 u16 gpio, u32 *gpio_val) 3432 { 3433 enum _ecore_status_t rc = ECORE_SUCCESS; 3434 u32 drv_mb_param = 0, rsp = 0; 3435 3436 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET); 3437 3438 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ, 3439 drv_mb_param, &rsp, gpio_val); 3440 3441 if (rc != ECORE_SUCCESS) 3442 return rc; 3443 3444 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) 3445 return ECORE_UNKNOWN_ERROR; 3446 3447 return ECORE_SUCCESS; 3448 } 3449 3450 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn, 3451 struct ecore_ptt *p_ptt, 3452 u16 gpio, u16 gpio_val) 3453 { 3454 enum _ecore_status_t rc = ECORE_SUCCESS; 3455 u32 drv_mb_param = 0, param, rsp = 0; 3456 3457 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) | 3458 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET); 3459 3460 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE, 3461 drv_mb_param, &rsp, &param); 3462 3463 if (rc != ECORE_SUCCESS) 3464 return rc; 3465 3466 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) 3467 return ECORE_UNKNOWN_ERROR; 3468 3469 return ECORE_SUCCESS; 3470 } 3471 3472 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn, 3473 struct ecore_ptt *p_ptt, 3474 u16 gpio, u32 *gpio_direction, 3475 u32 *gpio_ctrl) 3476 { 3477 u32 drv_mb_param = 0, rsp, val = 0; 3478 enum _ecore_status_t rc = ECORE_SUCCESS; 3479 3480 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET; 3481 3482 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO, 3483 drv_mb_param, &rsp, &val); 3484 if (rc != ECORE_SUCCESS) 3485 return rc; 3486 3487 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >> 3488 DRV_MB_PARAM_GPIO_DIRECTION_OFFSET; 3489 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >> 
3490 DRV_MB_PARAM_GPIO_CTRL_OFFSET; 3491 3492 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) 3493 return ECORE_UNKNOWN_ERROR; 3494 3495 return ECORE_SUCCESS; 3496 } 3497 3498 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn, 3499 struct ecore_ptt *p_ptt) 3500 { 3501 u32 drv_mb_param = 0, rsp, param; 3502 enum _ecore_status_t rc = ECORE_SUCCESS; 3503 3504 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST << 3505 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); 3506 3507 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 3508 drv_mb_param, &rsp, &param); 3509 3510 if (rc != ECORE_SUCCESS) 3511 return rc; 3512 3513 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || 3514 (param != DRV_MB_PARAM_BIST_RC_PASSED)) 3515 rc = ECORE_UNKNOWN_ERROR; 3516 3517 return rc; 3518 } 3519 3520 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn, 3521 struct ecore_ptt *p_ptt) 3522 { 3523 u32 drv_mb_param, rsp, param; 3524 enum _ecore_status_t rc = ECORE_SUCCESS; 3525 3526 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST << 3527 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); 3528 3529 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 3530 drv_mb_param, &rsp, &param); 3531 3532 if (rc != ECORE_SUCCESS) 3533 return rc; 3534 3535 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || 3536 (param != DRV_MB_PARAM_BIST_RC_PASSED)) 3537 rc = ECORE_UNKNOWN_ERROR; 3538 3539 return rc; 3540 } 3541 3542 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images( 3543 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images) 3544 { 3545 u32 drv_mb_param = 0, rsp = 0; 3546 enum _ecore_status_t rc = ECORE_SUCCESS; 3547 3548 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES << 3549 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); 3550 3551 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 3552 drv_mb_param, &rsp, num_images); 3553 3554 if (rc != ECORE_SUCCESS) 3555 return rc; 3556 3557 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)) 3558 rc = ECORE_UNKNOWN_ERROR; 3559 3560 return rc; 3561 } 3562 3563 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att( 3564 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 3565 struct bist_nvm_image_att *p_image_att, u32 image_index) 3566 { 3567 u32 buf_size, nvm_offset, resp, param; 3568 enum _ecore_status_t rc; 3569 3570 nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX << 3571 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); 3572 nvm_offset |= (image_index << 3573 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET); 3574 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 3575 nvm_offset, &resp, &param, &buf_size, 3576 (u32 *)p_image_att); 3577 if (rc != ECORE_SUCCESS) 3578 return rc; 3579 3580 if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || 3581 (p_image_att->return_code != 1)) 3582 rc = ECORE_UNKNOWN_ERROR; 3583 3584 return rc; 3585 } 3586 3587 enum _ecore_status_t 3588 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn, 3589 struct ecore_ptt *p_ptt, 3590 struct ecore_temperature_info *p_temp_info) 3591 { 3592 struct ecore_temperature_sensor *p_temp_sensor; 3593 struct temperature_status_stc mfw_temp_info; 3594 struct ecore_mcp_mb_params mb_params; 3595 u32 val; 3596 enum _ecore_status_t rc; 3597 u8 i; 3598 3599 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); 3600 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE; 3601 mb_params.p_data_dst = &mfw_temp_info; 3602 mb_params.data_dst_size = sizeof(mfw_temp_info); 3603 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 3604 if (rc != ECORE_SUCCESS) 3605 return rc; 3606 3607 
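/* Each sensor arrives from the MFW as a packed 32-bit word; the masks
 * below extract the sensor location, the high/critical thresholds and
 * the current reading. The build-time assertion keeps the ecore copy
 * of the sensor array in sync with the MFW's MAX_NUM_OF_SENSORS.
 */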
OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS); 3608 p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors, 3609 ECORE_MAX_NUM_OF_SENSORS); 3610 for (i = 0; i < p_temp_info->num_sensors; i++) { 3611 val = mfw_temp_info.sensor[i]; 3612 p_temp_sensor = &p_temp_info->sensors[i]; 3613 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >> 3614 SENSOR_LOCATION_OFFSET; 3615 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >> 3616 THRESHOLD_HIGH_OFFSET; 3617 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >> 3618 CRITICAL_TEMPERATURE_OFFSET; 3619 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >> 3620 CURRENT_TEMP_OFFSET; 3621 } 3622 3623 return ECORE_SUCCESS; 3624 } 3625 3626 enum _ecore_status_t ecore_mcp_get_mba_versions( 3627 struct ecore_hwfn *p_hwfn, 3628 struct ecore_ptt *p_ptt, 3629 struct ecore_mba_vers *p_mba_vers) 3630 { 3631 u32 buf_size, resp, param; 3632 enum _ecore_status_t rc; 3633 3634 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION, 3635 0, &resp, &param, &buf_size, 3636 &p_mba_vers->mba_vers[0]); 3637 3638 if (rc != ECORE_SUCCESS) 3639 return rc; 3640 3641 if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) 3642 rc = ECORE_UNKNOWN_ERROR; 3643 3644 if (buf_size != MCP_DRV_NVM_BUF_LEN) 3645 rc = ECORE_UNKNOWN_ERROR; 3646 3647 return rc; 3648 } 3649 3650 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn, 3651 struct ecore_ptt *p_ptt, 3652 u64 *num_events) 3653 { 3654 u32 rsp; 3655 3656 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS, 3657 0, &rsp, (u32 *)num_events); 3658 } 3659 3660 static enum resource_id_enum 3661 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id) 3662 { 3663 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID; 3664 3665 switch (res_id) { 3666 case ECORE_SB: 3667 mfw_res_id = RESOURCE_NUM_SB_E; 3668 break; 3669 case ECORE_L2_QUEUE: 3670 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E; 3671 break; 3672 case ECORE_VPORT: 3673 mfw_res_id = RESOURCE_NUM_VPORT_E; 3674 break; 3675 case ECORE_RSS_ENG: 3676 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E; 3677 break; 3678 case ECORE_PQ: 3679 mfw_res_id = RESOURCE_NUM_PQ_E; 3680 break; 3681 case ECORE_RL: 3682 mfw_res_id = RESOURCE_NUM_RL_E; 3683 break; 3684 case ECORE_MAC: 3685 case ECORE_VLAN: 3686 /* Each VFC resource can accommodate both a MAC and a VLAN */ 3687 mfw_res_id = RESOURCE_VFC_FILTER_E; 3688 break; 3689 case ECORE_ILT: 3690 mfw_res_id = RESOURCE_ILT_E; 3691 break; 3692 case ECORE_LL2_QUEUE: 3693 mfw_res_id = RESOURCE_LL2_QUEUE_E; 3694 break; 3695 case ECORE_RDMA_CNQ_RAM: 3696 case ECORE_CMDQS_CQS: 3697 /* CNQ/CMDQS are the same resource */ 3698 mfw_res_id = RESOURCE_CQS_E; 3699 break; 3700 case ECORE_RDMA_STATS_QUEUE: 3701 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E; 3702 break; 3703 case ECORE_BDQ: 3704 mfw_res_id = RESOURCE_BDQ_E; 3705 break; 3706 default: 3707 break; 3708 } 3709 3710 return mfw_res_id; 3711 } 3712 3713 #define ECORE_RESC_ALLOC_VERSION_MAJOR 2 3714 #define ECORE_RESC_ALLOC_VERSION_MINOR 0 3715 #define ECORE_RESC_ALLOC_VERSION \ 3716 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \ 3717 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \ 3718 (ECORE_RESC_ALLOC_VERSION_MINOR << \ 3719 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET)) 3720 3721 struct ecore_resc_alloc_in_params { 3722 u32 cmd; 3723 enum ecore_resources res_id; 3724 u32 resc_max_val; 3725 }; 3726 3727 struct ecore_resc_alloc_out_params { 3728 u32 mcp_resp; 3729 u32 mcp_param; 3730 u32 resc_num; 3731 u32 

enum _ecore_status_t ecore_mcp_get_mba_versions(
	struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	struct ecore_mba_vers *p_mba_vers)
{
	u32 buf_size, resp, param;
	enum _ecore_status_t rc;

	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
				  0, &resp, &param, &buf_size,
				  &p_mba_vers->mba_vers[0]);
	if (rc != ECORE_SUCCESS)
		return rc;

	if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
		rc = ECORE_UNKNOWN_ERROR;

	if (buf_size != MCP_DRV_NVM_BUF_LEN)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u64 *num_events)
{
	u32 rsp;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
			     0, &rsp, (u32 *)num_events);
}

static enum resource_id_enum
ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
{
	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

	switch (res_id) {
	case ECORE_SB:
		mfw_res_id = RESOURCE_NUM_SB_E;
		break;
	case ECORE_L2_QUEUE:
		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
		break;
	case ECORE_VPORT:
		mfw_res_id = RESOURCE_NUM_VPORT_E;
		break;
	case ECORE_RSS_ENG:
		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
		break;
	case ECORE_PQ:
		mfw_res_id = RESOURCE_NUM_PQ_E;
		break;
	case ECORE_RL:
		mfw_res_id = RESOURCE_NUM_RL_E;
		break;
	case ECORE_MAC:
	case ECORE_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		mfw_res_id = RESOURCE_VFC_FILTER_E;
		break;
	case ECORE_ILT:
		mfw_res_id = RESOURCE_ILT_E;
		break;
	case ECORE_LL2_QUEUE:
		mfw_res_id = RESOURCE_LL2_QUEUE_E;
		break;
	case ECORE_RDMA_CNQ_RAM:
	case ECORE_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		mfw_res_id = RESOURCE_CQS_E;
		break;
	case ECORE_RDMA_STATS_QUEUE:
		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
		break;
	case ECORE_BDQ:
		mfw_res_id = RESOURCE_BDQ_E;
		break;
	default:
		break;
	}

	return mfw_res_id;
}

#define ECORE_RESC_ALLOC_VERSION_MAJOR 2
#define ECORE_RESC_ALLOC_VERSION_MINOR 0
#define ECORE_RESC_ALLOC_VERSION					\
	((ECORE_RESC_ALLOC_VERSION_MAJOR <<				\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) |		\
	 (ECORE_RESC_ALLOC_VERSION_MINOR <<				\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))

struct ecore_resc_alloc_in_params {
	u32 cmd;
	enum ecore_resources res_id;
	u32 resc_max_val;
};

struct ecore_resc_alloc_out_params {
	u32 mcp_resp;
	u32 mcp_param;
	u32 resc_num;
	u32 resc_start;
	u32 vf_resc_num;
	u32 vf_resc_start;
	u32 flags;
};

#define ECORE_RECOVERY_PROLOG_SLEEP_MS 100

enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
	enum _ecore_status_t rc;

	/* Allow ongoing PCIe transactions to complete */
	OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);

	/* Clear the PF's internal FID_enable in the PXP */
	rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
			  rc);

	return rc;
}

static enum _ecore_status_t
ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct ecore_resc_alloc_in_params *p_in_params,
			      struct ecore_resc_alloc_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct resource_info mfw_resc_info;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));

	mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       ecore_hw_get_resc_name(p_in_params->res_id));
		return ECORE_INVAL;
	}

	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		mfw_resc_info.size = p_in_params->resc_max_val;
		/* Fallthrough */
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return ECORE_INVAL;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = ECORE_RESC_ALLOC_VERSION;
	mb_params.p_data_src = &mfw_resc_info;
	mb_params.data_src_size = sizeof(mfw_resc_info);
	mb_params.p_data_dst = mb_params.p_data_src;
	mb_params.data_dst_size = mb_params.data_src_size;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd, p_in_params->res_id,
		   ecore_hw_get_resc_name(p_in_params->res_id),
		   GET_MFW_FIELD(mb_params.param,
				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   GET_MFW_FIELD(mb_params.param,
				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = mfw_resc_info.size;
	p_out_params->resc_start = mfw_resc_info.offset;
	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
	p_out_params->flags = mfw_resc_info.flags;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   GET_MFW_FIELD(p_out_params->mcp_param,
				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   GET_MFW_FIELD(p_out_params->mcp_param,
				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num, p_out_params->resc_start,
		   p_out_params->vf_resc_num, p_out_params->vf_resc_start,
		   p_out_params->flags);

	return ECORE_SUCCESS;
}
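
/* Worked example of the response semantics (illustrative numbers): if the
 * MFW answers a DRV_MSG_GET_RESOURCE_ALLOC_MSG query for ECORE_L2_QUEUE with
 * resc_num 16 and resc_start 32, this PF owns queues 32..47; vf_resc_num 8
 * with vf_resc_start 48 would place its VFs' queues at 48..55.
 */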

enum _ecore_status_t
ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   enum ecore_resources res_id, u32 resc_max_val,
			   u32 *p_mcp_resp)
{
	struct ecore_resc_alloc_out_params out_params;
	struct ecore_resc_alloc_in_params in_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
	in_params.res_id = res_id;
	in_params.resc_max_val = resc_max_val;
	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					   &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			enum ecore_resources res_id, u32 *p_mcp_resp,
			u32 *p_resc_num, u32 *p_resc_start)
{
	struct ecore_resc_alloc_out_params out_params;
	struct ecore_resc_alloc_in_params in_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	in_params.res_id = res_id;
	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					   &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		*p_resc_num = out_params.resc_num;
		*p_resc_start = out_params.resc_start;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			     &mcp_resp, &mcp_param);
}

static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u32 param, u32 *p_mcp_resp,
						   u32 *p_mcp_param)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
			   p_mcp_resp, p_mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The resource command is unsupported by the MFW\n");
		return ECORE_NOTIMPL;
	}

	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
		u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

		DP_NOTICE(p_hwfn, false,
			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
			  param, opcode);
		return ECORE_INVAL;
	}

	return rc;
}
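
/* Illustrative sketch (not part of the driver): querying the status-block
 * range granted to this PF through the wrappers above. The handles and the
 * verbosity level are assumptions of this example.
 *
 *	u32 mcp_resp = 0, sb_num = 0, sb_start = 0;
 *
 *	if (ecore_mcp_get_resc_info(p_hwfn, p_ptt, ECORE_SB, &mcp_resp,
 *				    &sb_num, &sb_start) == ECORE_SUCCESS &&
 *	    mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 *			   "SB resources: num 0x%x, start 0x%x\n",
 *			   sb_num, sb_start);
 */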

enum _ecore_status_t
__ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      struct ecore_resc_lock_params *p_params)
{
	u32 param = 0, mcp_resp = 0, mcp_param = 0;
	u8 opcode;
	enum _ecore_status_t rc;

	switch (p_params->timeout) {
	case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		p_params->timeout = 0;
		break;
	case ECORE_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		p_params->timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		break;
	}

	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
		   param, p_params->timeout, opcode, p_params->resource);

	/* Attempt to acquire the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, p_params->owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		p_params->b_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		p_params->b_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    struct ecore_resc_lock_params *p_params)
{
	u32 retry_cnt = 0;
	enum _ecore_status_t rc;

	do {
		/* No need for an interval before the first iteration */
		if (retry_cnt) {
			if (p_params->sleep_b4_retry) {
				u16 retry_interval_in_ms =
					DIV_ROUND_UP(p_params->retry_interval,
						     1000);

				OSAL_MSLEEP(retry_interval_in_ms);
			} else {
				OSAL_UDELAY(p_params->retry_interval);
			}
		}

		rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
		if (rc != ECORE_SUCCESS)
			return rc;

		if (p_params->b_granted)
			break;
	} while (retry_cnt++ < p_params->retry_num);

	return ECORE_SUCCESS;
}

void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
				      struct ecore_resc_unlock_params *p_unlock,
				      enum ecore_resc_lock resource,
				      bool b_is_permanent)
{
	if (p_lock != OSAL_NULL) {
		OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));

		/* Permanent resources don't require aging, and there's no
		 * point in trying to acquire them more than once since it's
		 * unexpected another entity would release them.
		 */
		if (b_is_permanent) {
			p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
		} else {
			p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
			p_lock->retry_interval =
				ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
			p_lock->sleep_b4_retry = true;
		}

		p_lock->resource = resource;
	}

	if (p_unlock != OSAL_NULL) {
		OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
		p_unlock->resource = resource;
	}
}
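
/* Illustrative sketch (not part of the driver): the intended lock/unlock
 * pairing around a critical section. ECORE_RESC_LOCK_RESC_ALLOC is assumed
 * here purely as an example lock ID; any enum ecore_resc_lock value follows
 * the same pattern.
 *
 *	struct ecore_resc_unlock_params unlock_params;
 *	struct ecore_resc_lock_params lock_params;
 *
 *	ecore_mcp_resc_lock_default_init(&lock_params, &unlock_params,
 *					 ECORE_RESC_LOCK_RESC_ALLOC, false);
 *	if (ecore_mcp_resc_lock(p_hwfn, p_ptt,
 *				&lock_params) != ECORE_SUCCESS ||
 *	    !lock_params.b_granted)
 *		return;
 *
 *	... access the MFW-arbitrated shared resource ...
 *
 *	ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
 */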

enum _ecore_status_t
ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      struct ecore_resc_unlock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	enum _ecore_status_t rc;

	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
				   : RESOURCE_OPCODE_RELEASE;
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
		   param, opcode, p_params->resource);

	/* Attempt to release the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [%d]\n",
			p_params->resource);
		/* Fallthrough */
	case RESOURCE_OPCODE_RELEASED:
		p_params->b_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		p_params->b_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
{
	return !!(p_hwfn->mcp_info->capabilities &
		  FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
}

enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 mcp_resp;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
			   0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
	if (rc == ECORE_SUCCESS)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
			   "MFW supported features: %08x\n",
			   p_hwfn->mcp_info->capabilities);

	return rc;
}

enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param, features;

	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
			     features, &mcp_resp, &mcp_param);
}
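
/* Illustrative sketch (not part of the driver): a probe-time handshake could
 * advertise the driver's features and then cache what the MFW reports back,
 * using the two helpers above. Ordering and error policy are assumptions of
 * this example.
 *
 *	if (ecore_mcp_set_capabilities(p_hwfn, p_ptt) != ECORE_SUCCESS ||
 *	    ecore_mcp_get_capabilities(p_hwfn, p_ptt) != ECORE_SUCCESS)
 *		return ECORE_UNKNOWN_ERROR;
 *
 *	if (ecore_mcp_is_smart_an_supported(p_hwfn))
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "SmartAN supported\n");
 */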

enum _ecore_status_t
ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			struct ecore_mcp_drv_attr *p_drv_attr)
{
	struct attribute_cmd_write_stc attr_cmd_write;
	enum _attribute_commands_e mfw_attr_cmd;
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	switch (p_drv_attr->attr_cmd) {
	case ECORE_MCP_DRV_ATTR_CMD_READ:
		mfw_attr_cmd = ATTRIBUTE_CMD_READ;
		break;
	case ECORE_MCP_DRV_ATTR_CMD_WRITE:
		mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
		break;
	case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
		mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
		break;
	case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
		mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
			  p_drv_attr->attr_cmd);
		return ECORE_INVAL;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
		      p_drv_attr->attr_num);
	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
		      mfw_attr_cmd);
	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
		OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
		attr_cmd_write.val = p_drv_attr->val;
		attr_cmd_write.mask = p_drv_attr->mask;
		attr_cmd_write.offset = p_drv_attr->offset;

		mb_params.p_data_src = &attr_cmd_write;
		mb_params.data_src_size = sizeof(attr_cmd_write);
	}

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The attribute command is not supported by the MFW\n");
		return ECORE_NOTIMPL;
	} else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
			mb_params.mcp_resp, p_drv_attr->attr_cmd,
			p_drv_attr->attr_num);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
		   p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
		   p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
		   mb_params.mcp_param);

	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
	    p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
		p_drv_attr->val = mb_params.mcp_param;

	return ECORE_SUCCESS;
}
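
/* Illustrative sketch (not part of the driver): reading an MFW attribute
 * through ecore_mcp_drv_attribute(). The attribute number 0 is hypothetical.
 *
 *	struct ecore_mcp_drv_attr drv_attr;
 *
 *	OSAL_MEM_ZERO(&drv_attr, sizeof(drv_attr));
 *	drv_attr.attr_cmd = ECORE_MCP_DRV_ATTR_CMD_READ;
 *	drv_attr.attr_num = 0;
 *	if (ecore_mcp_drv_attribute(p_hwfn, p_ptt,
 *				    &drv_attr) == ECORE_SUCCESS)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "attr 0 = 0x%08x\n",
 *			   drv_attr.val);
 */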

enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	struct ecore_mcp_mb_params mb_params;
	u8 fir_valid, l2_valid;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The get_engine_config command is unsupported by the MFW\n");
		return ECORE_NOTIMPL;
	}

	fir_valid = GET_MFW_FIELD(mb_params.mcp_param,
				  FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
	if (fir_valid)
		p_dev->fir_affin =
			GET_MFW_FIELD(mb_params.mcp_param,
				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);

	l2_valid = GET_MFW_FIELD(mb_params.mcp_param,
				 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
	if (l2_valid)
		p_dev->l2_affin_hint =
			GET_MFW_FIELD(mb_params.mcp_param,
				      FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);

	DP_INFO(p_hwfn,
		"Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
		fir_valid, p_dev->fir_affin, l2_valid, p_dev->l2_affin_hint);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The get_ppfid_bitmap command is unsupported by the MFW\n");
		return ECORE_NOTIMPL;
	}

	p_dev->ppfid_bitmap = GET_MFW_FIELD(mb_params.mcp_param,
					    FW_MB_PARAM_PPFID_BITMAP);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "PPFID bitmap 0x%hhx\n",
		   p_dev->ppfid_bitmap);

	return ECORE_SUCCESS;
}

void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      u32 offset, u32 val)
{
	struct ecore_mcp_mb_params mb_params;
	u32 dword = val;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
	mb_params.param = offset;
	mb_params.p_data_src = &dword;
	mb_params.data_src_size = sizeof(dword);

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send WoL write request, rc = %d\n", rc);
		/* mcp_resp is not valid when the mailbox command failed */
		return;
	}

	if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK)
		DP_NOTICE(p_hwfn, false,
			  "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
			  val, offset, mb_params.mcp_resp);
}
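
/* Illustrative sketch (not part of the driver): writing a single dword to
 * the MFW-owned WoL register block. The offset and value are hypothetical;
 * real callers derive them from the WoL configuration being programmed.
 *
 *	ecore_mcp_wol_wr(p_hwfn, p_ptt, 0x0, 0x1);
 */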