xref: /dpdk/drivers/net/qede/base/ecore_mcp.c (revision 9ed26bc7bba91cb67c55f0012d886afccdf7a55a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6 
7 #include "bcm_osal.h"
8 #include "ecore.h"
9 #include "ecore_status.h"
10 #include "nvm_cfg.h"
11 #include "ecore_mcp.h"
12 #include "mcp_public.h"
13 #include "reg_addr.h"
14 #include "ecore_hw.h"
15 #include "ecore_init_fw_funcs.h"
16 #include "ecore_sriov.h"
17 #include "ecore_vf.h"
18 #include "ecore_iov_api.h"
19 #include "ecore_gtt_reg_addr.h"
20 #include "ecore_iro.h"
21 #include "ecore_dcbx.h"
22 #include "ecore_sp_commands.h"
23 #include "ecore_cxt.h"
24 
25 #define CHIP_MCP_RESP_ITER_US 10
26 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
27 
28 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)	/* Account for 5 sec */
29 #define ECORE_MCP_RESET_RETRIES (50 * 1000)	/* Account for 500 msec */
30 
31 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
32 	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
33 		 _val)
34 
35 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
36 	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
37 
38 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
39 	DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
40 		     OFFSETOF(struct public_drv_mb, _field), _val)
41 
42 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
43 	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
44 		     OFFSETOF(struct public_drv_mb, _field))
45 
46 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
47 	DRV_ID_PDA_COMP_VER_OFFSET)
48 
49 #define MCP_BYTES_PER_MBIT_OFFSET 17
50 
51 #ifndef ASIC_ONLY
52 static int loaded;
53 static int loaded_port[MAX_NUM_PORTS] = { 0 };
54 #endif
55 
56 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
57 {
58 	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
59 		return false;
60 	return true;
61 }
62 
63 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
64 {
65 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
66 					PUBLIC_PORT);
67 	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
68 
69 	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
70 						   MFW_PORT(p_hwfn));
71 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
72 		   "port_addr = 0x%x, port_id 0x%02x\n",
73 		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
74 }
75 
76 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
77 {
78 	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
79 	OSAL_BE32 tmp;
80 	u32 i;
81 
82 #ifndef ASIC_ONLY
83 	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
84 		return;
85 #endif
86 
87 	if (!p_hwfn->mcp_info->public_base)
88 		return;
89 
90 	for (i = 0; i < length; i++) {
91 		tmp = ecore_rd(p_hwfn, p_ptt,
92 			       p_hwfn->mcp_info->mfw_mb_addr +
93 			       (i << 2) + sizeof(u32));
94 
95 		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
96 		    OSAL_BE32_TO_CPU(tmp);
97 	}
98 }
99 
100 struct ecore_mcp_cmd_elem {
101 	osal_list_entry_t list;
102 	struct ecore_mcp_mb_params *p_mb_params;
103 	u16 expected_seq_num;
104 	bool b_is_completed;
105 };
106 
107 /* Must be called while cmd_lock is acquired */
108 static struct ecore_mcp_cmd_elem *
109 ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
110 		       struct ecore_mcp_mb_params *p_mb_params,
111 		       u16 expected_seq_num)
112 {
113 	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
114 
115 	p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
116 				 sizeof(*p_cmd_elem));
117 	if (!p_cmd_elem) {
118 		DP_NOTICE(p_hwfn, false,
119 			  "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
120 		goto out;
121 	}
122 
123 	p_cmd_elem->p_mb_params = p_mb_params;
124 	p_cmd_elem->expected_seq_num = expected_seq_num;
125 	OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
126 out:
127 	return p_cmd_elem;
128 }
129 
130 /* Must be called while cmd_lock is acquired */
131 static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
132 				   struct ecore_mcp_cmd_elem *p_cmd_elem)
133 {
134 	OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
135 	OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
136 }
137 
138 /* Must be called while cmd_lock is acquired */
139 static struct ecore_mcp_cmd_elem *
140 ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
141 {
142 	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
143 
144 	OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
145 				 struct ecore_mcp_cmd_elem) {
146 		if (p_cmd_elem->expected_seq_num == seq_num)
147 			return p_cmd_elem;
148 	}
149 
150 	return OSAL_NULL;
151 }
152 
153 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
154 {
155 	if (p_hwfn->mcp_info) {
156 		struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;
157 
158 		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
159 		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
160 
161 		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
162 		OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
163 					      &p_hwfn->mcp_info->cmd_list, list,
164 					      struct ecore_mcp_cmd_elem) {
165 			ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
166 		}
167 		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
168 
169 #ifdef CONFIG_ECORE_LOCK_ALLOC
170 		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
171 		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
172 #endif
173 	}
174 
175 	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
176 
177 	return ECORE_SUCCESS;
178 }
179 
180 /* Maximum of 1 sec to wait for the SHMEM ready indication */
181 #define ECORE_MCP_SHMEM_RDY_MAX_RETRIES	20
182 #define ECORE_MCP_SHMEM_RDY_ITER_MS	50
183 
184 static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
185 						   struct ecore_ptt *p_ptt)
186 {
187 	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
188 	u8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES;
189 	u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS;
190 	u32 drv_mb_offsize, mfw_mb_offsize;
191 	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
192 
193 #ifndef ASIC_ONLY
194 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
195 		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
196 		p_info->public_base = 0;
197 		return ECORE_INVAL;
198 	}
199 #endif
200 
201 	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
202 	if (!p_info->public_base)
203 		return ECORE_INVAL;
204 
205 	p_info->public_base |= GRCBASE_MCP;
206 
207 	/* Get the MFW MB address and number of supported messages */
208 	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
209 				  SECTION_OFFSIZE_ADDR(p_info->public_base,
210 				  PUBLIC_MFW_MB));
211 	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
212 	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
213 					      p_info->mfw_mb_addr);
214 
215 	/* @@@TBD:
216 	 * The driver can notify that there was an MCP reset, and read the SHMEM
217 	 * values before the MFW has completed initializing them.
218 	 * As a temporary solution, the "sup_msgs" field is used as a data ready
219 	 * indication.
220 	 * This should be replaced with an actual indication when it is provided
221 	 * by the MFW.
222 	 */
223 	while (!p_info->mfw_mb_length && cnt--) {
224 		OSAL_MSLEEP(msec);
225 		p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
226 						      p_info->mfw_mb_addr);
227 	}
228 
229 	if (!cnt) {
230 		DP_NOTICE(p_hwfn, false,
231 			  "Failed to get the SHMEM ready notification after %d msec\n",
232 			  ECORE_MCP_SHMEM_RDY_MAX_RETRIES * msec);
233 		return ECORE_TIMEOUT;
234 	}
235 
236 	/* Calculate the driver and MFW mailbox address */
237 	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
238 				  SECTION_OFFSIZE_ADDR(p_info->public_base,
239 						       PUBLIC_DRV_MB));
240 	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
241 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
242 		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
243 		   " mcp_pf_id = 0x%x\n",
244 		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
245 
246 	/* Get the current driver mailbox sequence before sending
247 	 * the first command
248 	 */
249 	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
250 	    DRV_MSG_SEQ_NUMBER_MASK;
251 
252 	/* Get current FW pulse sequence */
253 	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
254 	    DRV_PULSE_SEQ_MASK;
255 
256 	p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
257 
258 	return ECORE_SUCCESS;
259 }
260 
261 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
262 					struct ecore_ptt *p_ptt)
263 {
264 	struct ecore_mcp_info *p_info;
265 	u32 size;
266 
267 	/* Allocate mcp_info structure */
268 	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
269 			sizeof(*p_hwfn->mcp_info));
270 	if (!p_hwfn->mcp_info) {
271 		DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
272 		return ECORE_NOMEM;
273 	}
274 	p_info = p_hwfn->mcp_info;
275 
276 	/* Initialize the MFW spinlocks */
277 #ifdef CONFIG_ECORE_LOCK_ALLOC
278 	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
279 		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
280 		return ECORE_NOMEM;
281 	}
282 	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
283 		OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
284 		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
285 		return ECORE_NOMEM;
286 	}
287 #endif
288 	OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
289 	OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
290 
291 	OSAL_LIST_INIT(&p_info->cmd_list);
292 
293 	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
294 		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
295 		/* Do not free mcp_info here, since public_base indicate that
296 		 * the MCP is not initialized
297 		 */
298 		return ECORE_SUCCESS;
299 	}
300 
301 	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
302 	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
303 	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
304 	if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
305 		goto err;
306 
307 	return ECORE_SUCCESS;
308 
309 err:
310 	DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
311 	ecore_mcp_free(p_hwfn);
312 	return ECORE_NOMEM;
313 }
314 
315 static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
316 				     struct ecore_ptt *p_ptt)
317 {
318 	u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
319 
320 	/* Use MCP history register to check if MCP reset occurred between init
321 	 * time and now.
322 	 */
323 	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
324 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
325 			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
326 			   p_hwfn->mcp_info->mcp_hist, generic_por_0);
327 
328 		ecore_load_mcp_offsets(p_hwfn, p_ptt);
329 		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
330 	}
331 }
332 
333 enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
334 				     struct ecore_ptt *p_ptt)
335 {
336 	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
337 	enum _ecore_status_t rc = ECORE_SUCCESS;
338 
339 #ifndef ASIC_ONLY
340 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
341 		delay = EMUL_MCP_RESP_ITER_US;
342 #endif
343 
344 	if (p_hwfn->mcp_info->b_block_cmd) {
345 		DP_NOTICE(p_hwfn, false,
346 			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
347 		return ECORE_ABORTED;
348 	}
349 
350 	/* Ensure that only a single thread is accessing the mailbox */
351 	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
352 
353 	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
354 
355 	/* Set drv command along with the updated sequence */
356 	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
357 	seq = ++p_hwfn->mcp_info->drv_mb_seq;
358 	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
359 
360 	do {
361 		/* Wait for MFW response */
362 		OSAL_UDELAY(delay);
363 		/* Give the FW up to 500 second (50*1000*10usec) */
364 	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
365 						MISCS_REG_GENERIC_POR_0)) &&
366 		 (cnt++ < ECORE_MCP_RESET_RETRIES));
367 
368 	if (org_mcp_reset_seq !=
369 	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
370 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
371 			   "MCP was reset after %d usec\n", cnt * delay);
372 	} else {
373 		DP_ERR(p_hwfn, "Failed to reset MCP\n");
374 		rc = ECORE_AGAIN;
375 	}
376 
377 	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
378 
379 	return rc;
380 }
381 
382 /* Must be called while cmd_lock is acquired */
383 static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
384 {
385 	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
386 
387 	/* There is at most one pending command at a certain time, and if it
388 	 * exists - it is placed at the HEAD of the list.
389 	 */
390 	if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
391 		p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
392 						   struct ecore_mcp_cmd_elem,
393 						   list);
394 		return !p_cmd_elem->b_is_completed;
395 	}
396 
397 	return false;
398 }
399 
400 /* Must be called while cmd_lock is acquired */
401 static enum _ecore_status_t
402 ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
403 {
404 	struct ecore_mcp_mb_params *p_mb_params;
405 	struct ecore_mcp_cmd_elem *p_cmd_elem;
406 	u32 mcp_resp;
407 	u16 seq_num;
408 
409 	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
410 	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
411 
412 	/* Return if no new non-handled response has been received */
413 	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
414 		return ECORE_AGAIN;
415 
416 	p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
417 	if (!p_cmd_elem) {
418 		DP_ERR(p_hwfn,
419 		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
420 		       seq_num);
421 		return ECORE_UNKNOWN_ERROR;
422 	}
423 
424 	p_mb_params = p_cmd_elem->p_mb_params;
425 
426 	/* Get the MFW response along with the sequence number */
427 	p_mb_params->mcp_resp = mcp_resp;
428 
429 	/* Get the MFW param */
430 	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
431 
432 	/* Get the union data */
433 	if (p_mb_params->p_data_dst != OSAL_NULL &&
434 	    p_mb_params->data_dst_size) {
435 		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
436 				      OFFSETOF(struct public_drv_mb,
437 					       union_data);
438 		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
439 				  union_data_addr, p_mb_params->data_dst_size);
440 	}
441 
442 	p_cmd_elem->b_is_completed = true;
443 
444 	return ECORE_SUCCESS;
445 }
446 
447 /* Must be called while cmd_lock is acquired */
448 static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
449 				      struct ecore_ptt *p_ptt,
450 				      struct ecore_mcp_mb_params *p_mb_params,
451 				      u16 seq_num)
452 {
453 	union drv_union_data union_data;
454 	u32 union_data_addr;
455 
456 	/* Set the union data */
457 	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
458 			  OFFSETOF(struct public_drv_mb, union_data);
459 	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
460 	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
461 		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
462 			    p_mb_params->data_src_size);
463 	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
464 			sizeof(union_data));
465 
466 	/* Set the drv param */
467 	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
468 
469 	/* Set the drv command along with the sequence number */
470 	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
471 
472 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
473 		   "MFW mailbox: command 0x%08x param 0x%08x\n",
474 		   (p_mb_params->cmd | seq_num), p_mb_params->param);
475 }
476 
477 static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
478 				       bool block_cmd)
479 {
480 	p_hwfn->mcp_info->b_block_cmd = block_cmd;
481 
482 	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
483 		block_cmd ? "Block" : "Unblock");
484 }
485 
486 void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
487 			      struct ecore_ptt *p_ptt)
488 {
489 	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
490 
491 	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
492 	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
493 	cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
494 	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
495 	cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
496 	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
497 	cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
498 
499 	DP_NOTICE(p_hwfn, false,
500 		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
501 		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
502 }
503 
504 static enum _ecore_status_t
505 _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
506 			 struct ecore_mcp_mb_params *p_mb_params,
507 			 u32 max_retries, u32 delay)
508 {
509 	struct ecore_mcp_cmd_elem *p_cmd_elem;
510 	u32 cnt = 0;
511 	u16 seq_num;
512 	enum _ecore_status_t rc = ECORE_SUCCESS;
513 
514 	/* Wait until the mailbox is non-occupied */
515 	do {
516 		/* Exit the loop if there is no pending command, or if the
517 		 * pending command is completed during this iteration.
518 		 * The spinlock stays locked until the command is sent.
519 		 */
520 
521 		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
522 
523 		if (!ecore_mcp_has_pending_cmd(p_hwfn))
524 			break;
525 
526 		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
527 		if (rc == ECORE_SUCCESS)
528 			break;
529 		else if (rc != ECORE_AGAIN)
530 			goto err;
531 
532 		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
533 		OSAL_UDELAY(delay);
534 		OSAL_MFW_CMD_PREEMPT(p_hwfn);
535 	} while (++cnt < max_retries);
536 
537 	if (cnt >= max_retries) {
538 		DP_NOTICE(p_hwfn, false,
539 			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
540 			  p_mb_params->cmd, p_mb_params->param);
541 		return ECORE_AGAIN;
542 	}
543 
544 	/* Send the mailbox command */
545 	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
546 	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
547 	p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
548 	if (!p_cmd_elem) {
549 		rc = ECORE_NOMEM;
550 		goto err;
551 	}
552 
553 	__ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
554 	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
555 
556 	/* Wait for the MFW response */
557 	do {
558 		/* Exit the loop if the command is already completed, or if the
559 		 * command is completed during this iteration.
560 		 * The spinlock stays locked until the list element is removed.
561 		 */
562 
563 		OSAL_UDELAY(delay);
564 		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
565 
566 		if (p_cmd_elem->b_is_completed)
567 			break;
568 
569 		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
570 		if (rc == ECORE_SUCCESS)
571 			break;
572 		else if (rc != ECORE_AGAIN)
573 			goto err;
574 
575 		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
576 		OSAL_MFW_CMD_PREEMPT(p_hwfn);
577 	} while (++cnt < max_retries);
578 
579 	if (cnt >= max_retries) {
580 		DP_NOTICE(p_hwfn, false,
581 			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
582 			  p_mb_params->cmd, p_mb_params->param);
583 		ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
584 
585 		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
586 		ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
587 		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
588 
589 		ecore_mcp_cmd_set_blocking(p_hwfn, true);
590 		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
591 		return ECORE_AGAIN;
592 	}
593 
594 	ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
595 	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
596 
597 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
598 		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
599 		   p_mb_params->mcp_resp, p_mb_params->mcp_param,
600 		   (cnt * delay) / 1000, (cnt * delay) % 1000);
601 
602 	/* Clear the sequence number from the MFW response */
603 	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
604 
605 	return ECORE_SUCCESS;
606 
607 err:
608 	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
609 	return rc;
610 }
611 
612 static enum _ecore_status_t
613 ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
614 			struct ecore_ptt *p_ptt,
615 			struct ecore_mcp_mb_params *p_mb_params)
616 {
617 	osal_size_t union_data_size = sizeof(union drv_union_data);
618 	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
619 	u32 delay = CHIP_MCP_RESP_ITER_US;
620 
621 #ifndef ASIC_ONLY
622 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
623 		delay = EMUL_MCP_RESP_ITER_US;
624 	/* There is a built-in delay of 100usec in each MFW response read */
625 	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
626 		max_retries /= 10;
627 #endif
628 
629 	/* MCP not initialized */
630 	if (!ecore_mcp_is_init(p_hwfn)) {
631 		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
632 		return ECORE_BUSY;
633 	}
634 
635 	if (p_mb_params->data_src_size > union_data_size ||
636 	    p_mb_params->data_dst_size > union_data_size) {
637 		DP_ERR(p_hwfn,
638 		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
639 		       p_mb_params->data_src_size, p_mb_params->data_dst_size,
640 		       union_data_size);
641 		return ECORE_INVAL;
642 	}
643 
644 	if (p_hwfn->mcp_info->b_block_cmd) {
645 		DP_NOTICE(p_hwfn, false,
646 			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
647 			  p_mb_params->cmd, p_mb_params->param);
648 		return ECORE_ABORTED;
649 	}
650 
651 	return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
652 					delay);
653 }
654 
655 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
656 				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
657 				   u32 *o_mcp_resp, u32 *o_mcp_param)
658 {
659 	struct ecore_mcp_mb_params mb_params;
660 	enum _ecore_status_t rc;
661 
662 #ifndef ASIC_ONLY
663 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
664 		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
665 			loaded--;
666 			loaded_port[p_hwfn->port_id]--;
667 			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
668 				   loaded);
669 		}
670 		return ECORE_SUCCESS;
671 	}
672 #endif
673 
674 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
675 	mb_params.cmd = cmd;
676 	mb_params.param = param;
677 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
678 	if (rc != ECORE_SUCCESS)
679 		return rc;
680 
681 	*o_mcp_resp = mb_params.mcp_resp;
682 	*o_mcp_param = mb_params.mcp_param;
683 
684 	return ECORE_SUCCESS;
685 }
686 
687 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
688 					  struct ecore_ptt *p_ptt,
689 					  u32 cmd,
690 					  u32 param,
691 					  u32 *o_mcp_resp,
692 					  u32 *o_mcp_param,
693 					  u32 i_txn_size, u32 *i_buf)
694 {
695 	struct ecore_mcp_mb_params mb_params;
696 	enum _ecore_status_t rc;
697 
698 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
699 	mb_params.cmd = cmd;
700 	mb_params.param = param;
701 	mb_params.p_data_src = i_buf;
702 	mb_params.data_src_size = (u8)i_txn_size;
703 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
704 	if (rc != ECORE_SUCCESS)
705 		return rc;
706 
707 	*o_mcp_resp = mb_params.mcp_resp;
708 	*o_mcp_param = mb_params.mcp_param;
709 
710 	return ECORE_SUCCESS;
711 }
712 
713 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
714 					  struct ecore_ptt *p_ptt,
715 					  u32 cmd,
716 					  u32 param,
717 					  u32 *o_mcp_resp,
718 					  u32 *o_mcp_param,
719 					  u32 *o_txn_size, u32 *o_buf)
720 {
721 	struct ecore_mcp_mb_params mb_params;
722 	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
723 	enum _ecore_status_t rc;
724 
725 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
726 	mb_params.cmd = cmd;
727 	mb_params.param = param;
728 	mb_params.p_data_dst = raw_data;
729 
730 	/* Use the maximal value since the actual one is part of the response */
731 	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
732 
733 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
734 	if (rc != ECORE_SUCCESS)
735 		return rc;
736 
737 	*o_mcp_resp = mb_params.mcp_resp;
738 	*o_mcp_param = mb_params.mcp_param;
739 
740 	*o_txn_size = *o_mcp_param;
741 	/* @DPDK */
742 	OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));
743 
744 	return ECORE_SUCCESS;
745 }
746 
747 #ifndef ASIC_ONLY
748 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
749 				    u32 *p_load_code)
750 {
751 	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
752 
753 	if (!loaded)
754 		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
755 	else if (!loaded_port[p_hwfn->port_id])
756 		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
757 	else
758 		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
759 
760 	/* On CMT, always tell that it's engine */
761 	if (ECORE_IS_CMT(p_hwfn->p_dev))
762 		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
763 
764 	*p_load_code = load_phase;
765 	loaded++;
766 	loaded_port[p_hwfn->port_id]++;
767 
768 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
769 		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
770 		   *p_load_code, loaded, p_hwfn->port_id,
771 		   loaded_port[p_hwfn->port_id]);
772 }
773 #endif
774 
775 static bool
776 ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
777 			 enum ecore_override_force_load override_force_load)
778 {
779 	bool can_force_load = false;
780 
781 	switch (override_force_load) {
782 	case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
783 		can_force_load = true;
784 		break;
785 	case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
786 		can_force_load = false;
787 		break;
788 	default:
789 		can_force_load = (drv_role == DRV_ROLE_OS &&
790 				  exist_drv_role == DRV_ROLE_PREBOOT) ||
791 				 (drv_role == DRV_ROLE_KDUMP &&
792 				  exist_drv_role == DRV_ROLE_OS);
793 		break;
794 	}
795 
796 	return can_force_load;
797 }
798 
799 static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
800 						      struct ecore_ptt *p_ptt)
801 {
802 	u32 resp = 0, param = 0;
803 	enum _ecore_status_t rc;
804 
805 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
806 			   &resp, &param);
807 	if (rc != ECORE_SUCCESS)
808 		DP_NOTICE(p_hwfn, false,
809 			  "Failed to send cancel load request, rc = %d\n", rc);
810 
811 	return rc;
812 }
813 
814 #define CONFIG_ECORE_L2_BITMAP_IDX	(0x1 << 0)
815 #define CONFIG_ECORE_SRIOV_BITMAP_IDX	(0x1 << 1)
816 #define CONFIG_ECORE_ROCE_BITMAP_IDX	(0x1 << 2)
817 #define CONFIG_ECORE_IWARP_BITMAP_IDX	(0x1 << 3)
818 #define CONFIG_ECORE_FCOE_BITMAP_IDX	(0x1 << 4)
819 #define CONFIG_ECORE_ISCSI_BITMAP_IDX	(0x1 << 5)
820 #define CONFIG_ECORE_LL2_BITMAP_IDX	(0x1 << 6)
821 
822 static u32 ecore_get_config_bitmap(void)
823 {
824 	u32 config_bitmap = 0x0;
825 
826 #ifdef CONFIG_ECORE_L2
827 	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
828 #endif
829 #ifdef CONFIG_ECORE_SRIOV
830 	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
831 #endif
832 #ifdef CONFIG_ECORE_ROCE
833 	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
834 #endif
835 #ifdef CONFIG_ECORE_IWARP
836 	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
837 #endif
838 #ifdef CONFIG_ECORE_FCOE
839 	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
840 #endif
841 #ifdef CONFIG_ECORE_ISCSI
842 	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
843 #endif
844 #ifdef CONFIG_ECORE_LL2
845 	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
846 #endif
847 
848 	return config_bitmap;
849 }
850 
851 struct ecore_load_req_in_params {
852 	u8 hsi_ver;
853 #define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
854 #define ECORE_LOAD_REQ_HSI_VER_1	1
855 	u32 drv_ver_0;
856 	u32 drv_ver_1;
857 	u32 fw_ver;
858 	u8 drv_role;
859 	u8 timeout_val;
860 	u8 force_cmd;
861 	bool avoid_eng_reset;
862 };
863 
864 struct ecore_load_req_out_params {
865 	u32 load_code;
866 	u32 exist_drv_ver_0;
867 	u32 exist_drv_ver_1;
868 	u32 exist_fw_ver;
869 	u8 exist_drv_role;
870 	u8 mfw_hsi_ver;
871 	bool drv_exists;
872 };
873 
874 static enum _ecore_status_t
875 __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
876 		     struct ecore_load_req_in_params *p_in_params,
877 		     struct ecore_load_req_out_params *p_out_params)
878 {
879 	struct ecore_mcp_mb_params mb_params;
880 	struct load_req_stc load_req;
881 	struct load_rsp_stc load_rsp;
882 	u32 hsi_ver;
883 	enum _ecore_status_t rc;
884 
885 	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
886 	load_req.drv_ver_0 = p_in_params->drv_ver_0;
887 	load_req.drv_ver_1 = p_in_params->drv_ver_1;
888 	load_req.fw_ver = p_in_params->fw_ver;
889 	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
890 	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
891 		      p_in_params->timeout_val);
892 	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
893 	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
894 		      p_in_params->avoid_eng_reset);
895 
896 	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
897 		  DRV_ID_MCP_HSI_VER_CURRENT :
898 		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);
899 
900 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
901 	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
902 	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
903 	mb_params.p_data_src = &load_req;
904 	mb_params.data_src_size = sizeof(load_req);
905 	mb_params.p_data_dst = &load_rsp;
906 	mb_params.data_dst_size = sizeof(load_rsp);
907 
908 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
909 		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
910 		   mb_params.param,
911 		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
912 		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
913 		   GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
914 		   GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
915 
916 	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
917 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
918 			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
919 			   load_req.drv_ver_0, load_req.drv_ver_1,
920 			   load_req.fw_ver, load_req.misc0,
921 			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
922 			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
923 			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
924 			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
925 
926 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
927 	if (rc != ECORE_SUCCESS) {
928 		DP_NOTICE(p_hwfn, false,
929 			  "Failed to send load request, rc = %d\n", rc);
930 		return rc;
931 	}
932 
933 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
934 		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
935 	p_out_params->load_code = mb_params.mcp_resp;
936 
937 	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
938 	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
939 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
940 			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
941 			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
942 			   load_rsp.fw_ver, load_rsp.misc0,
943 			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
944 			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
945 			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
946 
947 		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
948 		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
949 		p_out_params->exist_fw_ver = load_rsp.fw_ver;
950 		p_out_params->exist_drv_role =
951 			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
952 		p_out_params->mfw_hsi_ver =
953 			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
954 		p_out_params->drv_exists =
955 			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
956 			LOAD_RSP_FLAGS0_DRV_EXISTS;
957 	}
958 
959 	return ECORE_SUCCESS;
960 }
961 
962 static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
963 				   u8 *p_mfw_drv_role)
964 {
965 	switch (drv_role) {
966 	case ECORE_DRV_ROLE_OS:
967 		*p_mfw_drv_role = DRV_ROLE_OS;
968 		break;
969 	case ECORE_DRV_ROLE_KDUMP:
970 		*p_mfw_drv_role = DRV_ROLE_KDUMP;
971 		break;
972 	}
973 }
974 
975 enum ecore_load_req_force {
976 	ECORE_LOAD_REQ_FORCE_NONE,
977 	ECORE_LOAD_REQ_FORCE_PF,
978 	ECORE_LOAD_REQ_FORCE_ALL,
979 };
980 
981 static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
982 				    u8 *p_mfw_force_cmd)
983 {
984 	switch (force_cmd) {
985 	case ECORE_LOAD_REQ_FORCE_NONE:
986 		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
987 		break;
988 	case ECORE_LOAD_REQ_FORCE_PF:
989 		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
990 		break;
991 	case ECORE_LOAD_REQ_FORCE_ALL:
992 		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
993 		break;
994 	}
995 }
996 
997 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
998 					struct ecore_ptt *p_ptt,
999 					struct ecore_load_req_params *p_params)
1000 {
1001 	struct ecore_load_req_out_params out_params;
1002 	struct ecore_load_req_in_params in_params;
1003 	u8 mfw_drv_role = 0, mfw_force_cmd;
1004 	enum _ecore_status_t rc;
1005 
1006 #ifndef ASIC_ONLY
1007 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1008 		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
1009 		return ECORE_SUCCESS;
1010 	}
1011 #endif
1012 
1013 	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
1014 	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
1015 	in_params.drv_ver_0 = ECORE_VERSION;
1016 	in_params.drv_ver_1 = ecore_get_config_bitmap();
1017 	in_params.fw_ver = STORM_FW_VERSION;
1018 	ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
1019 	in_params.drv_role = mfw_drv_role;
1020 	in_params.timeout_val = p_params->timeout_val;
1021 	ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
1022 	in_params.force_cmd = mfw_force_cmd;
1023 	in_params.avoid_eng_reset = p_params->avoid_eng_reset;
1024 
1025 	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
1026 	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
1027 	if (rc != ECORE_SUCCESS)
1028 		return rc;
1029 
1030 	/* First handle cases where another load request should/might be sent:
1031 	 * - MFW expects the old interface [HSI version = 1]
1032 	 * - MFW responds that a force load request is required
1033 	 */
1034 	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
1035 		DP_INFO(p_hwfn,
1036 			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
1037 
1038 		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
1039 		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
1040 		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
1041 					  &out_params);
1042 		if (rc != ECORE_SUCCESS)
1043 			return rc;
1044 	} else if (out_params.load_code ==
1045 		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
1046 		if (ecore_mcp_can_force_load(in_params.drv_role,
1047 					     out_params.exist_drv_role,
1048 					     p_params->override_force_load)) {
1049 			DP_INFO(p_hwfn,
1050 				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
1051 				in_params.drv_role, in_params.fw_ver,
1052 				in_params.drv_ver_0, in_params.drv_ver_1,
1053 				out_params.exist_drv_role,
1054 				out_params.exist_fw_ver,
1055 				out_params.exist_drv_ver_0,
1056 				out_params.exist_drv_ver_1);
1057 
1058 			ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
1059 						&mfw_force_cmd);
1060 
1061 			in_params.force_cmd = mfw_force_cmd;
1062 			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
1063 			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
1064 						  &out_params);
1065 			if (rc != ECORE_SUCCESS)
1066 				return rc;
1067 		} else {
1068 			DP_NOTICE(p_hwfn, false,
1069 				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1070 				  in_params.drv_role, in_params.fw_ver,
1071 				  in_params.drv_ver_0, in_params.drv_ver_1,
1072 				  out_params.exist_drv_role,
1073 				  out_params.exist_fw_ver,
1074 				  out_params.exist_drv_ver_0,
1075 				  out_params.exist_drv_ver_1);
1076 
1077 			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
1078 			return ECORE_BUSY;
1079 		}
1080 	}
1081 
1082 	/* Now handle the other types of responses.
1083 	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1084 	 * expected here after the additional revised load requests were sent.
1085 	 */
1086 	switch (out_params.load_code) {
1087 	case FW_MSG_CODE_DRV_LOAD_ENGINE:
1088 	case FW_MSG_CODE_DRV_LOAD_PORT:
1089 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1090 		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
1091 		    out_params.drv_exists) {
1092 			/* The role and fw/driver version match, but the PF is
1093 			 * already loaded and has not been unloaded gracefully.
1094 			 * This is unexpected since a quasi-FLR request was
1095 			 * previously sent as part of ecore_hw_prepare().
1096 			 */
1097 			DP_NOTICE(p_hwfn, false,
1098 				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
1099 			return ECORE_INVAL;
1100 		}
1101 		break;
1102 	default:
1103 		DP_NOTICE(p_hwfn, false,
1104 			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1105 			  out_params.load_code);
1106 		return ECORE_BUSY;
1107 	}
1108 
1109 	p_params->load_code = out_params.load_code;
1110 
1111 	return ECORE_SUCCESS;
1112 }
1113 
1114 enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
1115 					 struct ecore_ptt *p_ptt)
1116 {
1117 	u32 resp = 0, param = 0;
1118 	enum _ecore_status_t rc;
1119 
1120 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
1121 			   &param);
1122 	if (rc != ECORE_SUCCESS) {
1123 		DP_NOTICE(p_hwfn, false,
1124 			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
1125 		return rc;
1126 	}
1127 
1128 	/* Check if there is a DID mismatch between nvm-cfg/efuse */
1129 	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1130 		DP_NOTICE(p_hwfn, false,
1131 			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1132 
1133 	return ECORE_SUCCESS;
1134 }
1135 
1136 enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
1137 					  struct ecore_ptt *p_ptt)
1138 {
1139 	u32 wol_param, mcp_resp, mcp_param;
1140 
1141 	/* @DPDK */
1142 	wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1143 
1144 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
1145 			     &mcp_resp, &mcp_param);
1146 }
1147 
1148 enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
1149 					   struct ecore_ptt *p_ptt)
1150 {
1151 	struct ecore_mcp_mb_params mb_params;
1152 	struct mcp_mac wol_mac;
1153 
1154 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1155 	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1156 
1157 	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1158 }
1159 
1160 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
1161 				    struct ecore_ptt *p_ptt)
1162 {
1163 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1164 					PUBLIC_PATH);
1165 	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1166 	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
1167 				     ECORE_PATH_ID(p_hwfn));
1168 	u32 disabled_vfs[VF_MAX_STATIC / 32];
1169 	int i;
1170 
1171 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1172 		   "Reading Disabled VF information from [offset %08x],"
1173 		   " path_addr %08x\n",
1174 		   mfw_path_offsize, path_addr);
1175 
1176 	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
1177 		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
1178 					   path_addr +
1179 					   OFFSETOF(struct public_path,
1180 						    mcp_vf_disabled) +
1181 					   sizeof(u32) * i);
1182 		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1183 			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
1184 			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
1185 	}
1186 
1187 	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
1188 		OSAL_VF_FLR_UPDATE(p_hwfn);
1189 }
1190 
1191 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
1192 					  struct ecore_ptt *p_ptt,
1193 					  u32 *vfs_to_ack)
1194 {
1195 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1196 					PUBLIC_FUNC);
1197 	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1198 	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
1199 				     MCP_PF_ID(p_hwfn));
1200 	struct ecore_mcp_mb_params mb_params;
1201 	enum _ecore_status_t rc;
1202 	int i;
1203 
1204 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1205 		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1206 			   "Acking VFs [%08x,...,%08x] - %08x\n",
1207 			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
1208 
1209 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1210 	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
1211 	mb_params.p_data_src = vfs_to_ack;
1212 	mb_params.data_src_size = VF_MAX_STATIC / 8;
1213 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
1214 				     &mb_params);
1215 	if (rc != ECORE_SUCCESS) {
1216 		DP_NOTICE(p_hwfn, false,
1217 			  "Failed to pass ACK for VF flr to MFW\n");
1218 		return ECORE_TIMEOUT;
1219 	}
1220 
1221 	/* TMP - clear the ACK bits; should be done by MFW */
1222 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1223 		ecore_wr(p_hwfn, p_ptt,
1224 			 func_addr +
1225 			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
1226 			 i * sizeof(u32), 0);
1227 
1228 	return rc;
1229 }
1230 
1231 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
1232 						struct ecore_ptt *p_ptt)
1233 {
1234 	u32 transceiver_state;
1235 
1236 	transceiver_state = ecore_rd(p_hwfn, p_ptt,
1237 				     p_hwfn->mcp_info->port_addr +
1238 				     OFFSETOF(struct public_port,
1239 					      transceiver_data));
1240 
1241 	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
1242 		   "Received transceiver state update [0x%08x] from mfw"
1243 		   " [Addr 0x%x]\n",
1244 		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
1245 					    OFFSETOF(struct public_port,
1246 						     transceiver_data)));
1247 
1248 	transceiver_state = GET_MFW_FIELD(transceiver_state,
1249 					  ETH_TRANSCEIVER_STATE);
1250 
1251 	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1252 		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
1253 	else
1254 		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
1255 
1256 	OSAL_TRANSCEIVER_UPDATE(p_hwfn);
1257 }
1258 
1259 static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
1260 				      struct ecore_ptt *p_ptt,
1261 				      struct ecore_mcp_link_state *p_link)
1262 {
1263 	u32 eee_status, val;
1264 
1265 	p_link->eee_adv_caps = 0;
1266 	p_link->eee_lp_adv_caps = 0;
1267 	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1268 				     OFFSETOF(struct public_port, eee_status));
1269 	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1270 	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
1271 	if (val & EEE_1G_ADV)
1272 		p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
1273 	if (val & EEE_10G_ADV)
1274 		p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
1275 	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
1276 	if (val & EEE_1G_ADV)
1277 		p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
1278 	if (val & EEE_10G_ADV)
1279 		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
1280 }
1281 
1282 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1283 				    struct ecore_ptt *p_ptt,
1284 				    struct public_func *p_data,
1285 				    int pfid)
1286 {
1287 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1288 					PUBLIC_FUNC);
1289 	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1290 	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1291 	u32 i, size;
1292 
1293 	OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1294 
1295 	size = OSAL_MIN_T(u32, sizeof(*p_data),
1296 			  SECTION_SIZE(mfw_path_offsize));
1297 	for (i = 0; i < size / sizeof(u32); i++)
1298 		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1299 					      func_addr + (i << 2));
1300 
1301 	return size;
1302 }
1303 
1304 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
1305 					 struct ecore_ptt *p_ptt,
1306 					 bool b_reset)
1307 {
1308 	struct ecore_mcp_link_state *p_link;
1309 	u8 max_bw, min_bw;
1310 	u32 status = 0;
1311 
1312 	/* Prevent SW/attentions from doing this at the same time */
1313 	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
1314 
1315 	p_link = &p_hwfn->mcp_info->link_output;
1316 	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
1317 	if (!b_reset) {
1318 		status = ecore_rd(p_hwfn, p_ptt,
1319 				  p_hwfn->mcp_info->port_addr +
1320 				  OFFSETOF(struct public_port, link_status));
1321 		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
1322 			   "Received link update [0x%08x] from mfw"
1323 			   " [Addr 0x%x]\n",
1324 			   status, (u32)(p_hwfn->mcp_info->port_addr +
1325 					  OFFSETOF(struct public_port,
1326 						   link_status)));
1327 	} else {
1328 		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1329 			   "Resetting link indications\n");
1330 		goto out;
1331 	}
1332 
1333 	if (p_hwfn->b_drv_link_init) {
1334 		/* Link indication with modern MFW arrives as per-PF
1335 		 * indication.
1336 		 */
1337 		if (p_hwfn->mcp_info->capabilities &
1338 		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
1339 			struct public_func shmem_info;
1340 
1341 			ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1342 						 MCP_PF_ID(p_hwfn));
1343 			p_link->link_up = !!(shmem_info.status &
1344 					     FUNC_STATUS_VIRTUAL_LINK_UP);
1345 		} else {
1346 			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1347 		}
1348 	} else {
1349 		p_link->link_up = false;
1350 	}
1351 
1352 	p_link->full_duplex = true;
1353 	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1354 	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1355 		p_link->speed = 100000;
1356 		break;
1357 	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1358 		p_link->speed = 50000;
1359 		break;
1360 	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1361 		p_link->speed = 40000;
1362 		break;
1363 	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1364 		p_link->speed = 25000;
1365 		break;
1366 	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1367 		p_link->speed = 20000;
1368 		break;
1369 	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1370 		p_link->speed = 10000;
1371 		break;
1372 	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1373 		p_link->full_duplex = false;
1374 		/* Fall-through */
1375 	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1376 		p_link->speed = 1000;
1377 		break;
1378 	default:
1379 		p_link->speed = 0;
1380 	}
1381 
1382 	/* We never store total line speed as p_link->speed is
1383 	 * again changes according to bandwidth allocation.
1384 	 */
1385 	if (p_link->link_up && p_link->speed)
1386 		p_link->line_speed = p_link->speed;
1387 	else
1388 		p_link->line_speed = 0;
1389 
1390 	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1391 	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1392 
1393 	/* Max bandwidth configuration */
1394 	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
1395 					   p_link, max_bw);
1396 
1397 	/* Min bandwidth configuration */
1398 	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
1399 					   p_link, min_bw);
1400 	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
1401 					      p_link->min_pf_rate);
1402 
1403 	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1404 	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1405 	p_link->parallel_detection = !!(status &
1406 					 LINK_STATUS_PARALLEL_DETECTION_USED);
1407 	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1408 
1409 	p_link->partner_adv_speed |=
1410 	    (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1411 	    ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
1412 	p_link->partner_adv_speed |=
1413 	    (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1414 	    ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
1415 	p_link->partner_adv_speed |=
1416 	    (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1417 	    ECORE_LINK_PARTNER_SPEED_10G : 0;
1418 	p_link->partner_adv_speed |=
1419 	    (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1420 	    ECORE_LINK_PARTNER_SPEED_20G : 0;
1421 	p_link->partner_adv_speed |=
1422 	    (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1423 	    ECORE_LINK_PARTNER_SPEED_25G : 0;
1424 	p_link->partner_adv_speed |=
1425 	    (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1426 	    ECORE_LINK_PARTNER_SPEED_40G : 0;
1427 	p_link->partner_adv_speed |=
1428 	    (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1429 	    ECORE_LINK_PARTNER_SPEED_50G : 0;
1430 	p_link->partner_adv_speed |=
1431 	    (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1432 	    ECORE_LINK_PARTNER_SPEED_100G : 0;
1433 
1434 	p_link->partner_tx_flow_ctrl_en =
1435 	    !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1436 	p_link->partner_rx_flow_ctrl_en =
1437 	    !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1438 
1439 	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1440 	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1441 		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
1442 		break;
1443 	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1444 		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
1445 		break;
1446 	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1447 		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
1448 		break;
1449 	default:
1450 		p_link->partner_adv_pause = 0;
1451 	}
1452 
1453 	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1454 
1455 	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
1456 		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
1457 
1458 	OSAL_LINK_UPDATE(p_hwfn);
1459 out:
1460 	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
1461 }
1462 
1463 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
1464 					struct ecore_ptt *p_ptt, bool b_up)
1465 {
1466 	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1467 	struct ecore_mcp_mb_params mb_params;
1468 	struct eth_phy_cfg phy_cfg;
1469 	enum _ecore_status_t rc = ECORE_SUCCESS;
1470 	u32 cmd;
1471 
1472 #ifndef ASIC_ONLY
1473 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1474 		return ECORE_SUCCESS;
1475 #endif
1476 
1477 	/* Set the shmem configuration according to params */
1478 	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
1479 	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1480 	if (!params->speed.autoneg)
1481 		phy_cfg.speed = params->speed.forced_speed;
1482 	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1483 	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1484 	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1485 	phy_cfg.adv_speed = params->speed.advertised_speeds;
1486 	phy_cfg.loopback_mode = params->loopback_mode;
1487 
1488 	/* There are MFWs that share this capability regardless of whether
1489 	 * this is feasible or not. And given that at the very least adv_caps
1490 	 * would be set internally by ecore, we want to make sure LFA would
1491 	 * still work.
1492 	 */
1493 	if ((p_hwfn->mcp_info->capabilities &
1494 	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
1495 	    params->eee.enable) {
1496 		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1497 		if (params->eee.tx_lpi_enable)
1498 			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1499 		if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
1500 			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1501 		if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
1502 			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1503 		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1504 				    EEE_TX_TIMER_USEC_OFFSET) &
1505 					EEE_TX_TIMER_USEC_MASK;
1506 	}
1507 
1508 	p_hwfn->b_drv_link_init = b_up;
1509 
1510 	if (b_up)
1511 		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1512 			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
1513 			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
1514 			   phy_cfg.loopback_mode);
1515 	else
1516 		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
1517 
1518 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1519 	mb_params.cmd = cmd;
1520 	mb_params.p_data_src = &phy_cfg;
1521 	mb_params.data_src_size = sizeof(phy_cfg);
1522 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1523 
1524 	/* if mcp fails to respond we must abort */
1525 	if (rc != ECORE_SUCCESS) {
1526 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1527 		return rc;
1528 	}
1529 
1530 	/* Mimic link-change attention, done for several reasons:
1531 	 *  - On reset, there's no guarantee MFW would trigger
1532 	 *    an attention.
1533 	 *  - On initialization, older MFWs might not indicate link change
1534 	 *    during LFA, so we'll never get an UP indication.
1535 	 */
1536 	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1537 
1538 	return ECORE_SUCCESS;
1539 }
1540 
1541 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
1542 				   struct ecore_ptt *p_ptt)
1543 {
1544 	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1545 
1546 	/* TODO - Add support for VFs */
1547 	if (IS_VF(p_hwfn->p_dev))
1548 		return ECORE_INVAL;
1549 
1550 	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1551 						 PUBLIC_PATH);
1552 	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
1553 	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
1554 
1555 	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
1556 				 path_addr +
1557 				 OFFSETOF(struct public_path, process_kill)) &
1558 	    PROCESS_KILL_COUNTER_MASK;
1559 
1560 	return proc_kill_cnt;
1561 }
1562 
1563 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
1564 					  struct ecore_ptt *p_ptt)
1565 {
1566 	struct ecore_dev *p_dev = p_hwfn->p_dev;
1567 	u32 proc_kill_cnt;
1568 
1569 	/* Prevent possible attentions/interrupts during the recovery handling
1570 	 * and till its load phase, during which they will be re-enabled.
1571 	 */
1572 	ecore_int_igu_disable_int(p_hwfn, p_ptt);
1573 
1574 	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
1575 
1576 	/* The following operations should be done once, and thus in CMT mode
1577 	 * are carried out by only the first HW function.
1578 	 */
1579 	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
1580 		return;
1581 
1582 	if (p_dev->recov_in_prog) {
1583 		DP_NOTICE(p_hwfn, false,
1584 			  "Ignoring the indication since a recovery"
1585 			  " process is already in progress\n");
1586 		return;
1587 	}
1588 
1589 	p_dev->recov_in_prog = true;
1590 
1591 	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1592 	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1593 
1594 	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1595 }
1596 
1597 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1598 					  struct ecore_ptt *p_ptt,
1599 					  enum MFW_DRV_MSG_TYPE type)
1600 {
1601 	enum ecore_mcp_protocol_type stats_type;
1602 	union ecore_mcp_protocol_stats stats;
1603 	struct ecore_mcp_mb_params mb_params;
1604 	u32 hsi_param;
1605 	enum _ecore_status_t rc;
1606 
1607 	switch (type) {
1608 	case MFW_DRV_MSG_GET_LAN_STATS:
1609 		stats_type = ECORE_MCP_LAN_STATS;
1610 		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1611 		break;
1612 	default:
1613 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1614 			   "Invalid protocol type %d\n", type);
1615 		return;
1616 	}
1617 
1618 	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1619 
1620 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1621 	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1622 	mb_params.param = hsi_param;
1623 	mb_params.p_data_src = &stats;
1624 	mb_params.data_src_size = sizeof(stats);
1625 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1626 	if (rc != ECORE_SUCCESS)
1627 		DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
1628 }
1629 
1630 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
1631 				    struct public_func *p_shmem_info)
1632 {
1633 	struct ecore_mcp_function_info *p_info;
1634 
1635 	p_info = &p_hwfn->mcp_info->func_info;
1636 
1637 	/* TODO - bandwidth min/max should have valid values of 1-100,
1638 	 * as well as some indication that the feature is disabled.
1639 	 * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
1640 	 * limit and correct value to min `1' and max `100' if limit isn't in
1641 	 * range.
1642 	 */
1643 	p_info->bandwidth_min = (p_shmem_info->config &
1644 				 FUNC_MF_CFG_MIN_BW_MASK) >>
1645 	    FUNC_MF_CFG_MIN_BW_OFFSET;
1646 	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1647 		DP_INFO(p_hwfn,
1648 			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
1649 			p_info->bandwidth_min);
1650 		p_info->bandwidth_min = 1;
1651 	}
1652 
1653 	p_info->bandwidth_max = (p_shmem_info->config &
1654 				 FUNC_MF_CFG_MAX_BW_MASK) >>
1655 	    FUNC_MF_CFG_MAX_BW_OFFSET;
1656 	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1657 		DP_INFO(p_hwfn,
1658 			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
1659 			p_info->bandwidth_max);
1660 		p_info->bandwidth_max = 100;
1661 	}
1662 }
1663 
1664 static void
1665 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1666 {
1667 	struct ecore_mcp_function_info *p_info;
1668 	struct public_func shmem_info;
1669 	u32 resp = 0, param = 0;
1670 
1671 	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1672 
1673 	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1674 
1675 	p_info = &p_hwfn->mcp_info->func_info;
1676 
1677 	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1678 
1679 	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1680 
1681 	/* Acknowledge the MFW */
1682 	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1683 		      &param);
1684 }
1685 
1686 static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
1687 				  struct ecore_ptt *p_ptt)
1688 {
1689 	struct public_func shmem_info;
1690 	u32 resp = 0, param = 0;
1691 
1692 	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1693 				 MCP_PF_ID(p_hwfn));
1694 
1695 	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1696 						 FUNC_MF_CFG_OV_STAG_MASK;
1697 	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1698 	if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) {
1699 		if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) {
1700 			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
1701 				 p_hwfn->hw_info.ovlan);
1702 			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
1703 
1704 			/* Configure DB to add external vlan to EDPM packets */
1705 			ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
1706 			ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
1707 				 p_hwfn->hw_info.ovlan);
1708 		} else {
1709 			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
1710 			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
1711 
1712 			/* Configure DB to add external vlan to EDPM packets */
1713 			ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
1714 			ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
1715 		}
1716 
1717 		ecore_sp_pf_update_stag(p_hwfn);
1718 	}
1719 
1720 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan  = %d hw_mode = 0x%x\n",
1721 		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
1722 	OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);
1723 
1724 	/* Acknowledge the MFW */
1725 	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1726 		      &resp, &param);
1727 }
1728 
1729 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
1730 {
1731 	/* A single notification should be sent to upper driver in CMT mode */
1732 	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1733 		return;
1734 
1735 	DP_NOTICE(p_hwfn, false,
1736 		  "Fan failure was detected on the network interface card"
1737 		  " and it's going to be shut down.\n");
1738 
1739 	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1740 }
1741 
1742 struct ecore_mdump_cmd_params {
1743 	u32 cmd;
1744 	void *p_data_src;
1745 	u8 data_src_size;
1746 	void *p_data_dst;
1747 	u8 data_dst_size;
1748 	u32 mcp_resp;
1749 };
1750 
1751 static enum _ecore_status_t
1752 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1753 		    struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1754 {
1755 	struct ecore_mcp_mb_params mb_params;
1756 	enum _ecore_status_t rc;
1757 
1758 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1759 	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1760 	mb_params.param = p_mdump_cmd_params->cmd;
1761 	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1762 	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1763 	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1764 	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1765 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1766 	if (rc != ECORE_SUCCESS)
1767 		return rc;
1768 
1769 	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1770 
1771 	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1772 		DP_INFO(p_hwfn,
1773 			"The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1774 			p_mdump_cmd_params->cmd);
1775 		rc = ECORE_NOTIMPL;
1776 	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1777 		DP_INFO(p_hwfn,
1778 			"The mdump command is not supported by the MFW\n");
1779 		rc = ECORE_NOTIMPL;
1780 	}
1781 
1782 	return rc;
1783 }
1784 
1785 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1786 						struct ecore_ptt *p_ptt)
1787 {
1788 	struct ecore_mdump_cmd_params mdump_cmd_params;
1789 
1790 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1791 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1792 
1793 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1794 }
1795 
1796 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1797 						struct ecore_ptt *p_ptt,
1798 						u32 epoch)
1799 {
1800 	struct ecore_mdump_cmd_params mdump_cmd_params;
1801 
1802 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1803 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1804 	mdump_cmd_params.p_data_src = &epoch;
1805 	mdump_cmd_params.data_src_size = sizeof(epoch);
1806 
1807 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1808 }
1809 
1810 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1811 					     struct ecore_ptt *p_ptt)
1812 {
1813 	struct ecore_mdump_cmd_params mdump_cmd_params;
1814 
1815 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1816 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1817 
1818 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1819 }
1820 
1821 static enum _ecore_status_t
1822 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1823 			   struct mdump_config_stc *p_mdump_config)
1824 {
1825 	struct ecore_mdump_cmd_params mdump_cmd_params;
1826 	enum _ecore_status_t rc;
1827 
1828 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1829 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1830 	mdump_cmd_params.p_data_dst = p_mdump_config;
1831 	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1832 
1833 	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1834 	if (rc != ECORE_SUCCESS)
1835 		return rc;
1836 
1837 	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1838 		DP_INFO(p_hwfn,
1839 			"Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1840 			mdump_cmd_params.mcp_resp);
1841 		rc = ECORE_UNKNOWN_ERROR;
1842 	}
1843 
1844 	return rc;
1845 }
1846 
1847 enum _ecore_status_t
1848 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1849 			 struct ecore_mdump_info *p_mdump_info)
1850 {
1851 	u32 addr, global_offsize, global_addr;
1852 	struct mdump_config_stc mdump_config;
1853 	enum _ecore_status_t rc;
1854 
1855 	OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1856 
1857 	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1858 				    PUBLIC_GLOBAL);
1859 	global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1860 	global_addr = SECTION_ADDR(global_offsize, 0);
1861 	p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1862 					global_addr +
1863 					OFFSETOF(struct public_global,
1864 						 mdump_reason));
1865 
1866 	if (p_mdump_info->reason) {
1867 		rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1868 		if (rc != ECORE_SUCCESS)
1869 			return rc;
1870 
1871 		p_mdump_info->version = mdump_config.version;
1872 		p_mdump_info->config = mdump_config.config;
1873 		p_mdump_info->epoch = mdump_config.epoc;
1874 		p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1875 		p_mdump_info->valid_logs = mdump_config.valid_logs;
1876 
1877 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1878 			   "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1879 			   p_mdump_info->reason, p_mdump_info->version,
1880 			   p_mdump_info->config, p_mdump_info->epoch,
1881 			   p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1882 	} else {
1883 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1884 			   "MFW mdump info: reason %d\n", p_mdump_info->reason);
1885 	}
1886 
1887 	return ECORE_SUCCESS;
1888 }
1889 
1890 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1891 						struct ecore_ptt *p_ptt)
1892 {
1893 	struct ecore_mdump_cmd_params mdump_cmd_params;
1894 
1895 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1896 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1897 
1898 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1899 }
1900 
1901 enum _ecore_status_t
1902 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1903 			   struct ecore_mdump_retain_data *p_mdump_retain)
1904 {
1905 	struct ecore_mdump_cmd_params mdump_cmd_params;
1906 	struct mdump_retain_data_stc mfw_mdump_retain;
1907 	enum _ecore_status_t rc;
1908 
1909 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1910 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1911 	mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
1912 	mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
1913 
1914 	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1915 	if (rc != ECORE_SUCCESS)
1916 		return rc;
1917 
1918 	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1919 		DP_INFO(p_hwfn,
1920 			"Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1921 			mdump_cmd_params.mcp_resp);
1922 		return ECORE_UNKNOWN_ERROR;
1923 	}
1924 
1925 	p_mdump_retain->valid = mfw_mdump_retain.valid;
1926 	p_mdump_retain->epoch = mfw_mdump_retain.epoch;
1927 	p_mdump_retain->pf = mfw_mdump_retain.pf;
1928 	p_mdump_retain->status = mfw_mdump_retain.status;
1929 
1930 	return ECORE_SUCCESS;
1931 }
1932 
1933 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
1934 						struct ecore_ptt *p_ptt)
1935 {
1936 	struct ecore_mdump_cmd_params mdump_cmd_params;
1937 
1938 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1939 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
1940 
1941 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1942 }
1943 
1944 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1945 					    struct ecore_ptt *p_ptt)
1946 {
1947 	struct ecore_mdump_retain_data mdump_retain;
1948 	enum _ecore_status_t rc;
1949 
1950 	/* In CMT mode - no need for more than a single acknowledgment to the
1951 	 * MFW, and no more than a single notification to the upper driver.
1952 	 */
1953 	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1954 		return;
1955 
1956 	rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1957 	if (rc == ECORE_SUCCESS && mdump_retain.valid) {
1958 		DP_NOTICE(p_hwfn, false,
1959 			  "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1960 			  mdump_retain.epoch, mdump_retain.pf,
1961 			  mdump_retain.status);
1962 	} else {
1963 		DP_NOTICE(p_hwfn, false,
1964 			  "The MFW notified that a critical error occurred in the device\n");
1965 	}
1966 
1967 	if (p_hwfn->p_dev->allow_mdump) {
1968 		DP_NOTICE(p_hwfn, false,
1969 			  "Not acknowledging the notification to allow the MFW crash dump\n");
1970 		return;
1971 	}
1972 
1973 	DP_NOTICE(p_hwfn, false,
1974 		  "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
1975 	ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1976 	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1977 }
1978 
1979 void
1980 ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1981 {
1982 	struct public_func shmem_info;
1983 	u32 port_cfg, val;
1984 
1985 	if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
1986 		return;
1987 
1988 	OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
1989 	port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1990 			    OFFSETOF(struct public_port, oem_cfg_port));
1991 	val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
1992 	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
1993 		DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type  %d\n",
1994 			  val);
1995 
1996 	val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
1997 	if (val == OEM_CFG_SCHED_TYPE_ETS)
1998 		p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
1999 	else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
2000 		p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
2001 	else
2002 		DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
2003 			  val);
2004 
2005 	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2006 				 MCP_PF_ID(p_hwfn));
2007 	val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
2008 	p_hwfn->ufp_info.tc = (u8)val;
2009 	val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
2010 			    OEM_CFG_FUNC_HOST_PRI_CTRL);
2011 	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
2012 		p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
2013 	else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
2014 		p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
2015 	else
2016 		DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
2017 			  val);
2018 
2019 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2020 		   "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
2021 		   p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
2022 		   p_hwfn->ufp_info.pri_type);
2023 }
2024 
2025 static enum _ecore_status_t
2026 ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
2027 {
2028 	ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
2029 
2030 	if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
2031 		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
2032 		p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
2033 
2034 		ecore_qm_reconf(p_hwfn, p_ptt);
2035 	} else {
2036 		/* Merge UFP TC with the dcbx TC data */
2037 		ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2038 					    ECORE_DCBX_OPERATIONAL_MIB);
2039 	}
2040 
2041 	/* update storm FW with negotiation results */
2042 	ecore_sp_pf_update_ufp(p_hwfn);
2043 
2044 	return ECORE_SUCCESS;
2045 }
2046 
2047 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
2048 					     struct ecore_ptt *p_ptt)
2049 {
2050 	struct ecore_mcp_info *info = p_hwfn->mcp_info;
2051 	enum _ecore_status_t rc = ECORE_SUCCESS;
2052 	bool found = false;
2053 	u16 i;
2054 
2055 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
2056 
2057 	/* Read Messages from MFW */
2058 	ecore_mcp_read_mb(p_hwfn, p_ptt);
2059 
2060 	/* Compare current messages to old ones */
2061 	for (i = 0; i < info->mfw_mb_length; i++) {
2062 		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
2063 			continue;
2064 
2065 		found = true;
2066 
2067 		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
2068 			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
2069 			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
2070 
2071 		switch (i) {
2072 		case MFW_DRV_MSG_LINK_CHANGE:
2073 			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
2074 			break;
2075 		case MFW_DRV_MSG_VF_DISABLED:
2076 			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
2077 			break;
2078 		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
2079 			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2080 						    ECORE_DCBX_REMOTE_LLDP_MIB);
2081 			break;
2082 		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
2083 			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2084 						    ECORE_DCBX_REMOTE_MIB);
2085 			break;
2086 		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
2087 			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2088 						    ECORE_DCBX_OPERATIONAL_MIB);
2089 			/* clear the user-config cache */
2090 			OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
2091 				    sizeof(struct ecore_dcbx_set));
2092 			break;
2093 		case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
2094 			ecore_lldp_mib_update_event(p_hwfn, p_ptt);
2095 			break;
2096 		case MFW_DRV_MSG_OEM_CFG_UPDATE:
2097 			ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
2098 			break;
2099 		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
2100 			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
2101 			break;
2102 		case MFW_DRV_MSG_ERROR_RECOVERY:
2103 			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
2104 			break;
2105 		case MFW_DRV_MSG_GET_LAN_STATS:
2106 		case MFW_DRV_MSG_GET_FCOE_STATS:
2107 		case MFW_DRV_MSG_GET_ISCSI_STATS:
2108 		case MFW_DRV_MSG_GET_RDMA_STATS:
2109 			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
2110 			break;
2111 		case MFW_DRV_MSG_BW_UPDATE:
2112 			ecore_mcp_update_bw(p_hwfn, p_ptt);
2113 			break;
2114 		case MFW_DRV_MSG_S_TAG_UPDATE:
2115 			ecore_mcp_update_stag(p_hwfn, p_ptt);
2116 			break;
2117 		case MFW_DRV_MSG_FAILURE_DETECTED:
2118 			ecore_mcp_handle_fan_failure(p_hwfn);
2119 			break;
2120 		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
2121 			ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
2122 			break;
2123 		default:
2124 			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
2125 			rc = ECORE_INVAL;
2126 		}
2127 	}
2128 
2129 	/* ACK everything */
2130 	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
2131 		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
2132 
2133 		/* MFW expect answer in BE, so we force write in that format */
2134 		ecore_wr(p_hwfn, p_ptt,
2135 			 info->mfw_mb_addr + sizeof(u32) +
2136 			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
2137 			 sizeof(u32) + i * sizeof(u32), val);
2138 	}
2139 
2140 	if (!found) {
2141 		DP_NOTICE(p_hwfn, false,
2142 			  "Received an MFW message indication but no"
2143 			  " new message!\n");
2144 		rc = ECORE_INVAL;
2145 	}
2146 
2147 	/* Copy the new mfw messages into the shadow */
2148 	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
2149 
2150 	return rc;
2151 }
2152 
2153 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
2154 					   struct ecore_ptt *p_ptt,
2155 					   u32 *p_mfw_ver,
2156 					   u32 *p_running_bundle_id)
2157 {
2158 	u32 global_offsize;
2159 
2160 #ifndef ASIC_ONLY
2161 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2162 		DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
2163 		return ECORE_SUCCESS;
2164 	}
2165 #endif
2166 
2167 	if (IS_VF(p_hwfn->p_dev)) {
2168 		if (p_hwfn->vf_iov_info) {
2169 			struct pfvf_acquire_resp_tlv *p_resp;
2170 
2171 			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
2172 			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
2173 			return ECORE_SUCCESS;
2174 		} else {
2175 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2176 				   "VF requested MFW version prior to ACQUIRE\n");
2177 			return ECORE_INVAL;
2178 		}
2179 	}
2180 
2181 	global_offsize = ecore_rd(p_hwfn, p_ptt,
2182 				  SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
2183 						       public_base,
2184 						       PUBLIC_GLOBAL));
2185 	*p_mfw_ver =
2186 	    ecore_rd(p_hwfn, p_ptt,
2187 		     SECTION_ADDR(global_offsize,
2188 				  0) + OFFSETOF(struct public_global, mfw_ver));
2189 
2190 	if (p_running_bundle_id != OSAL_NULL) {
2191 		*p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2192 						SECTION_ADDR(global_offsize,
2193 							     0) +
2194 						OFFSETOF(struct public_global,
2195 							 running_bundle_id));
2196 	}
2197 
2198 	return ECORE_SUCCESS;
2199 }
2200 
2201 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
2202 					      struct ecore_ptt *p_ptt,
2203 					      u32 *p_media_type)
2204 {
2205 	enum _ecore_status_t rc = ECORE_SUCCESS;
2206 
2207 	/* TODO - Add support for VFs */
2208 	if (IS_VF(p_hwfn->p_dev))
2209 		return ECORE_INVAL;
2210 
2211 	if (!ecore_mcp_is_init(p_hwfn)) {
2212 		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2213 		return ECORE_BUSY;
2214 	}
2215 
2216 	if (!p_ptt) {
2217 		*p_media_type = MEDIA_UNSPECIFIED;
2218 		rc = ECORE_INVAL;
2219 	} else {
2220 		*p_media_type = ecore_rd(p_hwfn, p_ptt,
2221 					 p_hwfn->mcp_info->port_addr +
2222 					 OFFSETOF(struct public_port,
2223 						  media_type));
2224 	}
2225 
2226 	return ECORE_SUCCESS;
2227 }
2228 
2229 enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
2230 						    struct ecore_ptt *p_ptt,
2231 						    u32 *p_transceiver_state,
2232 						    u32 *p_transceiver_type)
2233 {
2234 	u32 transceiver_info;
2235 	enum _ecore_status_t rc = ECORE_SUCCESS;
2236 
2237 	/* TODO - Add support for VFs */
2238 	if (IS_VF(p_hwfn->p_dev))
2239 		return ECORE_INVAL;
2240 
2241 	if (!ecore_mcp_is_init(p_hwfn)) {
2242 		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2243 		return ECORE_BUSY;
2244 	}
2245 
2246 	*p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
2247 	*p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
2248 
2249 	transceiver_info = ecore_rd(p_hwfn, p_ptt,
2250 				    p_hwfn->mcp_info->port_addr +
2251 				    offsetof(struct public_port,
2252 				    transceiver_data));
2253 
2254 	*p_transceiver_state = GET_MFW_FIELD(transceiver_info,
2255 					     ETH_TRANSCEIVER_STATE);
2256 
2257 	if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) {
2258 		*p_transceiver_type = GET_MFW_FIELD(transceiver_info,
2259 					    ETH_TRANSCEIVER_TYPE);
2260 	} else {
2261 		*p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
2262 	}
2263 
2264 	return rc;
2265 }
2266 
2267 static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type)
2268 {
2269 	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
2270 	    ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
2271 	    (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
2272 		return 1;
2273 
2274 	return 0;
2275 }
2276 
2277 enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
2278 						struct ecore_ptt *p_ptt,
2279 						u32 *p_speed_mask)
2280 {
2281 	u32 transceiver_type, transceiver_state;
2282 
2283 	ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
2284 				       &transceiver_type);
2285 
2286 
2287 	if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
2288 		return ECORE_INVAL;
2289 
2290 	switch (transceiver_type) {
2291 	case ETH_TRANSCEIVER_TYPE_1G_LX:
2292 	case ETH_TRANSCEIVER_TYPE_1G_SX:
2293 	case ETH_TRANSCEIVER_TYPE_1G_PCC:
2294 	case ETH_TRANSCEIVER_TYPE_1G_ACC:
2295 	case ETH_TRANSCEIVER_TYPE_1000BASET:
2296 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2297 		break;
2298 
2299 	case ETH_TRANSCEIVER_TYPE_10G_SR:
2300 	case ETH_TRANSCEIVER_TYPE_10G_LR:
2301 	case ETH_TRANSCEIVER_TYPE_10G_LRM:
2302 	case ETH_TRANSCEIVER_TYPE_10G_ER:
2303 	case ETH_TRANSCEIVER_TYPE_10G_PCC:
2304 	case ETH_TRANSCEIVER_TYPE_10G_ACC:
2305 	case ETH_TRANSCEIVER_TYPE_4x10G:
2306 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2307 		break;
2308 
2309 	case ETH_TRANSCEIVER_TYPE_40G_LR4:
2310 	case ETH_TRANSCEIVER_TYPE_40G_SR4:
2311 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2312 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2313 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2314 		 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2315 		break;
2316 
2317 	case ETH_TRANSCEIVER_TYPE_100G_AOC:
2318 	case ETH_TRANSCEIVER_TYPE_100G_SR4:
2319 	case ETH_TRANSCEIVER_TYPE_100G_LR4:
2320 	case ETH_TRANSCEIVER_TYPE_100G_ER4:
2321 	case ETH_TRANSCEIVER_TYPE_100G_ACC:
2322 		*p_speed_mask =
2323 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2324 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2325 		break;
2326 
2327 	case ETH_TRANSCEIVER_TYPE_25G_SR:
2328 	case ETH_TRANSCEIVER_TYPE_25G_LR:
2329 	case ETH_TRANSCEIVER_TYPE_25G_AOC:
2330 	case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
2331 	case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
2332 	case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
2333 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2334 		break;
2335 
2336 	case ETH_TRANSCEIVER_TYPE_25G_CA_N:
2337 	case ETH_TRANSCEIVER_TYPE_25G_CA_S:
2338 	case ETH_TRANSCEIVER_TYPE_25G_CA_L:
2339 	case ETH_TRANSCEIVER_TYPE_4x25G_CR:
2340 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2341 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2342 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2343 		break;
2344 
2345 	case ETH_TRANSCEIVER_TYPE_40G_CR4:
2346 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
2347 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2348 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2349 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2350 		break;
2351 
2352 	case ETH_TRANSCEIVER_TYPE_100G_CR4:
2353 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
2354 		*p_speed_mask =
2355 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2356 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
2357 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2358 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2359 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
2360 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2361 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2362 		break;
2363 
2364 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2365 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2366 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
2367 		*p_speed_mask =
2368 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2369 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2370 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2371 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2372 		break;
2373 
2374 	case ETH_TRANSCEIVER_TYPE_XLPPI:
2375 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2376 		break;
2377 
2378 	case ETH_TRANSCEIVER_TYPE_10G_BASET:
2379 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2380 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2381 		break;
2382 
2383 	default:
2384 		DP_INFO(p_hwfn, "Unknown transcevier type 0x%x\n",
2385 			transceiver_type);
2386 		*p_speed_mask = 0xff;
2387 		break;
2388 	}
2389 
2390 	return ECORE_SUCCESS;
2391 }
2392 
2393 enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
2394 						struct ecore_ptt *p_ptt,
2395 						u32 *p_board_config)
2396 {
2397 	u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
2398 	enum _ecore_status_t rc = ECORE_SUCCESS;
2399 
2400 	/* TODO - Add support for VFs */
2401 	if (IS_VF(p_hwfn->p_dev))
2402 		return ECORE_INVAL;
2403 
2404 	if (!ecore_mcp_is_init(p_hwfn)) {
2405 		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2406 		return ECORE_BUSY;
2407 	}
2408 	if (!p_ptt) {
2409 		*p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
2410 		rc = ECORE_INVAL;
2411 	} else {
2412 		nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
2413 					MISC_REG_GEN_PURP_CR0);
2414 		nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,
2415 					   nvm_cfg_addr + 4);
2416 		port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2417 			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
2418 		*p_board_config  =  ecore_rd(p_hwfn, p_ptt,
2419 					     port_cfg_addr +
2420 					     offsetof(struct nvm_cfg1_port,
2421 					     board_cfg));
2422 	}
2423 
2424 	return rc;
2425 }
2426 
2427 /* @DPDK */
2428 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2429 static void
2430 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2431 				 enum ecore_pci_personality *p_proto)
2432 {
2433 	*p_proto = ECORE_PCI_ETH;
2434 
2435 	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2436 		   "According to Legacy capabilities, L2 personality is %08x\n",
2437 		   (u32)*p_proto);
2438 }
2439 
2440 /* @DPDK */
2441 static enum _ecore_status_t
2442 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2443 			      struct ecore_ptt *p_ptt,
2444 			      enum ecore_pci_personality *p_proto)
2445 {
2446 	u32 resp = 0, param = 0;
2447 	enum _ecore_status_t rc;
2448 
2449 	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2450 		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2451 		   (u32)*p_proto, resp, param);
2452 	return ECORE_SUCCESS;
2453 }
2454 
2455 static enum _ecore_status_t
2456 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2457 			  struct public_func *p_info,
2458 			  struct ecore_ptt *p_ptt,
2459 			  enum ecore_pci_personality *p_proto)
2460 {
2461 	enum _ecore_status_t rc = ECORE_SUCCESS;
2462 
2463 	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2464 	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2465 		if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2466 		    ECORE_SUCCESS)
2467 			ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2468 		break;
2469 	default:
2470 		rc = ECORE_INVAL;
2471 	}
2472 
2473 	return rc;
2474 }
2475 
2476 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2477 						    struct ecore_ptt *p_ptt)
2478 {
2479 	struct ecore_mcp_function_info *info;
2480 	struct public_func shmem_info;
2481 
2482 	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2483 	info = &p_hwfn->mcp_info->func_info;
2484 
2485 	info->pause_on_host = (shmem_info.config &
2486 			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2487 
2488 	if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2489 				      &info->protocol)) {
2490 		DP_ERR(p_hwfn, "Unknown personality %08x\n",
2491 		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2492 		return ECORE_INVAL;
2493 	}
2494 
2495 	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2496 
2497 	if (shmem_info.mac_upper || shmem_info.mac_lower) {
2498 		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2499 		info->mac[1] = (u8)(shmem_info.mac_upper);
2500 		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2501 		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2502 		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2503 		info->mac[5] = (u8)(shmem_info.mac_lower);
2504 	} else {
2505 		/* TODO - are there protocols for which there's no MAC? */
2506 		DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2507 	}
2508 
2509 	/* TODO - are these calculations true for BE machine? */
2510 	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
2511 			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
2512 	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
2513 			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
2514 
2515 	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2516 
2517 	info->mtu = (u16)shmem_info.mtu_size;
2518 
2519 	if (info->mtu == 0)
2520 		info->mtu = 1500;
2521 
2522 	info->mtu = (u16)shmem_info.mtu_size;
2523 
2524 	DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2525 		   "Read configuration from shmem: pause_on_host %02x"
2526 		    " protocol %02x BW [%02x - %02x]"
2527 		    " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
2528 		    " node %lx ovlan %04x\n",
2529 		   info->pause_on_host, info->protocol,
2530 		   info->bandwidth_min, info->bandwidth_max,
2531 		   info->mac[0], info->mac[1], info->mac[2],
2532 		   info->mac[3], info->mac[4], info->mac[5],
2533 		   (unsigned long)info->wwn_port,
2534 		   (unsigned long)info->wwn_node, info->ovlan);
2535 
2536 	return ECORE_SUCCESS;
2537 }
2538 
2539 struct ecore_mcp_link_params
2540 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2541 {
2542 	if (!p_hwfn || !p_hwfn->mcp_info)
2543 		return OSAL_NULL;
2544 	return &p_hwfn->mcp_info->link_input;
2545 }
2546 
2547 struct ecore_mcp_link_state
2548 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2549 {
2550 	if (!p_hwfn || !p_hwfn->mcp_info)
2551 		return OSAL_NULL;
2552 
2553 #ifndef ASIC_ONLY
2554 	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2555 		DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2556 		p_hwfn->mcp_info->link_output.link_up = true;
2557 	}
2558 #endif
2559 
2560 	return &p_hwfn->mcp_info->link_output;
2561 }
2562 
2563 struct ecore_mcp_link_capabilities
2564 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2565 {
2566 	if (!p_hwfn || !p_hwfn->mcp_info)
2567 		return OSAL_NULL;
2568 	return &p_hwfn->mcp_info->link_capabilities;
2569 }
2570 
2571 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2572 				     struct ecore_ptt *p_ptt)
2573 {
2574 	u32 resp = 0, param = 0;
2575 	enum _ecore_status_t rc;
2576 
2577 	rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2578 			   DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2579 
2580 	/* Wait for the drain to complete before returning */
2581 	OSAL_MSLEEP(1020);
2582 
2583 	return rc;
2584 }
2585 
2586 const struct ecore_mcp_function_info
2587 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2588 {
2589 	if (!p_hwfn || !p_hwfn->mcp_info)
2590 		return OSAL_NULL;
2591 	return &p_hwfn->mcp_info->func_info;
2592 }
2593 
2594 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2595 				  struct ecore_ptt *p_ptt, u32 personalities)
2596 {
2597 	enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2598 	struct public_func shmem_info;
2599 	int i, count = 0, num_pfs;
2600 
2601 	num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2602 
2603 	for (i = 0; i < num_pfs; i++) {
2604 		ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2605 					 MCP_PF_ID_BY_REL(p_hwfn, i));
2606 		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2607 			continue;
2608 
2609 		if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2610 					      &protocol) !=
2611 		    ECORE_SUCCESS)
2612 			continue;
2613 
2614 		if ((1 << ((u32)protocol)) & personalities)
2615 			count++;
2616 	}
2617 
2618 	return count;
2619 }
2620 
2621 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2622 					      struct ecore_ptt *p_ptt,
2623 					      u32 *p_flash_size)
2624 {
2625 	u32 flash_size;
2626 
2627 #ifndef ASIC_ONLY
2628 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2629 		DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2630 		return ECORE_INVAL;
2631 	}
2632 #endif
2633 
2634 	if (IS_VF(p_hwfn->p_dev))
2635 		return ECORE_INVAL;
2636 
2637 	flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2638 	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2639 		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2640 	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
2641 
2642 	*p_flash_size = flash_size;
2643 
2644 	return ECORE_SUCCESS;
2645 }
2646 
2647 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2648 						  struct ecore_ptt *p_ptt)
2649 {
2650 	struct ecore_dev *p_dev = p_hwfn->p_dev;
2651 
2652 	if (p_dev->recov_in_prog) {
2653 		DP_NOTICE(p_hwfn, false,
2654 			  "Avoid triggering a recovery since such a process"
2655 			  " is already in progress\n");
2656 		return ECORE_AGAIN;
2657 	}
2658 
2659 	DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2660 	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2661 
2662 	return ECORE_SUCCESS;
2663 }
2664 
2665 static enum _ecore_status_t
2666 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2667 			    struct ecore_ptt *p_ptt,
2668 			    u8 vf_id, u8 num)
2669 {
2670 	u32 resp = 0, param = 0, rc_param = 0;
2671 	enum _ecore_status_t rc;
2672 
2673 /* Only Leader can configure MSIX, and need to take CMT into account */
2674 
2675 	if (!IS_LEAD_HWFN(p_hwfn))
2676 		return ECORE_SUCCESS;
2677 	num *= p_hwfn->p_dev->num_hwfns;
2678 
2679 	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
2680 	    DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2681 	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
2682 	    DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2683 
2684 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2685 			   &resp, &rc_param);
2686 
2687 	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2688 		DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2689 			  vf_id);
2690 		rc = ECORE_INVAL;
2691 	} else {
2692 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2693 			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2694 			    num, vf_id);
2695 	}
2696 
2697 	return rc;
2698 }
2699 
2700 static enum _ecore_status_t
2701 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2702 			    struct ecore_ptt *p_ptt,
2703 			    u8 num)
2704 {
2705 	u32 resp = 0, param = num, rc_param = 0;
2706 	enum _ecore_status_t rc;
2707 
2708 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2709 			   param, &resp, &rc_param);
2710 
2711 	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2712 		DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2713 		rc = ECORE_INVAL;
2714 	} else {
2715 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2716 			   "Requested 0x%02x MSI-x interrupts for VFs\n",
2717 			   num);
2718 	}
2719 
2720 	return rc;
2721 }
2722 
2723 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2724 					      struct ecore_ptt *p_ptt,
2725 					      u8 vf_id, u8 num)
2726 {
2727 	if (ECORE_IS_BB(p_hwfn->p_dev))
2728 		return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2729 	else
2730 		return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2731 }
2732 
2733 enum _ecore_status_t
2734 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2735 			   struct ecore_mcp_drv_version *p_ver)
2736 {
2737 	struct ecore_mcp_mb_params mb_params;
2738 	struct drv_version_stc drv_version;
2739 	u32 num_words, i;
2740 	void *p_name;
2741 	OSAL_BE32 val;
2742 	enum _ecore_status_t rc;
2743 
2744 #ifndef ASIC_ONLY
2745 	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2746 		return ECORE_SUCCESS;
2747 #endif
2748 
2749 	OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2750 	drv_version.version = p_ver->version;
2751 	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2752 	for (i = 0; i < num_words; i++) {
2753 		/* The driver name is expected to be in a big-endian format */
2754 		p_name = &p_ver->name[i * sizeof(u32)];
2755 		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2756 		*(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2757 	}
2758 
2759 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2760 	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2761 	mb_params.p_data_src = &drv_version;
2762 	mb_params.data_src_size = sizeof(drv_version);
2763 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2764 	if (rc != ECORE_SUCCESS)
2765 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2766 
2767 	return rc;
2768 }
2769 
2770 /* A maximal 100 msec waiting time for the MCP to halt */
2771 #define ECORE_MCP_HALT_SLEEP_MS		10
2772 #define ECORE_MCP_HALT_MAX_RETRIES	10
2773 
2774 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2775 				    struct ecore_ptt *p_ptt)
2776 {
2777 	u32 resp = 0, param = 0, cpu_state, cnt = 0;
2778 	enum _ecore_status_t rc;
2779 
2780 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2781 			   &param);
2782 	if (rc != ECORE_SUCCESS) {
2783 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2784 		return rc;
2785 	}
2786 
2787 	do {
2788 		OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
2789 		cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2790 		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2791 			break;
2792 	} while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
2793 
2794 	if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
2795 		DP_NOTICE(p_hwfn, false,
2796 			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2797 			  ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2798 		return ECORE_BUSY;
2799 	}
2800 
2801 	ecore_mcp_cmd_set_blocking(p_hwfn, true);
2802 
2803 	return ECORE_SUCCESS;
2804 }
2805 
2806 #define ECORE_MCP_RESUME_SLEEP_MS	10
2807 
2808 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2809 				      struct ecore_ptt *p_ptt)
2810 {
2811 	u32 cpu_mode, cpu_state;
2812 
2813 	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2814 
2815 	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2816 	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2817 	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2818 
2819 	OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
2820 	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2821 
2822 	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2823 		DP_NOTICE(p_hwfn, false,
2824 			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2825 			  cpu_mode, cpu_state);
2826 		return ECORE_BUSY;
2827 	}
2828 
2829 	ecore_mcp_cmd_set_blocking(p_hwfn, false);
2830 
2831 	return ECORE_SUCCESS;
2832 }
2833 
2834 enum _ecore_status_t
2835 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2836 				   struct ecore_ptt *p_ptt,
2837 				   enum ecore_ov_client client)
2838 {
2839 	u32 resp = 0, param = 0;
2840 	u32 drv_mb_param;
2841 	enum _ecore_status_t rc;
2842 
2843 	switch (client) {
2844 	case ECORE_OV_CLIENT_DRV:
2845 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2846 		break;
2847 	case ECORE_OV_CLIENT_USER:
2848 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2849 		break;
2850 	case ECORE_OV_CLIENT_VENDOR_SPEC:
2851 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2852 		break;
2853 	default:
2854 		DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2855 		return ECORE_INVAL;
2856 	}
2857 
2858 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2859 			   drv_mb_param, &resp, &param);
2860 	if (rc != ECORE_SUCCESS)
2861 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2862 
2863 	return rc;
2864 }
2865 
2866 enum _ecore_status_t
2867 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2868 				 struct ecore_ptt *p_ptt,
2869 				 enum ecore_ov_driver_state drv_state)
2870 {
2871 	u32 resp = 0, param = 0;
2872 	u32 drv_mb_param;
2873 	enum _ecore_status_t rc;
2874 
2875 	switch (drv_state) {
2876 	case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2877 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2878 		break;
2879 	case ECORE_OV_DRIVER_STATE_DISABLED:
2880 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2881 		break;
2882 	case ECORE_OV_DRIVER_STATE_ACTIVE:
2883 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2884 		break;
2885 	default:
2886 		DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
2887 		return ECORE_INVAL;
2888 	}
2889 
2890 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2891 			   drv_mb_param, &resp, &param);
2892 	if (rc != ECORE_SUCCESS)
2893 		DP_ERR(p_hwfn, "Failed to send driver state\n");
2894 
2895 	return rc;
2896 }
2897 
2898 enum _ecore_status_t
2899 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2900 			 struct ecore_fc_npiv_tbl *p_table)
2901 {
2902 	return 0;
2903 }
2904 
2905 enum _ecore_status_t
2906 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2907 			u16 mtu)
2908 {
2909 	u32 resp = 0, param = 0, drv_mb_param = 0;
2910 	enum _ecore_status_t rc;
2911 
2912 	SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_OV_MTU_SIZE, (u32)mtu);
2913 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2914 			   drv_mb_param, &resp, &param);
2915 	if (rc != ECORE_SUCCESS)
2916 		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2917 
2918 	return rc;
2919 }
2920 
2921 enum _ecore_status_t
2922 ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2923 			u8 *mac)
2924 {
2925 	struct ecore_mcp_mb_params mb_params;
2926 	union drv_union_data union_data;
2927 	enum _ecore_status_t rc;
2928 
2929 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2930 	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2931 	SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_VMAC_TYPE,
2932 		      DRV_MSG_CODE_VMAC_TYPE_MAC);
2933 	mb_params.param |= MCP_PF_ID(p_hwfn);
2934 	OSAL_MEMCPY(&union_data.raw_data, mac, ETH_ALEN);
2935 	mb_params.p_data_src = &union_data;
2936 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2937 	if (rc != ECORE_SUCCESS)
2938 		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2939 
2940 	return rc;
2941 }
2942 
2943 enum _ecore_status_t
2944 ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2945 			    enum ecore_ov_eswitch eswitch)
2946 {
2947 	enum _ecore_status_t rc;
2948 	u32 resp = 0, param = 0;
2949 	u32 drv_mb_param;
2950 
2951 	switch (eswitch) {
2952 	case ECORE_OV_ESWITCH_NONE:
2953 		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2954 		break;
2955 	case ECORE_OV_ESWITCH_VEB:
2956 		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2957 		break;
2958 	case ECORE_OV_ESWITCH_VEPA:
2959 		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2960 		break;
2961 	default:
2962 		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2963 		return ECORE_INVAL;
2964 	}
2965 
2966 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2967 			   drv_mb_param, &resp, &param);
2968 	if (rc != ECORE_SUCCESS)
2969 		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2970 
2971 	return rc;
2972 }
2973 
2974 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2975 				       struct ecore_ptt *p_ptt,
2976 				       enum ecore_led_mode mode)
2977 {
2978 	u32 resp = 0, param = 0, drv_mb_param;
2979 	enum _ecore_status_t rc;
2980 
2981 	switch (mode) {
2982 	case ECORE_LED_MODE_ON:
2983 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2984 		break;
2985 	case ECORE_LED_MODE_OFF:
2986 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2987 		break;
2988 	case ECORE_LED_MODE_RESTORE:
2989 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2990 		break;
2991 	default:
2992 		DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2993 		return ECORE_INVAL;
2994 	}
2995 
2996 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2997 			   drv_mb_param, &resp, &param);
2998 	if (rc != ECORE_SUCCESS)
2999 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
3000 
3001 	return rc;
3002 }
3003 
3004 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
3005 					     struct ecore_ptt *p_ptt,
3006 					     u32 mask_parities)
3007 {
3008 	u32 resp = 0, param = 0;
3009 	enum _ecore_status_t rc;
3010 
3011 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
3012 			   mask_parities, &resp, &param);
3013 
3014 	if (rc != ECORE_SUCCESS) {
3015 		DP_ERR(p_hwfn,
3016 		       "MCP response failure for mask parities, aborting\n");
3017 	} else if (resp != FW_MSG_CODE_OK) {
3018 		DP_ERR(p_hwfn,
3019 		       "MCP did not ack mask parity request. Old MFW?\n");
3020 		rc = ECORE_INVAL;
3021 	}
3022 
3023 	return rc;
3024 }
3025 
3026 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
3027 					u8 *p_buf, u32 len)
3028 {
3029 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3030 	u32 bytes_left, offset, bytes_to_copy, buf_size;
3031 	u32 nvm_offset, resp, param;
3032 	struct ecore_ptt *p_ptt;
3033 	enum _ecore_status_t rc = ECORE_SUCCESS;
3034 
3035 	p_ptt = ecore_ptt_acquire(p_hwfn);
3036 	if (!p_ptt)
3037 		return ECORE_BUSY;
3038 
3039 	bytes_left = len;
3040 	offset = 0;
3041 	while (bytes_left > 0) {
3042 		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3043 					   MCP_DRV_NVM_BUF_LEN);
3044 		nvm_offset = (addr + offset) | (bytes_to_copy <<
3045 						DRV_MB_PARAM_NVM_LEN_OFFSET);
3046 		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3047 					  DRV_MSG_CODE_NVM_READ_NVRAM,
3048 					  nvm_offset, &resp, &param, &buf_size,
3049 					  (u32 *)(p_buf + offset));
3050 		if (rc != ECORE_SUCCESS) {
3051 			DP_NOTICE(p_dev, false,
3052 				  "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
3053 				  rc);
3054 			resp = FW_MSG_CODE_ERROR;
3055 			break;
3056 		}
3057 
3058 		if (resp != FW_MSG_CODE_NVM_OK) {
3059 			DP_NOTICE(p_dev, false,
3060 				  "nvm read failed, resp = 0x%08x\n", resp);
3061 			rc = ECORE_UNKNOWN_ERROR;
3062 			break;
3063 		}
3064 
3065 		/* This can be a lengthy process, and it's possible scheduler
3066 		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
3067 		 */
3068 		if (bytes_left % 0x1000 <
3069 		    (bytes_left - buf_size) % 0x1000)
3070 			OSAL_MSLEEP(1);
3071 
3072 		offset += buf_size;
3073 		bytes_left -= buf_size;
3074 	}
3075 
3076 	p_dev->mcp_nvm_resp = resp;
3077 	ecore_ptt_release(p_hwfn, p_ptt);
3078 
3079 	return rc;
3080 }
3081 
3082 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
3083 					u32 addr, u8 *p_buf, u32 *p_len)
3084 {
3085 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3086 	struct ecore_ptt *p_ptt;
3087 	u32 resp = 0, param;
3088 	enum _ecore_status_t rc;
3089 
3090 	p_ptt = ecore_ptt_acquire(p_hwfn);
3091 	if (!p_ptt)
3092 		return ECORE_BUSY;
3093 
3094 	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3095 				  (cmd == ECORE_PHY_CORE_READ) ?
3096 				  DRV_MSG_CODE_PHY_CORE_READ :
3097 				  DRV_MSG_CODE_PHY_RAW_READ,
3098 				  addr, &resp, &param, p_len, (u32 *)p_buf);
3099 	if (rc != ECORE_SUCCESS)
3100 		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3101 
3102 	p_dev->mcp_nvm_resp = resp;
3103 	ecore_ptt_release(p_hwfn, p_ptt);
3104 
3105 	return rc;
3106 }
3107 
3108 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
3109 {
3110 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3111 	struct ecore_ptt *p_ptt;
3112 
3113 	p_ptt = ecore_ptt_acquire(p_hwfn);
3114 	if (!p_ptt)
3115 		return ECORE_BUSY;
3116 
3117 	OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
3118 	ecore_ptt_release(p_hwfn, p_ptt);
3119 
3120 	return ECORE_SUCCESS;
3121 }
3122 
3123 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
3124 {
3125 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3126 	struct ecore_ptt *p_ptt;
3127 	u32 resp = 0, param;
3128 	enum _ecore_status_t rc;
3129 
3130 	p_ptt = ecore_ptt_acquire(p_hwfn);
3131 	if (!p_ptt)
3132 		return ECORE_BUSY;
3133 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
3134 			   &resp, &param);
3135 	p_dev->mcp_nvm_resp = resp;
3136 	ecore_ptt_release(p_hwfn, p_ptt);
3137 
3138 	return rc;
3139 }
3140 
3141 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
3142 						  u32 addr)
3143 {
3144 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3145 	struct ecore_ptt *p_ptt;
3146 	u32 resp = 0, param;
3147 	enum _ecore_status_t rc;
3148 
3149 	p_ptt = ecore_ptt_acquire(p_hwfn);
3150 	if (!p_ptt)
3151 		return ECORE_BUSY;
3152 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
3153 			   &resp, &param);
3154 	p_dev->mcp_nvm_resp = resp;
3155 	ecore_ptt_release(p_hwfn, p_ptt);
3156 
3157 	return rc;
3158 }
3159 
3160 /* rc receives ECORE_INVAL as default parameter because
3161  * it might not enter the while loop if the len is 0
3162  */
3163 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
3164 					 u32 addr, u8 *p_buf, u32 len)
3165 {
3166 	u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
3167 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3168 	enum _ecore_status_t rc = ECORE_INVAL;
3169 	struct ecore_ptt *p_ptt;
3170 
3171 	p_ptt = ecore_ptt_acquire(p_hwfn);
3172 	if (!p_ptt)
3173 		return ECORE_BUSY;
3174 
3175 	switch (cmd) {
3176 	case ECORE_PUT_FILE_DATA:
3177 		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
3178 		break;
3179 	case ECORE_NVM_WRITE_NVRAM:
3180 		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
3181 		break;
3182 	case ECORE_EXT_PHY_FW_UPGRADE:
3183 		nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
3184 		break;
3185 	default:
3186 		DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
3187 			  cmd);
3188 		rc = ECORE_INVAL;
3189 		goto out;
3190 	}
3191 
3192 	buf_idx = 0;
3193 	while (buf_idx < len) {
3194 		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3195 				      MCP_DRV_NVM_BUF_LEN);
3196 		nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
3197 			      addr) +
3198 			     buf_idx;
3199 		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
3200 					  &resp, &param, buf_size,
3201 					  (u32 *)&p_buf[buf_idx]);
3202 		if (rc != ECORE_SUCCESS) {
3203 			DP_NOTICE(p_dev, false,
3204 				  "ecore_mcp_nvm_write() failed, rc = %d\n",
3205 				  rc);
3206 			resp = FW_MSG_CODE_ERROR;
3207 			break;
3208 		}
3209 
3210 		if (resp != FW_MSG_CODE_OK &&
3211 		    resp != FW_MSG_CODE_NVM_OK &&
3212 		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
3213 			DP_NOTICE(p_dev, false,
3214 				  "nvm write failed, resp = 0x%08x\n", resp);
3215 			rc = ECORE_UNKNOWN_ERROR;
3216 			break;
3217 		}
3218 
3219 		/* This can be a lengthy process, and it's possible scheduler
3220 		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
3221 		 */
3222 		if (buf_idx % 0x1000 >
3223 		    (buf_idx + buf_size) % 0x1000)
3224 			OSAL_MSLEEP(1);
3225 
3226 		buf_idx += buf_size;
3227 	}
3228 
3229 	p_dev->mcp_nvm_resp = resp;
3230 out:
3231 	ecore_ptt_release(p_hwfn, p_ptt);
3232 
3233 	return rc;
3234 }
3235 
3236 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
3237 					 u32 addr, u8 *p_buf, u32 len)
3238 {
3239 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3240 	u32 resp = 0, param, nvm_cmd;
3241 	struct ecore_ptt *p_ptt;
3242 	enum _ecore_status_t rc;
3243 
3244 	p_ptt = ecore_ptt_acquire(p_hwfn);
3245 	if (!p_ptt)
3246 		return ECORE_BUSY;
3247 
3248 	nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ?  DRV_MSG_CODE_PHY_CORE_WRITE :
3249 			DRV_MSG_CODE_PHY_RAW_WRITE;
3250 	rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
3251 				  &resp, &param, len, (u32 *)p_buf);
3252 	if (rc != ECORE_SUCCESS)
3253 		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3254 	p_dev->mcp_nvm_resp = resp;
3255 	ecore_ptt_release(p_hwfn, p_ptt);
3256 
3257 	return rc;
3258 }
3259 
3260 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
3261 						   u32 addr)
3262 {
3263 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3264 	struct ecore_ptt *p_ptt;
3265 	u32 resp, param;
3266 	enum _ecore_status_t rc;
3267 
3268 	p_ptt = ecore_ptt_acquire(p_hwfn);
3269 	if (!p_ptt)
3270 		return ECORE_BUSY;
3271 
3272 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
3273 			   &resp, &param);
3274 	p_dev->mcp_nvm_resp = resp;
3275 	ecore_ptt_release(p_hwfn, p_ptt);
3276 
3277 	return rc;
3278 }
3279 
3280 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
3281 					    struct ecore_ptt *p_ptt,
3282 					    u32 port, u32 addr, u32 offset,
3283 					    u32 len, u8 *p_buf)
3284 {
3285 	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
3286 	u32 resp, param;
3287 	enum _ecore_status_t rc;
3288 
3289 	nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3290 			(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3291 	addr = offset;
3292 	offset = 0;
3293 	bytes_left = len;
3294 	while (bytes_left > 0) {
3295 		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3296 					   MAX_I2C_TRANSACTION_SIZE);
3297 		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3298 			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3299 		nvm_offset |= ((addr + offset) <<
3300 				DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3301 		nvm_offset |= (bytes_to_copy <<
3302 			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3303 		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3304 					  DRV_MSG_CODE_TRANSCEIVER_READ,
3305 					  nvm_offset, &resp, &param, &buf_size,
3306 					  (u32 *)(p_buf + offset));
3307 		if (rc != ECORE_SUCCESS) {
3308 			DP_NOTICE(p_hwfn, false,
3309 				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
3310 				  rc);
3311 			return rc;
3312 		}
3313 
3314 		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3315 			return ECORE_NODEV;
3316 		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3317 			return ECORE_UNKNOWN_ERROR;
3318 
3319 		offset += buf_size;
3320 		bytes_left -= buf_size;
3321 	}
3322 
3323 	return ECORE_SUCCESS;
3324 }
3325 
3326 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
3327 					     struct ecore_ptt *p_ptt,
3328 					     u32 port, u32 addr, u32 offset,
3329 					     u32 len, u8 *p_buf)
3330 {
3331 	u32 buf_idx, buf_size, nvm_offset, resp, param;
3332 	enum _ecore_status_t rc;
3333 
3334 	nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3335 			(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3336 	buf_idx = 0;
3337 	while (buf_idx < len) {
3338 		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3339 				      MAX_I2C_TRANSACTION_SIZE);
3340 		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3341 				 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3342 		nvm_offset |= ((offset + buf_idx) <<
3343 				 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3344 		nvm_offset |= (buf_size <<
3345 			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3346 		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
3347 					  DRV_MSG_CODE_TRANSCEIVER_WRITE,
3348 					  nvm_offset, &resp, &param, buf_size,
3349 					  (u32 *)&p_buf[buf_idx]);
3350 		if (rc != ECORE_SUCCESS) {
3351 			DP_NOTICE(p_hwfn, false,
3352 				  "Failed to send a transceiver write command to the MFW. rc = %d.\n",
3353 				  rc);
3354 			return rc;
3355 		}
3356 
3357 		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3358 			return ECORE_NODEV;
3359 		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3360 			return ECORE_UNKNOWN_ERROR;
3361 
3362 		buf_idx += buf_size;
3363 	}
3364 
3365 	return ECORE_SUCCESS;
3366 }
3367 
3368 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
3369 					 struct ecore_ptt *p_ptt,
3370 					 u16 gpio, u32 *gpio_val)
3371 {
3372 	enum _ecore_status_t rc = ECORE_SUCCESS;
3373 	u32 drv_mb_param = 0, rsp;
3374 
3375 	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
3376 
3377 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
3378 			   drv_mb_param, &rsp, gpio_val);
3379 
3380 	if (rc != ECORE_SUCCESS)
3381 		return rc;
3382 
3383 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3384 		return ECORE_UNKNOWN_ERROR;
3385 
3386 	return ECORE_SUCCESS;
3387 }
3388 
3389 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
3390 					  struct ecore_ptt *p_ptt,
3391 					  u16 gpio, u16 gpio_val)
3392 {
3393 	enum _ecore_status_t rc = ECORE_SUCCESS;
3394 	u32 drv_mb_param = 0, param, rsp;
3395 
3396 	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
3397 		(gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
3398 
3399 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
3400 			   drv_mb_param, &rsp, &param);
3401 
3402 	if (rc != ECORE_SUCCESS)
3403 		return rc;
3404 
3405 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3406 		return ECORE_UNKNOWN_ERROR;
3407 
3408 	return ECORE_SUCCESS;
3409 }
3410 
3411 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
3412 					 struct ecore_ptt *p_ptt,
3413 					 u16 gpio, u32 *gpio_direction,
3414 					 u32 *gpio_ctrl)
3415 {
3416 	u32 drv_mb_param = 0, rsp, val = 0;
3417 	enum _ecore_status_t rc = ECORE_SUCCESS;
3418 
3419 	drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
3420 
3421 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
3422 			   drv_mb_param, &rsp, &val);
3423 	if (rc != ECORE_SUCCESS)
3424 		return rc;
3425 
3426 	*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
3427 			   DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
3428 	*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
3429 		      DRV_MB_PARAM_GPIO_CTRL_OFFSET;
3430 
3431 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3432 		return ECORE_UNKNOWN_ERROR;
3433 
3434 	return ECORE_SUCCESS;
3435 }
3436 
3437 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
3438 						  struct ecore_ptt *p_ptt)
3439 {
3440 	u32 drv_mb_param = 0, rsp, param;
3441 	enum _ecore_status_t rc = ECORE_SUCCESS;
3442 
3443 	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3444 			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3445 
3446 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3447 			   drv_mb_param, &rsp, &param);
3448 
3449 	if (rc != ECORE_SUCCESS)
3450 		return rc;
3451 
3452 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3453 	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3454 		rc = ECORE_UNKNOWN_ERROR;
3455 
3456 	return rc;
3457 }
3458 
3459 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
3460 					       struct ecore_ptt *p_ptt)
3461 {
3462 	u32 drv_mb_param, rsp, param;
3463 	enum _ecore_status_t rc = ECORE_SUCCESS;
3464 
3465 	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3466 			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3467 
3468 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3469 			   drv_mb_param, &rsp, &param);
3470 
3471 	if (rc != ECORE_SUCCESS)
3472 		return rc;
3473 
3474 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3475 	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3476 		rc = ECORE_UNKNOWN_ERROR;
3477 
3478 	return rc;
3479 }
3480 
3481 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3482 	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3483 {
3484 	u32 drv_mb_param = 0, rsp;
3485 	enum _ecore_status_t rc = ECORE_SUCCESS;
3486 
3487 	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3488 			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3489 
3490 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3491 			   drv_mb_param, &rsp, num_images);
3492 
3493 	if (rc != ECORE_SUCCESS)
3494 		return rc;
3495 
3496 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
3497 		rc = ECORE_UNKNOWN_ERROR;
3498 
3499 	return rc;
3500 }
3501 
3502 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3503 	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3504 	struct bist_nvm_image_att *p_image_att, u32 image_index)
3505 {
3506 	u32 buf_size, nvm_offset, resp, param;
3507 	enum _ecore_status_t rc;
3508 
3509 	nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3510 				    DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3511 	nvm_offset |= (image_index <<
3512 		       DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
3513 	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3514 				  nvm_offset, &resp, &param, &buf_size,
3515 				  (u32 *)p_image_att);
3516 	if (rc != ECORE_SUCCESS)
3517 		return rc;
3518 
3519 	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3520 	    (p_image_att->return_code != 1))
3521 		rc = ECORE_UNKNOWN_ERROR;
3522 
3523 	return rc;
3524 }
3525 
3526 enum _ecore_status_t
3527 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3528 			       struct ecore_ptt *p_ptt,
3529 			       struct ecore_temperature_info *p_temp_info)
3530 {
3531 	struct ecore_temperature_sensor *p_temp_sensor;
3532 	struct temperature_status_stc mfw_temp_info;
3533 	struct ecore_mcp_mb_params mb_params;
3534 	u32 val;
3535 	enum _ecore_status_t rc;
3536 	u8 i;
3537 
3538 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3539 	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3540 	mb_params.p_data_dst = &mfw_temp_info;
3541 	mb_params.data_dst_size = sizeof(mfw_temp_info);
3542 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3543 	if (rc != ECORE_SUCCESS)
3544 		return rc;
3545 
3546 	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3547 	p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3548 					      ECORE_MAX_NUM_OF_SENSORS);
3549 	for (i = 0; i < p_temp_info->num_sensors; i++) {
3550 		val = mfw_temp_info.sensor[i];
3551 		p_temp_sensor = &p_temp_info->sensors[i];
3552 		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3553 						 SENSOR_LOCATION_OFFSET;
3554 		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3555 						THRESHOLD_HIGH_OFFSET;
3556 		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3557 					  CRITICAL_TEMPERATURE_OFFSET;
3558 		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3559 					      CURRENT_TEMP_OFFSET;
3560 	}
3561 
3562 	return ECORE_SUCCESS;
3563 }
3564 
3565 enum _ecore_status_t ecore_mcp_get_mba_versions(
3566 	struct ecore_hwfn *p_hwfn,
3567 	struct ecore_ptt *p_ptt,
3568 	struct ecore_mba_vers *p_mba_vers)
3569 {
3570 	u32 buf_size, resp, param;
3571 	enum _ecore_status_t rc;
3572 
3573 	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
3574 				  0, &resp, &param, &buf_size,
3575 				  &p_mba_vers->mba_vers[0]);
3576 
3577 	if (rc != ECORE_SUCCESS)
3578 		return rc;
3579 
3580 	if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3581 		rc = ECORE_UNKNOWN_ERROR;
3582 
3583 	if (buf_size != MCP_DRV_NVM_BUF_LEN)
3584 		rc = ECORE_UNKNOWN_ERROR;
3585 
3586 	return rc;
3587 }
3588 
3589 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3590 					      struct ecore_ptt *p_ptt,
3591 					      u64 *num_events)
3592 {
3593 	u32 rsp;
3594 
3595 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
3596 			     0, &rsp, (u32 *)num_events);
3597 }
3598 
3599 static enum resource_id_enum
3600 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3601 {
3602 	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3603 
3604 	switch (res_id) {
3605 	case ECORE_SB:
3606 		mfw_res_id = RESOURCE_NUM_SB_E;
3607 		break;
3608 	case ECORE_L2_QUEUE:
3609 		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3610 		break;
3611 	case ECORE_VPORT:
3612 		mfw_res_id = RESOURCE_NUM_VPORT_E;
3613 		break;
3614 	case ECORE_RSS_ENG:
3615 		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3616 		break;
3617 	case ECORE_PQ:
3618 		mfw_res_id = RESOURCE_NUM_PQ_E;
3619 		break;
3620 	case ECORE_RL:
3621 		mfw_res_id = RESOURCE_NUM_RL_E;
3622 		break;
3623 	case ECORE_MAC:
3624 	case ECORE_VLAN:
3625 		/* Each VFC resource can accommodate both a MAC and a VLAN */
3626 		mfw_res_id = RESOURCE_VFC_FILTER_E;
3627 		break;
3628 	case ECORE_ILT:
3629 		mfw_res_id = RESOURCE_ILT_E;
3630 		break;
3631 	case ECORE_LL2_QUEUE:
3632 		mfw_res_id = RESOURCE_LL2_QUEUE_E;
3633 		break;
3634 	case ECORE_RDMA_CNQ_RAM:
3635 	case ECORE_CMDQS_CQS:
3636 		/* CNQ/CMDQS are the same resource */
3637 		mfw_res_id = RESOURCE_CQS_E;
3638 		break;
3639 	case ECORE_RDMA_STATS_QUEUE:
3640 		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3641 		break;
3642 	case ECORE_BDQ:
3643 		mfw_res_id = RESOURCE_BDQ_E;
3644 		break;
3645 	default:
3646 		break;
3647 	}
3648 
3649 	return mfw_res_id;
3650 }
3651 
3652 #define ECORE_RESC_ALLOC_VERSION_MAJOR	2
3653 #define ECORE_RESC_ALLOC_VERSION_MINOR	0
3654 #define ECORE_RESC_ALLOC_VERSION				\
3655 	((ECORE_RESC_ALLOC_VERSION_MAJOR <<			\
3656 	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) |	\
3657 	 (ECORE_RESC_ALLOC_VERSION_MINOR <<			\
3658 	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
3659 
3660 struct ecore_resc_alloc_in_params {
3661 	u32 cmd;
3662 	enum ecore_resources res_id;
3663 	u32 resc_max_val;
3664 };
3665 
3666 struct ecore_resc_alloc_out_params {
3667 	u32 mcp_resp;
3668 	u32 mcp_param;
3669 	u32 resc_num;
3670 	u32 resc_start;
3671 	u32 vf_resc_num;
3672 	u32 vf_resc_start;
3673 	u32 flags;
3674 };
3675 
3676 #define ECORE_RECOVERY_PROLOG_SLEEP_MS	100
3677 
3678 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
3679 {
3680 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3681 	struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
3682 	enum _ecore_status_t rc;
3683 
3684 	/* Allow ongoing PCIe transactions to complete */
3685 	OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
3686 
3687 	/* Clear the PF's internal FID_enable in the PXP */
3688 	rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
3689 	if (rc != ECORE_SUCCESS)
3690 		DP_NOTICE(p_hwfn, false,
3691 			  "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
3692 			  rc);
3693 
3694 	return rc;
3695 }
3696 
3697 static enum _ecore_status_t
3698 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
3699 			      struct ecore_ptt *p_ptt,
3700 			      struct ecore_resc_alloc_in_params *p_in_params,
3701 			      struct ecore_resc_alloc_out_params *p_out_params)
3702 {
3703 	struct ecore_mcp_mb_params mb_params;
3704 	struct resource_info mfw_resc_info;
3705 	enum _ecore_status_t rc;
3706 
3707 	OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
3708 
3709 	mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
3710 	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3711 		DP_ERR(p_hwfn,
3712 		       "Failed to match resource %d [%s] with the MFW resources\n",
3713 		       p_in_params->res_id,
3714 		       ecore_hw_get_resc_name(p_in_params->res_id));
3715 		return ECORE_INVAL;
3716 	}
3717 
3718 	switch (p_in_params->cmd) {
3719 	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3720 		mfw_resc_info.size = p_in_params->resc_max_val;
3721 		/* Fallthrough */
3722 	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3723 		break;
3724 	default:
3725 		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3726 		       p_in_params->cmd);
3727 		return ECORE_INVAL;
3728 	}
3729 
3730 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3731 	mb_params.cmd = p_in_params->cmd;
3732 	mb_params.param = ECORE_RESC_ALLOC_VERSION;
3733 	mb_params.p_data_src = &mfw_resc_info;
3734 	mb_params.data_src_size = sizeof(mfw_resc_info);
3735 	mb_params.p_data_dst = mb_params.p_data_src;
3736 	mb_params.data_dst_size = mb_params.data_src_size;
3737 
3738 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3739 		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3740 		   p_in_params->cmd, p_in_params->res_id,
3741 		   ecore_hw_get_resc_name(p_in_params->res_id),
3742 		   GET_MFW_FIELD(mb_params.param,
3743 				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3744 		   GET_MFW_FIELD(mb_params.param,
3745 				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3746 		   p_in_params->resc_max_val);
3747 
3748 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3749 	if (rc != ECORE_SUCCESS)
3750 		return rc;
3751 
3752 	p_out_params->mcp_resp = mb_params.mcp_resp;
3753 	p_out_params->mcp_param = mb_params.mcp_param;
3754 	p_out_params->resc_num = mfw_resc_info.size;
3755 	p_out_params->resc_start = mfw_resc_info.offset;
3756 	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3757 	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3758 	p_out_params->flags = mfw_resc_info.flags;
3759 
3760 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3761 		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3762 		   GET_MFW_FIELD(p_out_params->mcp_param,
3763 				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3764 		   GET_MFW_FIELD(p_out_params->mcp_param,
3765 				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3766 		   p_out_params->resc_num, p_out_params->resc_start,
3767 		   p_out_params->vf_resc_num, p_out_params->vf_resc_start,
3768 		   p_out_params->flags);
3769 
3770 	return ECORE_SUCCESS;
3771 }
3772 
3773 enum _ecore_status_t
3774 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3775 			   enum ecore_resources res_id, u32 resc_max_val,
3776 			   u32 *p_mcp_resp)
3777 {
3778 	struct ecore_resc_alloc_out_params out_params;
3779 	struct ecore_resc_alloc_in_params in_params;
3780 	enum _ecore_status_t rc;
3781 
3782 	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3783 	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3784 	in_params.res_id = res_id;
3785 	in_params.resc_max_val = resc_max_val;
3786 	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3787 	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3788 					   &out_params);
3789 	if (rc != ECORE_SUCCESS)
3790 		return rc;
3791 
3792 	*p_mcp_resp = out_params.mcp_resp;
3793 
3794 	return ECORE_SUCCESS;
3795 }
3796 
3797 enum _ecore_status_t
3798 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3799 			enum ecore_resources res_id, u32 *p_mcp_resp,
3800 			u32 *p_resc_num, u32 *p_resc_start)
3801 {
3802 	struct ecore_resc_alloc_out_params out_params;
3803 	struct ecore_resc_alloc_in_params in_params;
3804 	enum _ecore_status_t rc;
3805 
3806 	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3807 	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3808 	in_params.res_id = res_id;
3809 	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3810 	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3811 					   &out_params);
3812 	if (rc != ECORE_SUCCESS)
3813 		return rc;
3814 
3815 	*p_mcp_resp = out_params.mcp_resp;
3816 
3817 	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3818 		*p_resc_num = out_params.resc_num;
3819 		*p_resc_start = out_params.resc_start;
3820 	}
3821 
3822 	return ECORE_SUCCESS;
3823 }
3824 
3825 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3826 					       struct ecore_ptt *p_ptt)
3827 {
3828 	u32 mcp_resp, mcp_param;
3829 
3830 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3831 			     &mcp_resp, &mcp_param);
3832 }
3833 
3834 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3835 						   struct ecore_ptt *p_ptt,
3836 						   u32 param, u32 *p_mcp_resp,
3837 						   u32 *p_mcp_param)
3838 {
3839 	enum _ecore_status_t rc;
3840 
3841 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3842 			   p_mcp_resp, p_mcp_param);
3843 	if (rc != ECORE_SUCCESS)
3844 		return rc;
3845 
3846 	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3847 		DP_INFO(p_hwfn,
3848 			"The resource command is unsupported by the MFW\n");
3849 		return ECORE_NOTIMPL;
3850 	}
3851 
3852 	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3853 		u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3854 
3855 		DP_NOTICE(p_hwfn, false,
3856 			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3857 			  param, opcode);
3858 		return ECORE_INVAL;
3859 	}
3860 
3861 	return rc;
3862 }
3863 
3864 enum _ecore_status_t
3865 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3866 		      struct ecore_resc_lock_params *p_params)
3867 {
3868 	u32 param = 0, mcp_resp, mcp_param;
3869 	u8 opcode;
3870 	enum _ecore_status_t rc;
3871 
3872 	switch (p_params->timeout) {
3873 	case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3874 		opcode = RESOURCE_OPCODE_REQ;
3875 		p_params->timeout = 0;
3876 		break;
3877 	case ECORE_MCP_RESC_LOCK_TO_NONE:
3878 		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3879 		p_params->timeout = 0;
3880 		break;
3881 	default:
3882 		opcode = RESOURCE_OPCODE_REQ_W_AGING;
3883 		break;
3884 	}
3885 
3886 	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3887 	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3888 	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3889 
3890 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3891 		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3892 		   param, p_params->timeout, opcode, p_params->resource);
3893 
3894 	/* Attempt to acquire the resource */
3895 	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3896 				    &mcp_param);
3897 	if (rc != ECORE_SUCCESS)
3898 		return rc;
3899 
3900 	/* Analyze the response */
3901 	p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3902 	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3903 
3904 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3905 		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3906 		   mcp_param, opcode, p_params->owner);
3907 
3908 	switch (opcode) {
3909 	case RESOURCE_OPCODE_GNT:
3910 		p_params->b_granted = true;
3911 		break;
3912 	case RESOURCE_OPCODE_BUSY:
3913 		p_params->b_granted = false;
3914 		break;
3915 	default:
3916 		DP_NOTICE(p_hwfn, false,
3917 			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3918 			  mcp_param, opcode);
3919 		return ECORE_INVAL;
3920 	}
3921 
3922 	return ECORE_SUCCESS;
3923 }
3924 
3925 enum _ecore_status_t
3926 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3927 		    struct ecore_resc_lock_params *p_params)
3928 {
3929 	u32 retry_cnt = 0;
3930 	enum _ecore_status_t rc;
3931 
3932 	do {
3933 		/* No need for an interval before the first iteration */
3934 		if (retry_cnt) {
3935 			if (p_params->sleep_b4_retry) {
3936 				u16 retry_interval_in_ms =
3937 					DIV_ROUND_UP(p_params->retry_interval,
3938 						     1000);
3939 
3940 				OSAL_MSLEEP(retry_interval_in_ms);
3941 			} else {
3942 				OSAL_UDELAY(p_params->retry_interval);
3943 			}
3944 		}
3945 
3946 		rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3947 		if (rc != ECORE_SUCCESS)
3948 			return rc;
3949 
3950 		if (p_params->b_granted)
3951 			break;
3952 	} while (retry_cnt++ < p_params->retry_num);
3953 
3954 	return ECORE_SUCCESS;
3955 }
3956 
3957 void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
3958 				      struct ecore_resc_unlock_params *p_unlock,
3959 				      enum ecore_resc_lock resource,
3960 				      bool b_is_permanent)
3961 {
3962 	if (p_lock != OSAL_NULL) {
3963 		OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
3964 
3965 		/* Permanent resources don't require aging, and there's no
3966 		 * point in trying to acquire them more than once since it's
3967 		 * unexpected another entity would release them.
3968 		 */
3969 		if (b_is_permanent) {
3970 			p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
3971 		} else {
3972 			p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3973 			p_lock->retry_interval =
3974 					ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3975 			p_lock->sleep_b4_retry = true;
3976 		}
3977 
3978 		p_lock->resource = resource;
3979 	}
3980 
3981 	if (p_unlock != OSAL_NULL) {
3982 		OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
3983 		p_unlock->resource = resource;
3984 	}
3985 }
3986 
3987 enum _ecore_status_t
3988 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3989 		      struct ecore_resc_unlock_params *p_params)
3990 {
3991 	u32 param = 0, mcp_resp, mcp_param;
3992 	u8 opcode;
3993 	enum _ecore_status_t rc;
3994 
3995 	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3996 				   : RESOURCE_OPCODE_RELEASE;
3997 	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3998 	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3999 
4000 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4001 		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
4002 		   param, opcode, p_params->resource);
4003 
4004 	/* Attempt to release the resource */
4005 	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
4006 				    &mcp_param);
4007 	if (rc != ECORE_SUCCESS)
4008 		return rc;
4009 
4010 	/* Analyze the response */
4011 	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
4012 
4013 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4014 		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
4015 		   mcp_param, opcode);
4016 
4017 	switch (opcode) {
4018 	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
4019 		DP_INFO(p_hwfn,
4020 			"Resource unlock request for an already released resource [%d]\n",
4021 			p_params->resource);
4022 		/* Fallthrough */
4023 	case RESOURCE_OPCODE_RELEASED:
4024 		p_params->b_released = true;
4025 		break;
4026 	case RESOURCE_OPCODE_WRONG_OWNER:
4027 		p_params->b_released = false;
4028 		break;
4029 	default:
4030 		DP_NOTICE(p_hwfn, false,
4031 			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
4032 			  mcp_param, opcode);
4033 		return ECORE_INVAL;
4034 	}
4035 
4036 	return ECORE_SUCCESS;
4037 }
4038 
4039 bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
4040 {
4041 	return !!(p_hwfn->mcp_info->capabilities &
4042 		  FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
4043 }
4044 
4045 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
4046 						struct ecore_ptt *p_ptt)
4047 {
4048 	u32 mcp_resp;
4049 	enum _ecore_status_t rc;
4050 
4051 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
4052 			   0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
4053 	if (rc == ECORE_SUCCESS)
4054 		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
4055 			   "MFW supported features: %08x\n",
4056 			   p_hwfn->mcp_info->capabilities);
4057 
4058 	return rc;
4059 }
4060 
4061 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
4062 						struct ecore_ptt *p_ptt)
4063 {
4064 	u32 mcp_resp, mcp_param, features;
4065 
4066 	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
4067 		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
4068 		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
4069 
4070 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
4071 			     features, &mcp_resp, &mcp_param);
4072 }
4073 
4074 enum _ecore_status_t
4075 ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4076 			struct ecore_mcp_drv_attr *p_drv_attr)
4077 {
4078 	struct attribute_cmd_write_stc attr_cmd_write;
4079 	enum _attribute_commands_e mfw_attr_cmd;
4080 	struct ecore_mcp_mb_params mb_params;
4081 	enum _ecore_status_t rc;
4082 
4083 	switch (p_drv_attr->attr_cmd) {
4084 	case ECORE_MCP_DRV_ATTR_CMD_READ:
4085 		mfw_attr_cmd = ATTRIBUTE_CMD_READ;
4086 		break;
4087 	case ECORE_MCP_DRV_ATTR_CMD_WRITE:
4088 		mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
4089 		break;
4090 	case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
4091 		mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
4092 		break;
4093 	case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
4094 		mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
4095 		break;
4096 	default:
4097 		DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
4098 			  p_drv_attr->attr_cmd);
4099 		return ECORE_INVAL;
4100 	}
4101 
4102 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4103 	mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
4104 	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
4105 		      p_drv_attr->attr_num);
4106 	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
4107 		      mfw_attr_cmd);
4108 	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
4109 		OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
4110 		attr_cmd_write.val = p_drv_attr->val;
4111 		attr_cmd_write.mask = p_drv_attr->mask;
4112 		attr_cmd_write.offset = p_drv_attr->offset;
4113 
4114 		mb_params.p_data_src = &attr_cmd_write;
4115 		mb_params.data_src_size = sizeof(attr_cmd_write);
4116 	}
4117 
4118 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4119 	if (rc != ECORE_SUCCESS)
4120 		return rc;
4121 
4122 	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4123 		DP_INFO(p_hwfn,
4124 			"The attribute command is not supported by the MFW\n");
4125 		return ECORE_NOTIMPL;
4126 	} else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
4127 		DP_INFO(p_hwfn,
4128 			"Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
4129 			mb_params.mcp_resp, p_drv_attr->attr_cmd,
4130 			p_drv_attr->attr_num);
4131 		return ECORE_INVAL;
4132 	}
4133 
4134 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4135 		   "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
4136 		   p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
4137 		   p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
4138 		   mb_params.mcp_param);
4139 
4140 	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
4141 	    p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
4142 		p_drv_attr->val = mb_params.mcp_param;
4143 
4144 	return ECORE_SUCCESS;
4145 }
4146 
4147 enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn,
4148 						 struct ecore_ptt *p_ptt)
4149 {
4150 	struct ecore_dev *p_dev = p_hwfn->p_dev;
4151 	struct ecore_mcp_mb_params mb_params;
4152 	u8 fir_valid, l2_valid;
4153 	enum _ecore_status_t rc;
4154 
4155 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4156 	mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
4157 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4158 	if (rc != ECORE_SUCCESS)
4159 		return rc;
4160 
4161 	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4162 		DP_INFO(p_hwfn,
4163 			"The get_engine_config command is unsupported by the MFW\n");
4164 		return ECORE_NOTIMPL;
4165 	}
4166 
4167 	fir_valid = GET_MFW_FIELD(mb_params.mcp_param,
4168 				  FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
4169 	if (fir_valid)
4170 		p_dev->fir_affin =
4171 			GET_MFW_FIELD(mb_params.mcp_param,
4172 				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
4173 
4174 	l2_valid = GET_MFW_FIELD(mb_params.mcp_param,
4175 				 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
4176 	if (l2_valid)
4177 		p_dev->l2_affin_hint =
4178 			GET_MFW_FIELD(mb_params.mcp_param,
4179 				      FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
4180 
4181 	DP_INFO(p_hwfn,
4182 		"Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
4183 		fir_valid, p_dev->fir_affin, l2_valid, p_dev->l2_affin_hint);
4184 
4185 	return ECORE_SUCCESS;
4186 }
4187 
4188 enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
4189 						struct ecore_ptt *p_ptt)
4190 {
4191 	struct ecore_dev *p_dev = p_hwfn->p_dev;
4192 	struct ecore_mcp_mb_params mb_params;
4193 	enum _ecore_status_t rc;
4194 
4195 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4196 	mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
4197 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4198 	if (rc != ECORE_SUCCESS)
4199 		return rc;
4200 
4201 	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4202 		DP_INFO(p_hwfn,
4203 			"The get_ppfid_bitmap command is unsupported by the MFW\n");
4204 		return ECORE_NOTIMPL;
4205 	}
4206 
4207 	p_dev->ppfid_bitmap = GET_MFW_FIELD(mb_params.mcp_param,
4208 					    FW_MB_PARAM_PPFID_BITMAP);
4209 
4210 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "PPFID bitmap 0x%hhx\n",
4211 		   p_dev->ppfid_bitmap);
4212 
4213 	return ECORE_SUCCESS;
4214 }
4215 
4216 void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4217 		      u32 offset, u32 val)
4218 {
4219 	enum _ecore_status_t	   rc = ECORE_SUCCESS;
4220 	u32			   dword = val;
4221 	struct ecore_mcp_mb_params mb_params;
4222 
4223 	OSAL_MEMSET(&mb_params, 0, sizeof(struct ecore_mcp_mb_params));
4224 	mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
4225 	mb_params.param = offset;
4226 	mb_params.p_data_src = &dword;
4227 	mb_params.data_src_size = sizeof(dword);
4228 
4229 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4230 	if (rc != ECORE_SUCCESS) {
4231 		DP_NOTICE(p_hwfn, false,
4232 			  "Failed to wol write request, rc = %d\n", rc);
4233 	}
4234 
4235 	if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
4236 		DP_NOTICE(p_hwfn, false,
4237 			  "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
4238 			  val, offset, mb_params.mcp_resp);
4239 		rc = ECORE_UNKNOWN_ERROR;
4240 	}
4241 }
4242