xref: /dpdk/drivers/net/qede/base/ecore_mcp.c (revision 0857b942113874c69dc3db5df11a828ee3cc9b6b)
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8 
9 #include "bcm_osal.h"
10 #include "ecore.h"
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
14 #include "reg_addr.h"
15 #include "ecore_hw.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
18 #include "ecore_vf.h"
19 #include "ecore_iov_api.h"
20 #include "ecore_gtt_reg_addr.h"
21 #include "ecore_iro.h"
22 #include "ecore_dcbx.h"
23 
24 #define CHIP_MCP_RESP_ITER_US 10
25 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
26 
27 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)	/* Account for 5 sec */
28 #define ECORE_MCP_RESET_RETRIES (50 * 1000)	/* Account for 500 msec */
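/* Each retry iteration waits CHIP_MCP_RESP_ITER_US (10 usec), so
 * 500 * 1000 iterations ~= 5 sec and 50 * 1000 iterations ~= 500 msec,
 * matching the comments above.
 */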
29 
30 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
31 	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
32 		 _val)
33 
34 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
35 	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
36 
37 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
38 	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
39 		     OFFSETOF(struct public_drv_mb, _field), _val)
40 
41 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
42 	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
43 		     OFFSETOF(struct public_drv_mb, _field))
44 
45 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
46 	DRV_ID_PDA_COMP_VER_SHIFT)
47 
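/* 2^17 = 131,072 ~= 10^6 / 8, i.e. the number of bytes in one Mbit, so a
 * 17-bit shift approximates a Mbit<->bytes conversion (an assumption based
 * on the name; the constant is used by bandwidth calculations elsewhere).
 */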
48 #define MCP_BYTES_PER_MBIT_SHIFT 17
49 
50 #ifndef ASIC_ONLY
51 static int loaded;
52 static int loaded_port[MAX_NUM_PORTS] = { 0 };
53 #endif
54 
55 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
56 {
57 	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
58 		return false;
59 	return true;
60 }
61 
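/* Resolve and cache the address of this PF's entry in the MFW "public
 * port" shmem section. An offsize dword packs both the section offset and
 * its size; SECTION_ADDR() derives the per-instance address from it.
 */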
62 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
63 {
64 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
65 					PUBLIC_PORT);
66 	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
67 
68 	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
69 						   MFW_PORT(p_hwfn));
70 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
71 		   "port_addr = 0x%x, port_id 0x%02x\n",
72 		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
73 }
74 
75 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
76 {
77 	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
78 	OSAL_BE32 tmp;
79 	u32 i;
80 
81 #ifndef ASIC_ONLY
82 	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
83 		return;
84 #endif
85 
86 	if (!p_hwfn->mcp_info->public_base)
87 		return;
88 
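	/* The mailbox data in shmem starts with a one-dword length header
	 * (read earlier into mfw_mb_length), hence the extra sizeof(u32)
	 * in the address computation below.
	 */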
89 	for (i = 0; i < length; i++) {
90 		tmp = ecore_rd(p_hwfn, p_ptt,
91 			       p_hwfn->mcp_info->mfw_mb_addr +
92 			       (i << 2) + sizeof(u32));
93 
94 		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
95 		    OSAL_BE32_TO_CPU(tmp);
96 	}
97 }
98 
99 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
100 {
101 	if (p_hwfn->mcp_info) {
102 		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
103 		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
104 		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
105 	}
106 	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
107 
108 	return ECORE_SUCCESS;
109 }
110 
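/* Read the shmem layout advertised by the MFW and derive the per-PF
 * driver/MFW mailbox addresses, the initial mailbox and pulse sequence
 * numbers, and the MCP history counter. Fails when the MFW has not
 * published a valid public_base (e.g. on emulation).
 */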
111 static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
112 						   struct ecore_ptt *p_ptt)
113 {
114 	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
115 	u32 drv_mb_offsize, mfw_mb_offsize;
116 	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
117 
118 #ifndef ASIC_ONLY
119 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
120 		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
121 		p_info->public_base = 0;
122 		return ECORE_INVAL;
123 	}
124 #endif
125 
126 	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
127 	if (!p_info->public_base)
128 		return ECORE_INVAL;
129 
130 	p_info->public_base |= GRCBASE_MCP;
131 
132 	/* Calculate the driver and MFW mailbox address */
133 	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
134 				  SECTION_OFFSIZE_ADDR(p_info->public_base,
135 						       PUBLIC_DRV_MB));
136 	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
137 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
138 		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x"
139 		   " mcp_pf_id = 0x%x\n",
140 		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
141 
142 	/* Set the MFW MB address */
143 	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
144 				  SECTION_OFFSIZE_ADDR(p_info->public_base,
145 						       PUBLIC_MFW_MB));
146 	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
147 	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
148 					       p_info->mfw_mb_addr);
149 
150 	/* Get the current driver mailbox sequence before sending
151 	 * the first command
152 	 */
153 	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
154 	    DRV_MSG_SEQ_NUMBER_MASK;
155 
156 	/* Get current FW pulse sequence */
157 	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
158 	    DRV_PULSE_SEQ_MASK;
159 
160 	p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
161 					  MISCS_REG_GENERIC_POR_0);
162 
163 	return ECORE_SUCCESS;
164 }
165 
166 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
167 					struct ecore_ptt *p_ptt)
168 {
169 	struct ecore_mcp_info *p_info;
170 	u32 size;
171 
172 	/* Allocate mcp_info structure */
173 	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
174 				       sizeof(*p_hwfn->mcp_info));
175 	if (!p_hwfn->mcp_info)
176 		goto err;
177 	p_info = p_hwfn->mcp_info;
178 
179 	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
180 		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
181 		/* Do not free mcp_info here, since public_base indicates that
182 		 * the MCP is not initialized
183 		 */
184 		return ECORE_SUCCESS;
185 	}
186 
187 	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
188 	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
189 	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
190 	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
191 		goto err;
192 
193 	/* Initialize the MFW spinlock */
194 	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
195 	OSAL_SPIN_LOCK_INIT(&p_info->lock);
196 
197 	return ECORE_SUCCESS;
198 
199 err:
200 	DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
201 	ecore_mcp_free(p_hwfn);
202 	return ECORE_NOMEM;
203 }
204 
205 /* Locks the MFW mailbox of a PF to ensure a single access.
206  * The lock is achieved in most cases by holding a spinlock, causing other
207  * threads to wait till a previous access is done.
208  * In some cases (currently when [UN]LOAD_REQ commands are sent), the single
209  * access is achieved by setting a blocking flag, which causes the mailbox
210  * commands of other competing contexts to fail.
211  */
212 static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
213 					      u32 cmd)
214 {
215 	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
216 
217 	/* The spinlock shouldn't be acquired when the mailbox command is
218 	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
219 	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
220 	 * (i.e. interrupts are disabled) can lead to a deadlock.
221 	 * It is assumed that for a single PF, no other mailbox commands can be
222 	 * sent from another context while sending LOAD_REQ, and that any
223 	 * parallel commands to UNLOAD_REQ can be cancelled.
224 	 */
225 	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
226 		p_hwfn->mcp_info->block_mb_sending = false;
227 
228 	if (p_hwfn->mcp_info->block_mb_sending) {
229 		DP_NOTICE(p_hwfn, false,
230 			  "Trying to send a MFW mailbox command [0x%x]"
231 			  " in parallel to [UN]LOAD_REQ. Aborting.\n",
232 			  cmd);
233 		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
234 		return ECORE_BUSY;
235 	}
236 
237 	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
238 		p_hwfn->mcp_info->block_mb_sending = true;
239 		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
240 	}
241 
242 	return ECORE_SUCCESS;
243 }
244 
245 static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
246 {
247 	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
248 		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
249 }
250 
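/* Illustrative usage of the lock/unlock pair above (a sketch, not code
 * taken from a caller):
 *
 *	rc = ecore_mcp_mb_lock(p_hwfn, cmd);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	... access the mailbox ...
 *	ecore_mcp_mb_unlock(p_hwfn, cmd);
 *
 * For [UN]LOAD_REQ the lock function releases the spinlock and leaves only
 * the blocking flag set, which is why the unlock function intentionally
 * does nothing for those commands.
 */
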
251 enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
252 				     struct ecore_ptt *p_ptt)
253 {
254 	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
255 	u32 delay = CHIP_MCP_RESP_ITER_US;
256 	u32 org_mcp_reset_seq, cnt = 0;
257 	enum _ecore_status_t rc = ECORE_SUCCESS;
258 
259 #ifndef ASIC_ONLY
260 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
261 		delay = EMUL_MCP_RESP_ITER_US;
262 #endif
263 
264 	/* Ensure that only a single thread is accessing the mailbox at any
265 	 * given time.
266 	 */
267 	rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
268 	if (rc != ECORE_SUCCESS)
269 		return rc;
270 
271 	/* Set drv command along with the updated sequence */
272 	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
273 	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
274 
275 	do {
276 		/* Wait for MFW response */
277 		OSAL_UDELAY(delay);
278 		/* Give the MFW up to 500 msec (50 * 1000 * 10 usec) */
279 	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
280 						MISCS_REG_GENERIC_POR_0)) &&
281 		 (cnt++ < ECORE_MCP_RESET_RETRIES));
282 
283 	if (org_mcp_reset_seq !=
284 	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
285 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
286 			   "MCP was reset after %d usec\n", cnt * delay);
287 	} else {
288 		DP_ERR(p_hwfn, "Failed to reset MCP\n");
289 		rc = ECORE_AGAIN;
290 	}
291 
292 	ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
293 
294 	return rc;
295 }
296 
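/* Issue a single mailbox command and poll for the MFW response:
 * increment the driver sequence number, write the param and then the
 * command (with the sequence embedded in the header), and poll the FW
 * mailbox header until the echoed sequence matches or the retry budget
 * is exhausted.
 */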
297 static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
298 					     struct ecore_ptt *p_ptt,
299 					     u32 cmd, u32 param,
300 					     u32 *o_mcp_resp,
301 					     u32 *o_mcp_param)
302 {
303 	u32 delay = CHIP_MCP_RESP_ITER_US;
304 	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
305 	u32 seq, cnt = 1, actual_mb_seq;
306 	enum _ecore_status_t rc = ECORE_SUCCESS;
307 
308 #ifndef ASIC_ONLY
309 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
310 		delay = EMUL_MCP_RESP_ITER_US;
311 	/* There is a built-in delay of 100usec in each MFW response read */
312 	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
313 		max_retries /= 10;
314 #endif
315 
316 	/* Get actual driver mailbox sequence */
317 	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
318 	    DRV_MSG_SEQ_NUMBER_MASK;
319 
320 	/* Use MCP history register to check if MCP reset occurred between
321 	 * init time and now.
322 	 */
323 	if (p_hwfn->mcp_info->mcp_hist !=
324 	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
325 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
326 		ecore_load_mcp_offsets(p_hwfn, p_ptt);
327 		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
328 	}
329 	seq = ++p_hwfn->mcp_info->drv_mb_seq;
330 
331 	/* Set drv param */
332 	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
333 
334 	/* Set drv command along with the updated sequence */
335 	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
336 
337 	do {
338 		/* Wait for MFW response */
339 		OSAL_UDELAY(delay);
340 		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
341 
342 		/* Give the MFW up to 5 sec (500 * 1000 * 10 usec) */
343 	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
344 		 (cnt++ < max_retries));
345 
346 	/* Is this a reply to our command? */
347 	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
348 		*o_mcp_resp &= FW_MSG_CODE_MASK;
349 		/* Get the MCP param */
350 		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
351 	} else {
352 		/* FW BUG! */
353 		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
354 		       cmd, param);
355 		*o_mcp_resp = 0;
356 		rc = ECORE_AGAIN;
357 		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
358 	}
359 	return rc;
360 }
361 
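/* Full-featured mailbox command: besides cmd/param, optionally copies a
 * payload into the union_data area of the driver mailbox before sending
 * the command, and reads a response payload back from it afterwards, all
 * under the mailbox lock.
 */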
362 static enum _ecore_status_t
363 ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
364 			struct ecore_ptt *p_ptt,
365 			struct ecore_mcp_mb_params *p_mb_params)
366 {
367 	union drv_union_data union_data;
368 	u32 union_data_addr;
369 	enum _ecore_status_t rc;
370 
371 	/* MCP not initialized */
372 	if (!ecore_mcp_is_init(p_hwfn)) {
373 		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
374 		return ECORE_BUSY;
375 	}
376 
377 	if (p_mb_params->data_src_size > sizeof(union_data) ||
378 	    p_mb_params->data_dst_size > sizeof(union_data)) {
379 		DP_ERR(p_hwfn,
380 		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
381 		       p_mb_params->data_src_size, p_mb_params->data_dst_size,
382 		       sizeof(union_data));
383 		return ECORE_INVAL;
384 	}
385 
386 	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
387 			  OFFSETOF(struct public_drv_mb, union_data);
388 
389 	/* Ensure that only a single thread is accessing the mailbox at any
390 	 * given time.
391 	 */
392 	rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
393 	if (rc != ECORE_SUCCESS)
394 		return rc;
395 
396 	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
397 	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
398 		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
399 			    p_mb_params->data_src_size);
400 	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
401 			sizeof(union_data));
402 
403 	rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
404 			      p_mb_params->param, &p_mb_params->mcp_resp,
405 			      &p_mb_params->mcp_param);
406 
407 	if (p_mb_params->p_data_dst != OSAL_NULL &&
408 	    p_mb_params->data_dst_size)
409 		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
410 				  union_data_addr, p_mb_params->data_dst_size);
411 
412 	ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
413 
414 	return rc;
415 }
416 
417 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
418 				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
419 				   u32 *o_mcp_resp, u32 *o_mcp_param)
420 {
421 	struct ecore_mcp_mb_params mb_params;
422 	enum _ecore_status_t rc;
423 
424 #ifndef ASIC_ONLY
425 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
426 		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
427 			loaded--;
428 			loaded_port[p_hwfn->port_id]--;
429 			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
430 				   loaded);
431 		}
432 		return ECORE_SUCCESS;
433 	}
434 #endif
435 
436 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
437 	mb_params.cmd = cmd;
438 	mb_params.param = param;
439 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
440 	if (rc != ECORE_SUCCESS)
441 		return rc;
442 
443 	*o_mcp_resp = mb_params.mcp_resp;
444 	*o_mcp_param = mb_params.mcp_param;
445 
446 	return ECORE_SUCCESS;
447 }
448 
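/* Illustrative caller-side usage of ecore_mcp_cmd() (a sketch; the
 * command value is just an example taken from this file):
 *
 *	u32 resp = 0, param = 0;
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0,
 *			   &resp, &param);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *
 * On success, resp holds the FW_MSG_CODE_* response and param holds the
 * response parameter.
 */
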
449 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
450 					  struct ecore_ptt *p_ptt,
451 					  u32 cmd,
452 					  u32 param,
453 					  u32 *o_mcp_resp,
454 					  u32 *o_mcp_param,
455 					  u32 i_txn_size, u32 *i_buf)
456 {
457 	struct ecore_mcp_mb_params mb_params;
458 	enum _ecore_status_t rc;
459 
460 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
461 	mb_params.cmd = cmd;
462 	mb_params.param = param;
463 	mb_params.p_data_src = i_buf;
464 	mb_params.data_src_size = (u8)i_txn_size;
465 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
466 	if (rc != ECORE_SUCCESS)
467 		return rc;
468 
469 	*o_mcp_resp = mb_params.mcp_resp;
470 	*o_mcp_param = mb_params.mcp_param;
471 
472 	return ECORE_SUCCESS;
473 }
474 
475 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
476 					  struct ecore_ptt *p_ptt,
477 					  u32 cmd,
478 					  u32 param,
479 					  u32 *o_mcp_resp,
480 					  u32 *o_mcp_param,
481 					  u32 *o_txn_size, u32 *o_buf)
482 {
483 	struct ecore_mcp_mb_params mb_params;
484 	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
485 	enum _ecore_status_t rc;
486 
487 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
488 	mb_params.cmd = cmd;
489 	mb_params.param = param;
490 	mb_params.p_data_dst = raw_data;
491 
492 	/* Use the maximal value since the actual one is part of the response */
493 	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
494 
495 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
496 	if (rc != ECORE_SUCCESS)
497 		return rc;
498 
499 	*o_mcp_resp = mb_params.mcp_resp;
500 	*o_mcp_param = mb_params.mcp_param;
501 
502 	*o_txn_size = *o_mcp_param;
503 	/* @DPDK */
504 	OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));
505 
506 	return ECORE_SUCCESS;
507 }
508 
509 #ifndef ASIC_ONLY
510 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
511 				    u32 *p_load_code)
512 {
513 	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
514 
515 	if (!loaded)
516 		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
517 	else if (!loaded_port[p_hwfn->port_id])
518 		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
519 	else
520 		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
521 
522 	/* On CMT, always tell that it's engine */
523 	if (p_hwfn->p_dev->num_hwfns > 1)
524 		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
525 
526 	*p_load_code = load_phase;
527 	loaded++;
528 	loaded_port[p_hwfn->port_id]++;
529 
530 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
531 		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
532 		   *p_load_code, loaded, p_hwfn->port_id,
533 		   loaded_port[p_hwfn->port_id]);
534 }
535 #endif
536 
537 static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
538 {
539 	return (drv_role == DRV_ROLE_OS &&
540 		exist_drv_role == DRV_ROLE_PREBOOT) ||
541 	       (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
542 }
543 
544 static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
545 						      struct ecore_ptt *p_ptt)
546 {
547 	u32 resp = 0, param = 0;
548 	enum _ecore_status_t rc;
549 
550 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
551 			   &resp, &param);
552 	if (rc != ECORE_SUCCESS)
553 		DP_NOTICE(p_hwfn, false,
554 			  "Failed to send cancel load request, rc = %d\n", rc);
555 
556 	return rc;
557 }
558 
559 #define CONFIG_ECORE_L2_BITMAP_IDX	(0x1 << 0)
560 #define CONFIG_ECORE_SRIOV_BITMAP_IDX	(0x1 << 1)
561 #define CONFIG_ECORE_ROCE_BITMAP_IDX	(0x1 << 2)
562 #define CONFIG_ECORE_IWARP_BITMAP_IDX	(0x1 << 3)
563 #define CONFIG_ECORE_FCOE_BITMAP_IDX	(0x1 << 4)
564 #define CONFIG_ECORE_ISCSI_BITMAP_IDX	(0x1 << 5)
565 #define CONFIG_ECORE_LL2_BITMAP_IDX	(0x1 << 6)
566 
567 static u32 ecore_get_config_bitmap(void)
568 {
569 	u32 config_bitmap = 0x0;
570 
571 #ifdef CONFIG_ECORE_L2
572 	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
573 #endif
574 #ifdef CONFIG_ECORE_SRIOV
575 	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
576 #endif
577 #ifdef CONFIG_ECORE_ROCE
578 	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
579 #endif
580 #ifdef CONFIG_ECORE_IWARP
581 	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
582 #endif
583 #ifdef CONFIG_ECORE_FCOE
584 	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
585 #endif
586 #ifdef CONFIG_ECORE_ISCSI
587 	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
588 #endif
589 #ifdef CONFIG_ECORE_LL2
590 	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
591 #endif
592 
593 	return config_bitmap;
594 }
595 
596 struct ecore_load_req_in_params {
597 	u8 hsi_ver;
598 #define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
599 #define ECORE_LOAD_REQ_HSI_VER_1	1
600 	u32 drv_ver_0;
601 	u32 drv_ver_1;
602 	u32 fw_ver;
603 	u8 drv_role;
604 	u8 timeout_val;
605 	u8 force_cmd;
606 	bool avoid_eng_reset;
607 };
608 
609 struct ecore_load_req_out_params {
610 	u32 load_code;
611 	u32 exist_drv_ver_0;
612 	u32 exist_drv_ver_1;
613 	u32 exist_fw_ver;
614 	u8 exist_drv_role;
615 	u8 mfw_hsi_ver;
616 	bool drv_exists;
617 };
618 
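/* Send a LOAD_REQ to the MFW and parse its response. With the default HSI,
 * the request/response structs (load_req_stc/load_rsp_stc) are passed
 * through the mailbox union data; with the legacy HSI version 1, only the
 * mailbox param is meaningful and the response struct is not parsed.
 */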
619 static enum _ecore_status_t
620 __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
621 		     struct ecore_load_req_in_params *p_in_params,
622 		     struct ecore_load_req_out_params *p_out_params)
623 {
624 	struct ecore_mcp_mb_params mb_params;
625 	struct load_req_stc load_req;
626 	struct load_rsp_stc load_rsp;
627 	u32 hsi_ver;
628 	enum _ecore_status_t rc;
629 
630 	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
631 	load_req.drv_ver_0 = p_in_params->drv_ver_0;
632 	load_req.drv_ver_1 = p_in_params->drv_ver_1;
633 	load_req.fw_ver = p_in_params->fw_ver;
634 	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE,
635 			    p_in_params->drv_role);
636 	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
637 			    p_in_params->timeout_val);
638 	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
639 			    p_in_params->force_cmd);
640 	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
641 			    p_in_params->avoid_eng_reset);
642 
643 	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
644 		  DRV_ID_MCP_HSI_VER_CURRENT :
645 		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
646 
647 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
648 	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
649 	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
650 	mb_params.p_data_src = &load_req;
651 	mb_params.data_src_size = sizeof(load_req);
652 	mb_params.p_data_dst = &load_rsp;
653 	mb_params.data_dst_size = sizeof(load_rsp);
654 
655 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
656 		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
657 		   mb_params.param,
658 		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
659 		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
660 		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
661 		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
662 
663 	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
664 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
665 			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
666 			   load_req.drv_ver_0, load_req.drv_ver_1,
667 			   load_req.fw_ver, load_req.misc0,
668 			   ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
669 			   ECORE_MFW_GET_FIELD(load_req.misc0,
670 					       LOAD_REQ_LOCK_TO),
671 			   ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
672 			   ECORE_MFW_GET_FIELD(load_req.misc0,
673 					       LOAD_REQ_FLAGS0));
674 
675 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
676 	if (rc != ECORE_SUCCESS) {
677 		DP_NOTICE(p_hwfn, false,
678 			  "Failed to send load request, rc = %d\n", rc);
679 		return rc;
680 	}
681 
682 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
683 		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
684 	p_out_params->load_code = mb_params.mcp_resp;
685 
686 	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
687 	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
688 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
689 			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
690 			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
691 			   load_rsp.fw_ver, load_rsp.misc0,
692 			   ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
693 			   ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
694 			   ECORE_MFW_GET_FIELD(load_rsp.misc0,
695 					       LOAD_RSP_FLAGS0));
696 
697 		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
698 		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
699 		p_out_params->exist_fw_ver = load_rsp.fw_ver;
700 		p_out_params->exist_drv_role =
701 			ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
702 		p_out_params->mfw_hsi_ver =
703 			ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
704 		p_out_params->drv_exists =
705 			ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
706 			LOAD_RSP_FLAGS0_DRV_EXISTS;
707 	}
708 
709 	return ECORE_SUCCESS;
710 }
711 
712 static enum _ecore_status_t ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
713 						   enum ecore_drv_role drv_role,
714 						   u8 *p_mfw_drv_role)
715 {
716 	switch (drv_role) {
717 	case ECORE_DRV_ROLE_OS:
718 		*p_mfw_drv_role = DRV_ROLE_OS;
719 		break;
720 	case ECORE_DRV_ROLE_KDUMP:
721 		*p_mfw_drv_role = DRV_ROLE_KDUMP;
722 		break;
723 	default:
724 		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
725 		return ECORE_INVAL;
726 	}
727 
728 	return ECORE_SUCCESS;
729 }
730 
731 enum ecore_load_req_force {
732 	ECORE_LOAD_REQ_FORCE_NONE,
733 	ECORE_LOAD_REQ_FORCE_PF,
734 	ECORE_LOAD_REQ_FORCE_ALL,
735 };
736 
737 static enum _ecore_status_t
738 ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
739 			enum ecore_load_req_force force_cmd,
740 			u8 *p_mfw_force_cmd)
741 {
742 	switch (force_cmd) {
743 	case ECORE_LOAD_REQ_FORCE_NONE:
744 		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
745 		break;
746 	case ECORE_LOAD_REQ_FORCE_PF:
747 		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
748 		break;
749 	case ECORE_LOAD_REQ_FORCE_ALL:
750 		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
751 		break;
752 	default:
753 		DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
754 		return ECORE_INVAL;
755 	}
756 
757 	return ECORE_SUCCESS;
758 }
759 
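/* Driver-facing LOAD_REQ flow: build the request from p_params, send it,
 * and handle the special responses - resend with the legacy HSI if the MFW
 * refuses the current one, and send a force load request when the MFW
 * requires it and the role transition permits it.
 */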
760 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
761 					struct ecore_ptt *p_ptt,
762 					struct ecore_load_req_params *p_params)
763 {
764 	struct ecore_load_req_out_params out_params;
765 	struct ecore_load_req_in_params in_params;
766 	u8 mfw_drv_role, mfw_force_cmd;
767 	enum _ecore_status_t rc;
768 
769 #ifndef ASIC_ONLY
770 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
771 		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
772 		return ECORE_SUCCESS;
773 	}
774 #endif
775 
776 	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
777 	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
778 	in_params.drv_ver_0 = ECORE_VERSION;
779 	in_params.drv_ver_1 = ecore_get_config_bitmap();
780 	in_params.fw_ver = STORM_FW_VERSION;
781 	rc = ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
782 	if (rc != ECORE_SUCCESS)
783 		return rc;
784 
785 	in_params.drv_role = mfw_drv_role;
786 	in_params.timeout_val = p_params->timeout_val;
787 	rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
788 				     &mfw_force_cmd);
789 	if (rc != ECORE_SUCCESS)
790 		return rc;
791 
792 	in_params.force_cmd = mfw_force_cmd;
793 	in_params.avoid_eng_reset = p_params->avoid_eng_reset;
794 
795 	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
796 	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
797 	if (rc != ECORE_SUCCESS)
798 		return rc;
799 
800 	/* First handle cases where another load request should/might be sent:
801 	 * - MFW expects the old interface [HSI version = 1]
802 	 * - MFW responds that a force load request is required
803 	 */
804 	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
805 		DP_INFO(p_hwfn,
806 			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
807 
808 		/* The previous load request set the mailbox blocking */
809 		p_hwfn->mcp_info->block_mb_sending = false;
810 
811 		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
812 		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
813 		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
814 					  &out_params);
815 		if (rc != ECORE_SUCCESS)
816 			return rc;
817 	} else if (out_params.load_code ==
818 		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
819 		/* The previous load request set the mailbox blocking */
820 		p_hwfn->mcp_info->block_mb_sending = false;
821 
822 		if (ecore_mcp_can_force_load(in_params.drv_role,
823 					     out_params.exist_drv_role)) {
824 			DP_INFO(p_hwfn,
825 				"A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
826 				out_params.exist_drv_role,
827 				out_params.exist_fw_ver,
828 				out_params.exist_drv_ver_0,
829 				out_params.exist_drv_ver_1);
830 
831 			rc = ecore_get_mfw_force_cmd(p_hwfn,
832 						     ECORE_LOAD_REQ_FORCE_ALL,
833 						     &mfw_force_cmd);
834 			if (rc != ECORE_SUCCESS)
835 				return rc;
836 
837 			in_params.force_cmd = mfw_force_cmd;
838 			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
839 			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
840 						  &out_params);
841 			if (rc != ECORE_SUCCESS)
842 				return rc;
843 		} else {
844 			DP_NOTICE(p_hwfn, false,
845 				  "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Not sending it, to avoid disrupting the active PFs.\n",
846 				  out_params.exist_drv_role,
847 				  out_params.exist_fw_ver,
848 				  out_params.exist_drv_ver_0,
849 				  out_params.exist_drv_ver_1);
850 
851 			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
852 			return ECORE_BUSY;
853 		}
854 	}
855 
856 	/* Now handle the other types of responses.
857 	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
858 	 * expected here after the additional revised load requests were sent.
859 	 */
860 	switch (out_params.load_code) {
861 	case FW_MSG_CODE_DRV_LOAD_ENGINE:
862 	case FW_MSG_CODE_DRV_LOAD_PORT:
863 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
864 		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
865 		    out_params.drv_exists) {
866 			/* The role and fw/driver version match, but the PF is
867 			 * already loaded and has not been unloaded gracefully.
868 			 * This is unexpected since a quasi-FLR request was
869 			 * previously sent as part of ecore_hw_prepare().
870 			 */
871 			DP_NOTICE(p_hwfn, false,
872 				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
873 			return ECORE_INVAL;
874 		}
875 		break;
876 	case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
877 	case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
878 	case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
879 	case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
880 		DP_NOTICE(p_hwfn, false,
881 			  "MFW refused a load request [resp 0x%08x]. Aborting.\n",
882 			  out_params.load_code);
883 		return ECORE_BUSY;
884 	default:
885 		DP_NOTICE(p_hwfn, false,
886 			  "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
887 			  out_params.load_code);
888 		break;
889 	}
890 
891 	p_params->load_code = out_params.load_code;
892 
893 	return ECORE_SUCCESS;
894 }
895 
896 enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
897 					  struct ecore_ptt *p_ptt)
898 {
899 	u32 wol_param, mcp_resp, mcp_param;
900 
901 	/* @DPDK */
902 	wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
903 
904 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
905 			     &mcp_resp, &mcp_param);
906 }
907 
908 enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
909 					   struct ecore_ptt *p_ptt)
910 {
911 	struct ecore_mcp_mb_params mb_params;
913 
914 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
915 	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
916 
917 	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
918 }
919 
920 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
921 				    struct ecore_ptt *p_ptt)
922 {
923 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
924 					PUBLIC_PATH);
925 	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
926 	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
927 				     ECORE_PATH_ID(p_hwfn));
928 	u32 disabled_vfs[VF_MAX_STATIC / 32];
929 	int i;
930 
931 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
932 		   "Reading Disabled VF information from [offset %08x],"
933 		   " path_addr %08x\n",
934 		   mfw_path_offsize, path_addr);
935 
936 	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
937 		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
938 					   path_addr +
939 					   OFFSETOF(struct public_path,
940 						    mcp_vf_disabled) +
941 					   sizeof(u32) * i);
942 		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
943 			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
944 			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
945 	}
946 
947 	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
948 		OSAL_VF_FLR_UPDATE(p_hwfn);
949 }
950 
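/* Acknowledge to the MFW that the driver has finished handling FLR for the
 * VFs whose bits are set in vfs_to_ack (a bitmask of VF_MAX_STATIC bits).
 */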
951 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
952 					  struct ecore_ptt *p_ptt,
953 					  u32 *vfs_to_ack)
954 {
955 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
956 					PUBLIC_FUNC);
957 	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
958 	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
959 				     MCP_PF_ID(p_hwfn));
960 	struct ecore_mcp_mb_params mb_params;
961 	enum _ecore_status_t rc;
962 	int i;
963 
964 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
965 		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
966 			   "Acking VFs [%08x,...,%08x] - %08x\n",
967 			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
968 
969 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
970 	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
971 	mb_params.p_data_src = vfs_to_ack;
972 	mb_params.data_src_size = VF_MAX_STATIC / 8;
973 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
974 				     &mb_params);
975 	if (rc != ECORE_SUCCESS) {
976 		DP_NOTICE(p_hwfn, false,
977 			  "Failed to pass ACK for VF flr to MFW\n");
978 		return ECORE_TIMEOUT;
979 	}
980 
981 	/* TMP - clear the ACK bits; should be done by MFW */
982 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
983 		ecore_wr(p_hwfn, p_ptt,
984 			 func_addr +
985 			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
986 			 i * sizeof(u32), 0);
987 
988 	return rc;
989 }
990 
991 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
992 						struct ecore_ptt *p_ptt)
993 {
994 	u32 transceiver_state;
995 
996 	transceiver_state = ecore_rd(p_hwfn, p_ptt,
997 				     p_hwfn->mcp_info->port_addr +
998 				     OFFSETOF(struct public_port,
999 					      transceiver_data));
1000 
1001 	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
1002 		   "Received transceiver state update [0x%08x] from mfw"
1003 		   " [Addr 0x%x]\n",
1004 		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
1005 					    OFFSETOF(struct public_port,
1006 						     transceiver_data)));
1007 
1008 	transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
1009 
1010 	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1011 		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
1012 	else
1013 		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
1014 }
1015 
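/* Refresh the cached link state from the port's link_status field in shmem
 * (or zero it when b_reset is set), re-apply the min/max bandwidth
 * configuration, and notify the upper layer via OSAL_LINK_UPDATE().
 */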
1016 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
1017 					 struct ecore_ptt *p_ptt,
1018 					 bool b_reset)
1019 {
1020 	struct ecore_mcp_link_state *p_link;
1021 	u8 max_bw, min_bw;
1022 	u32 status = 0;
1023 
1024 	p_link = &p_hwfn->mcp_info->link_output;
1025 	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
1026 	if (!b_reset) {
1027 		status = ecore_rd(p_hwfn, p_ptt,
1028 				  p_hwfn->mcp_info->port_addr +
1029 				  OFFSETOF(struct public_port, link_status));
1030 		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
1031 			   "Received link update [0x%08x] from mfw"
1032 			   " [Addr 0x%x]\n",
1033 			   status, (u32)(p_hwfn->mcp_info->port_addr +
1034 					  OFFSETOF(struct public_port,
1035 						   link_status)));
1036 	} else {
1037 		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1038 			   "Resetting link indications\n");
1039 		return;
1040 	}
1041 
1042 	if (p_hwfn->b_drv_link_init)
1043 		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1044 	else
1045 		p_link->link_up = false;
1046 
1047 	p_link->full_duplex = true;
1048 	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1049 	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1050 		p_link->speed = 100000;
1051 		break;
1052 	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1053 		p_link->speed = 50000;
1054 		break;
1055 	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1056 		p_link->speed = 40000;
1057 		break;
1058 	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1059 		p_link->speed = 25000;
1060 		break;
1061 	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1062 		p_link->speed = 20000;
1063 		break;
1064 	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1065 		p_link->speed = 10000;
1066 		break;
1067 	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1068 		p_link->full_duplex = false;
1069 		/* Fall-through */
1070 	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1071 		p_link->speed = 1000;
1072 		break;
1073 	default:
1074 		p_link->speed = 0;
1075 	}
1076 
1077 	/* We never store the total line speed here, as p_link->speed may
1078 	 * change later according to the bandwidth allocation.
1079 	 */
1080 	if (p_link->link_up && p_link->speed)
1081 		p_link->line_speed = p_link->speed;
1082 	else
1083 		p_link->line_speed = 0;
1084 
1085 	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1086 	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1087 
1088 	/* Max bandwidth configuration */
1089 	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
1090 					   p_link, max_bw);
1091 
1092 	/* Min bandwidth configuration */
1093 	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
1094 					   p_link, min_bw);
1095 	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
1096 					      p_link->min_pf_rate);
1097 
1098 	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1099 	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1100 	p_link->parallel_detection = !!(status &
1101 					 LINK_STATUS_PARALLEL_DETECTION_USED);
1102 	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1103 
1104 	p_link->partner_adv_speed |=
1105 	    (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1106 	    ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
1107 	p_link->partner_adv_speed |=
1108 	    (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1109 	    ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
1110 	p_link->partner_adv_speed |=
1111 	    (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1112 	    ECORE_LINK_PARTNER_SPEED_10G : 0;
1113 	p_link->partner_adv_speed |=
1114 	    (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1115 	    ECORE_LINK_PARTNER_SPEED_20G : 0;
1116 	p_link->partner_adv_speed |=
1117 	    (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1118 	    ECORE_LINK_PARTNER_SPEED_25G : 0;
1119 	p_link->partner_adv_speed |=
1120 	    (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1121 	    ECORE_LINK_PARTNER_SPEED_40G : 0;
1122 	p_link->partner_adv_speed |=
1123 	    (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1124 	    ECORE_LINK_PARTNER_SPEED_50G : 0;
1125 	p_link->partner_adv_speed |=
1126 	    (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1127 	    ECORE_LINK_PARTNER_SPEED_100G : 0;
1128 
1129 	p_link->partner_tx_flow_ctrl_en =
1130 	    !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1131 	p_link->partner_rx_flow_ctrl_en =
1132 	    !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1133 
1134 	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1135 	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1136 		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
1137 		break;
1138 	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1139 		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
1140 		break;
1141 	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1142 		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
1143 		break;
1144 	default:
1145 		p_link->partner_adv_pause = 0;
1146 	}
1147 
1148 	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1149 
1150 	OSAL_LINK_UPDATE(p_hwfn);
1151 }
1152 
1153 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
1154 					struct ecore_ptt *p_ptt, bool b_up)
1155 {
1156 	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1157 	struct ecore_mcp_mb_params mb_params;
1158 	struct eth_phy_cfg phy_cfg;
1159 	enum _ecore_status_t rc = ECORE_SUCCESS;
1160 	u32 cmd;
1161 
1162 #ifndef ASIC_ONLY
1163 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1164 		return ECORE_SUCCESS;
1165 #endif
1166 
1167 	/* Set the shmem configuration according to params */
1168 	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
1169 	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1170 	if (!params->speed.autoneg)
1171 		phy_cfg.speed = params->speed.forced_speed;
1172 	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1173 	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1174 	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1175 	phy_cfg.adv_speed = params->speed.advertised_speeds;
1176 	phy_cfg.loopback_mode = params->loopback_mode;
1177 	p_hwfn->b_drv_link_init = b_up;
1178 
1179 	if (b_up)
1180 		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1181 			   "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
1182 			   " adv_speed 0x%08x, loopback 0x%08x\n",
1183 			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
1184 			   phy_cfg.loopback_mode);
1185 	else
1186 		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
1187 
1188 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1189 	mb_params.cmd = cmd;
1190 	mb_params.p_data_src = &phy_cfg;
1191 	mb_params.data_src_size = sizeof(phy_cfg);
1192 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1193 
1194 	/* if mcp fails to respond we must abort */
1195 	if (rc != ECORE_SUCCESS) {
1196 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1197 		return rc;
1198 	}
1199 
1200 	/* Reset the link status if needed */
1201 	if (!b_up)
1202 		ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);
1203 
1204 	return rc;
1205 }
1206 
1207 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
1208 				   struct ecore_ptt *p_ptt)
1209 {
1210 	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1211 
1212 	/* TODO - Add support for VFs */
1213 	if (IS_VF(p_hwfn->p_dev))
1214 		return ECORE_INVAL;
1215 
1216 	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1217 						 PUBLIC_PATH);
1218 	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
1219 	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
1220 
1221 	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
1222 				 path_addr +
1223 				 OFFSETOF(struct public_path, process_kill)) &
1224 	    PROCESS_KILL_COUNTER_MASK;
1225 
1226 	return proc_kill_cnt;
1227 }
1228 
1229 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
1230 					  struct ecore_ptt *p_ptt)
1231 {
1232 	struct ecore_dev *p_dev = p_hwfn->p_dev;
1233 	u32 proc_kill_cnt;
1234 
1235 	/* Prevent possible attentions/interrupts during the recovery handling
1236 	 * and until its load phase, during which they will be re-enabled.
1237 	 */
1238 	ecore_int_igu_disable_int(p_hwfn, p_ptt);
1239 
1240 	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
1241 
1242 	/* The following operations should be done once, and thus in CMT mode
1243 	 * are carried out by only the first HW function.
1244 	 */
1245 	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
1246 		return;
1247 
1248 	if (p_dev->recov_in_prog) {
1249 		DP_NOTICE(p_hwfn, false,
1250 			  "Ignoring the indication since a recovery"
1251 			  " process is already in progress\n");
1252 		return;
1253 	}
1254 
1255 	p_dev->recov_in_prog = true;
1256 
1257 	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1258 	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1259 
1260 	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1261 }
1262 
1263 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1264 					  struct ecore_ptt *p_ptt,
1265 					  enum MFW_DRV_MSG_TYPE type)
1266 {
1267 	enum ecore_mcp_protocol_type stats_type;
1268 	union ecore_mcp_protocol_stats stats;
1269 	struct ecore_mcp_mb_params mb_params;
1270 	u32 hsi_param;
1271 	enum _ecore_status_t rc;
1272 
1273 	switch (type) {
1274 	case MFW_DRV_MSG_GET_LAN_STATS:
1275 		stats_type = ECORE_MCP_LAN_STATS;
1276 		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1277 		break;
1278 	default:
1279 		DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
1280 		return;
1281 	}
1282 
1283 	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1284 
1285 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1286 	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1287 	mb_params.param = hsi_param;
1288 	mb_params.p_data_src = &stats;
1289 	mb_params.data_src_size = sizeof(stats);
1290 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1291 	if (rc != ECORE_SUCCESS)
1292 		DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
1293 }
1294 
1295 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
1296 				    struct public_func *p_shmem_info)
1297 {
1298 	struct ecore_mcp_function_info *p_info;
1299 
1300 	p_info = &p_hwfn->mcp_info->func_info;
1301 
1302 	/* TODO - bandwidth min/max should have valid values of 1-100,
1303 	 * as well as some indication that the feature is disabled.
1304 	 * Until MFW/qlediag enforce those limitations, assume there is
1305 	 * always a limit, and clamp the value to min 1 / max 100 if it
1306 	 * isn't in range.
1307 	 */
1308 	p_info->bandwidth_min = (p_shmem_info->config &
1309 				 FUNC_MF_CFG_MIN_BW_MASK) >>
1310 	    FUNC_MF_CFG_MIN_BW_SHIFT;
1311 	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1312 		DP_INFO(p_hwfn,
1313 			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
1314 			p_info->bandwidth_min);
1315 		p_info->bandwidth_min = 1;
1316 	}
1317 
1318 	p_info->bandwidth_max = (p_shmem_info->config &
1319 				 FUNC_MF_CFG_MAX_BW_MASK) >>
1320 	    FUNC_MF_CFG_MAX_BW_SHIFT;
1321 	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1322 		DP_INFO(p_hwfn,
1323 			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
1324 			p_info->bandwidth_max);
1325 		p_info->bandwidth_max = 100;
1326 	}
1327 }
1328 
1329 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1330 				    struct ecore_ptt *p_ptt,
1331 				    struct public_func *p_data,
1332 				    int pfid)
1333 {
1334 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1335 					PUBLIC_FUNC);
1336 	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1337 	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1338 	u32 i, size;
1339 
1340 	OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1341 
1342 	size = OSAL_MIN_T(u32, sizeof(*p_data),
1343 			  SECTION_SIZE(mfw_path_offsize));
1344 	for (i = 0; i < size / sizeof(u32); i++)
1345 		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1346 					      func_addr + (i << 2));
1347 
1348 	return size;
1349 }
1350 
1351 static void
1352 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1353 {
1354 	struct ecore_mcp_function_info *p_info;
1355 	struct public_func shmem_info;
1356 	u32 resp = 0, param = 0;
1357 
1358 	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1359 
1360 	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1361 
1362 	p_info = &p_hwfn->mcp_info->func_info;
1363 
1364 	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1365 
1366 	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1367 
1368 	/* Acknowledge the MFW */
1369 	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1370 		      &param);
1371 }
1372 
1373 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
1374 					 struct ecore_ptt *p_ptt)
1375 {
1376 	/* A single notification should be sent to the upper driver in CMT mode */
1377 	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1378 		return;
1379 
1380 	DP_NOTICE(p_hwfn, false,
1381 		  "Fan failure was detected on the network interface card"
1382 		  " and it's going to be shut down.\n");
1383 
1384 	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1385 }
1386 
1387 struct ecore_mdump_cmd_params {
1388 	u32 cmd;
1389 	void *p_data_src;
1390 	u8 data_src_size;
1391 	void *p_data_dst;
1392 	u8 data_dst_size;
1393 	u32 mcp_resp;
1394 };
1395 
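/* Common wrapper for the DRV_MSG_CODE_MDUMP_CMD mailbox; the specific
 * mdump sub-command and its optional payload/response buffers are given
 * by p_mdump_cmd_params.
 */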
1396 static enum _ecore_status_t
1397 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1398 		    struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1399 {
1400 	struct ecore_mcp_mb_params mb_params;
1401 	enum _ecore_status_t rc;
1402 
1403 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1404 	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1405 	mb_params.param = p_mdump_cmd_params->cmd;
1406 	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1407 	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1408 	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1409 	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1410 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1411 	if (rc != ECORE_SUCCESS)
1412 		return rc;
1413 
1414 	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1415 	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1416 		DP_NOTICE(p_hwfn, false,
1417 			  "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
1418 			  p_mdump_cmd_params->cmd);
1419 		rc = ECORE_INVAL;
1420 	}
1421 
1422 	return rc;
1423 }
1424 
1425 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1426 						struct ecore_ptt *p_ptt)
1427 {
1428 	struct ecore_mdump_cmd_params mdump_cmd_params;
1429 
1430 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1431 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1432 
1433 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1434 }
1435 
1436 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1437 						struct ecore_ptt *p_ptt,
1438 						u32 epoch)
1439 {
1440 	struct ecore_mdump_cmd_params mdump_cmd_params;
1441 
1442 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1443 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1444 	mdump_cmd_params.p_data_src = &epoch;
1445 	mdump_cmd_params.data_src_size = sizeof(epoch);
1446 
1447 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1448 }
1449 
1450 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1451 					     struct ecore_ptt *p_ptt)
1452 {
1453 	struct ecore_mdump_cmd_params mdump_cmd_params;
1454 
1455 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1456 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1457 
1458 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1459 }
1460 
1461 static enum _ecore_status_t
1462 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1463 			   struct mdump_config_stc *p_mdump_config)
1464 {
1465 	struct ecore_mdump_cmd_params mdump_cmd_params;
1466 	enum _ecore_status_t rc;
1467 
1468 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1469 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1470 	mdump_cmd_params.p_data_dst = p_mdump_config;
1471 	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1472 
1473 	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1474 	if (rc != ECORE_SUCCESS)
1475 		return rc;
1476 
1477 	if (mdump_cmd_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1478 		DP_INFO(p_hwfn,
1479 			"The mdump command is not supported by the MFW\n");
1480 		return ECORE_NOTIMPL;
1481 	}
1482 
1483 	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1484 		DP_NOTICE(p_hwfn, false,
1485 			  "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1486 			  mdump_cmd_params.mcp_resp);
1487 		rc = ECORE_UNKNOWN_ERROR;
1488 	}
1489 
1490 	return rc;
1491 }
1492 
1493 enum _ecore_status_t
1494 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1495 			 struct ecore_mdump_info *p_mdump_info)
1496 {
1497 	u32 addr, global_offsize, global_addr;
1498 	struct mdump_config_stc mdump_config;
1499 	enum _ecore_status_t rc;
1500 
1501 	OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1502 
1503 	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1504 				    PUBLIC_GLOBAL);
1505 	global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1506 	global_addr = SECTION_ADDR(global_offsize, 0);
1507 	p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1508 					global_addr +
1509 					OFFSETOF(struct public_global,
1510 						 mdump_reason));
1511 
1512 	if (p_mdump_info->reason) {
1513 		rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1514 		if (rc != ECORE_SUCCESS)
1515 			return rc;
1516 
1517 		p_mdump_info->version = mdump_config.version;
1518 		p_mdump_info->config = mdump_config.config;
1519 		p_mdump_info->epoch = mdump_config.epoc;
1520 		p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1521 		p_mdump_info->valid_logs = mdump_config.valid_logs;
1522 
1523 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1524 			   "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1525 			   p_mdump_info->reason, p_mdump_info->version,
1526 			   p_mdump_info->config, p_mdump_info->epoch,
1527 			   p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1528 	} else {
1529 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1530 			   "MFW mdump info: reason %d\n", p_mdump_info->reason);
1531 	}
1532 
1533 	return ECORE_SUCCESS;
1534 }
1535 
1536 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1537 						struct ecore_ptt *p_ptt)
1538 {
1539 	struct ecore_mdump_cmd_params mdump_cmd_params;
1540 
1541 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1542 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1543 
1544 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1545 }
1546 
1547 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1548 					    struct ecore_ptt *p_ptt)
1549 {
1550 	/* In CMT mode - no need for more than a single acknowledgment to the
1551 	 * MFW, and no more than a single notification to the upper driver.
1552 	 */
1553 	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1554 		return;
1555 
1556 	DP_NOTICE(p_hwfn, false,
1557 		  "Received a critical error notification from the MFW!\n");
1558 
1559 	if (p_hwfn->p_dev->mdump_en) {
1560 		DP_NOTICE(p_hwfn, false,
1561 			  "Not acknowledging the notification to allow the MFW crash dump\n");
1562 		p_hwfn->p_dev->mdump_en = false;
1563 		return;
1564 	}
1565 
1566 	ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1567 	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1568 }
1569 
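/* MFW event handler: read the current MFW mailbox, compare it against the
 * shadow copy to find new messages, dispatch each one, ACK all messages
 * back to the MFW (in big-endian), and update the shadow copy.
 */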
1570 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1571 					     struct ecore_ptt *p_ptt)
1572 {
1573 	struct ecore_mcp_info *info = p_hwfn->mcp_info;
1574 	enum _ecore_status_t rc = ECORE_SUCCESS;
1575 	bool found = false;
1576 	u16 i;
1577 
1578 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1579 
1580 	/* Read Messages from MFW */
1581 	ecore_mcp_read_mb(p_hwfn, p_ptt);
1582 
1583 	/* Compare current messages to old ones */
1584 	for (i = 0; i < info->mfw_mb_length; i++) {
1585 		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1586 			continue;
1587 
1588 		found = true;
1589 
1590 		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1591 			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1592 			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1593 
1594 		switch (i) {
1595 		case MFW_DRV_MSG_LINK_CHANGE:
1596 			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
1597 			break;
1598 		case MFW_DRV_MSG_VF_DISABLED:
1599 			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
1600 			break;
1601 		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1602 			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1603 						    ECORE_DCBX_REMOTE_LLDP_MIB);
1604 			break;
1605 		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1606 			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1607 						    ECORE_DCBX_REMOTE_MIB);
1608 			break;
1609 		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1610 			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1611 						    ECORE_DCBX_OPERATIONAL_MIB);
1612 			break;
1613 		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1614 			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1615 			break;
1616 		case MFW_DRV_MSG_ERROR_RECOVERY:
1617 			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
1618 			break;
1619 		case MFW_DRV_MSG_GET_LAN_STATS:
1620 		case MFW_DRV_MSG_GET_FCOE_STATS:
1621 		case MFW_DRV_MSG_GET_ISCSI_STATS:
1622 		case MFW_DRV_MSG_GET_RDMA_STATS:
1623 			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
1624 			break;
1625 		case MFW_DRV_MSG_BW_UPDATE:
1626 			ecore_mcp_update_bw(p_hwfn, p_ptt);
1627 			break;
1628 		case MFW_DRV_MSG_FAILURE_DETECTED:
1629 			ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
1630 			break;
1631 		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
1632 			ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
1633 			break;
1634 		default:
1635 			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
1636 			rc = ECORE_INVAL;
1637 		}
1638 	}
1639 
1640 	/* ACK everything */
1641 	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1642 		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
1643 
1644 		/* The MFW expects the answer in BE, so force the write in that format */
1645 		ecore_wr(p_hwfn, p_ptt,
1646 			 info->mfw_mb_addr + sizeof(u32) +
1647 			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
1648 			 sizeof(u32) + i * sizeof(u32), val);
1649 	}
1650 
1651 	if (!found) {
1652 		DP_NOTICE(p_hwfn, false,
1653 			  "Received an MFW message indication but no"
1654 			  " new message!\n");
1655 		rc = ECORE_INVAL;
1656 	}
1657 
1658 	/* Copy the new mfw messages into the shadow */
1659 	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
1660 
1661 	return rc;
1662 }
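/*
 * Usage sketch (illustrative only, not part of the driver): the handler
 * above is normally driven from the slowpath context once the MFW raises
 * a mailbox attention. A minimal caller, assuming a valid p_hwfn, would
 * look roughly like this:
 *
 *	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 *
 *	if (p_ptt) {
 *		if (ecore_mcp_handle_events(p_hwfn, p_ptt) != ECORE_SUCCESS)
 *			DP_NOTICE(p_hwfn, false, "Spurious MFW indication\n");
 *		ecore_ptt_release(p_hwfn, p_ptt);
 *	}
 *
 * Note that the function ACKs every mailbox dword back to the MFW (in BE)
 * regardless of whether a handler consumed it.
 */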
1663 
1664 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
1665 					   struct ecore_ptt *p_ptt,
1666 					   u32 *p_mfw_ver,
1667 					   u32 *p_running_bundle_id)
1668 {
1669 	u32 global_offsize;
1670 
1671 #ifndef ASIC_ONLY
1672 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1673 		DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
1674 		return ECORE_SUCCESS;
1675 	}
1676 #endif
1677 
1678 	if (IS_VF(p_hwfn->p_dev)) {
1679 		if (p_hwfn->vf_iov_info) {
1680 			struct pfvf_acquire_resp_tlv *p_resp;
1681 
1682 			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1683 			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1684 			return ECORE_SUCCESS;
1685 		} else {
1686 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1687 				   "VF requested MFW version prior to ACQUIRE\n");
1688 			return ECORE_INVAL;
1689 		}
1690 	}
1691 
1692 	global_offsize = ecore_rd(p_hwfn, p_ptt,
1693 				  SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
1694 						       public_base,
1695 						       PUBLIC_GLOBAL));
1696 	*p_mfw_ver =
1697 	    ecore_rd(p_hwfn, p_ptt,
1698 		     SECTION_ADDR(global_offsize,
1699 				  0) + OFFSETOF(struct public_global, mfw_ver));
1700 
1701 	if (p_running_bundle_id != OSAL_NULL) {
1702 		*p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
1703 						SECTION_ADDR(global_offsize,
1704 							     0) +
1705 						OFFSETOF(struct public_global,
1706 							 running_bundle_id));
1707 	}
1708 
1709 	return ECORE_SUCCESS;
1710 }
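/*
 * Illustrative decode of the value returned above (not part of the driver).
 * The 32-bit MFW version is conventionally packed as four bytes,
 * major.minor.rev.eng, most significant byte first; that packing is an
 * assumption of this example rather than something this file defines:
 *
 *	u32 mfw_ver = 0, bundle_id = 0;
 *
 *	if (ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &mfw_ver,
 *				  &bundle_id) == ECORE_SUCCESS)
 *		DP_INFO(p_hwfn, "MFW %d.%d.%d.%d (bundle 0x%08x)\n",
 *			(mfw_ver >> 24) & 0xff, (mfw_ver >> 16) & 0xff,
 *			(mfw_ver >> 8) & 0xff, mfw_ver & 0xff, bundle_id);
 */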
1711 
1712 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
1713 					      u32 *p_media_type)
1714 {
1715 	struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
1716 	struct ecore_ptt *p_ptt;
1717 
1718 	/* TODO - Add support for VFs */
1719 	if (IS_VF(p_dev))
1720 		return ECORE_INVAL;
1721 
1722 	if (!ecore_mcp_is_init(p_hwfn)) {
1723 		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
1724 		return ECORE_BUSY;
1725 	}
1726 
1727 	*p_media_type = MEDIA_UNSPECIFIED;
1728 
1729 	p_ptt = ecore_ptt_acquire(p_hwfn);
1730 	if (!p_ptt)
1731 		return ECORE_BUSY;
1732 
1733 	*p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1734 				 OFFSETOF(struct public_port, media_type));
1735 
1736 	ecore_ptt_release(p_hwfn, p_ptt);
1737 
1738 	return ECORE_SUCCESS;
1739 }
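/*
 * Usage sketch (illustrative): callers should check the return code before
 * trusting the value; even a successful read may legitimately report
 * MEDIA_UNSPECIFIED if the MFW does not know the media:
 *
 *	u32 media = MEDIA_UNSPECIFIED;
 *
 *	if (ecore_mcp_get_media_type(p_dev, &media) == ECORE_SUCCESS &&
 *	    media != MEDIA_UNSPECIFIED)
 *		DP_INFO(p_hwfn, "media_type 0x%x\n", media);
 */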
1740 
1741 /* @DPDK */
1742 /* Old MFW has a global configuration for all PFs regarding RDMA support */
1743 static void
1744 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
1745 				 enum ecore_pci_personality *p_proto)
1746 {
1747 	*p_proto = ECORE_PCI_ETH;
1748 
1749 	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
1750 		   "According to Legacy capabilities, L2 personality is %08x\n",
1751 		   (u32)*p_proto);
1752 }
1753 
1754 /* @DPDK */
1755 static enum _ecore_status_t
1756 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
1757 			      struct ecore_ptt *p_ptt,
1758 			      enum ecore_pci_personality *p_proto)
1759 {
1760 	u32 resp = 0, param = 0;
1762 
1763 	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
1764 		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
1765 		   (u32)*p_proto, resp, param);
1766 	return ECORE_SUCCESS;
1767 }
1768 
1769 static enum _ecore_status_t
1770 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
1771 			  struct public_func *p_info,
1772 			  struct ecore_ptt *p_ptt,
1773 			  enum ecore_pci_personality *p_proto)
1774 {
1775 	enum _ecore_status_t rc = ECORE_SUCCESS;
1776 
1777 	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
1778 	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
1779 		if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
1780 		    ECORE_SUCCESS)
1781 			ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
1782 		break;
1783 	default:
1784 		rc = ECORE_INVAL;
1785 	}
1786 
1787 	return rc;
1788 }
1789 
1790 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
1791 						    struct ecore_ptt *p_ptt)
1792 {
1793 	struct ecore_mcp_function_info *info;
1794 	struct public_func shmem_info;
1795 
1796 	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1797 	info = &p_hwfn->mcp_info->func_info;
1798 
1799 	info->pause_on_host = (shmem_info.config &
1800 			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
1801 
1802 	if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
1803 				      &info->protocol)) {
1804 		DP_ERR(p_hwfn, "Unknown personality %08x\n",
1805 		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
1806 		return ECORE_INVAL;
1807 	}
1808 
1809 	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1810 
1811 	if (shmem_info.mac_upper || shmem_info.mac_lower) {
1812 		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
1813 		info->mac[1] = (u8)(shmem_info.mac_upper);
1814 		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
1815 		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
1816 		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
1817 		info->mac[5] = (u8)(shmem_info.mac_lower);
1818 	} else {
1819 		/* TODO - are there protocols for which there's no MAC? */
1820 		DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
1821 	}
1822 
1823 	/* TODO - are these calculations true for BE machine? */
1824 	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
1825 			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
1826 	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
1827 			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
1828 
1829 	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
1830 
1831 	info->mtu = (u16)shmem_info.mtu_size;
1832 
1833 	/* Fall back to a sane default when shmem reports no MTU */
1834 	if (info->mtu == 0)
1835 		info->mtu = 1500;
1837 
1838 	DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
1839 		   "Read configuration from shmem: pause_on_host %02x"
1840 		    " protocol %02x BW [%02x - %02x]"
1841 		    " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
1842 		    " node %lx ovlan %04x\n",
1843 		   info->pause_on_host, info->protocol,
1844 		   info->bandwidth_min, info->bandwidth_max,
1845 		   info->mac[0], info->mac[1], info->mac[2],
1846 		   info->mac[3], info->mac[4], info->mac[5],
1847 		   (unsigned long)info->wwn_port,
1848 		   (unsigned long)info->wwn_node, info->ovlan);
1849 
1850 	return ECORE_SUCCESS;
1851 }
1852 
1853 struct ecore_mcp_link_params
1854 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
1855 {
1856 	if (!p_hwfn || !p_hwfn->mcp_info)
1857 		return OSAL_NULL;
1858 	return &p_hwfn->mcp_info->link_input;
1859 }
1860 
1861 struct ecore_mcp_link_state
1862 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
1863 {
1864 	if (!p_hwfn || !p_hwfn->mcp_info)
1865 		return OSAL_NULL;
1866 
1867 #ifndef ASIC_ONLY
1868 	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1869 		DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
1870 		p_hwfn->mcp_info->link_output.link_up = true;
1871 	}
1872 #endif
1873 
1874 	return &p_hwfn->mcp_info->link_output;
1875 }
1876 
1877 struct ecore_mcp_link_capabilities
1878 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
1879 {
1880 	if (!p_hwfn || !p_hwfn->mcp_info)
1881 		return OSAL_NULL;
1882 	return &p_hwfn->mcp_info->link_capabilities;
1883 }
1884 
1885 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
1886 				     struct ecore_ptt *p_ptt)
1887 {
1888 	u32 resp = 0, param = 0;
1889 	enum _ecore_status_t rc;
1890 
1891 	rc = ecore_mcp_cmd(p_hwfn, p_ptt,
1892 			   DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
1893 
1894 	/* Wait for the drain to complete before returning */
1895 	OSAL_MSLEEP(1020);
1896 
1897 	return rc;
1898 }
1899 
1900 const struct ecore_mcp_function_info
1901 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
1902 {
1903 	if (!p_hwfn || !p_hwfn->mcp_info)
1904 		return OSAL_NULL;
1905 	return &p_hwfn->mcp_info->func_info;
1906 }
1907 
1908 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
1909 					   struct ecore_ptt *p_ptt,
1910 					   struct ecore_mcp_nvm_params *params)
1911 {
1912 	enum _ecore_status_t rc;
1913 
1914 	switch (params->type) {
1915 	case ECORE_MCP_NVM_RD:
1916 		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1917 					  params->nvm_common.offset,
1918 					  &params->nvm_common.resp,
1919 					  &params->nvm_common.param,
1920 					  params->nvm_rd.buf_size,
1921 					  params->nvm_rd.buf);
1922 		break;
1923 	case ECORE_MCP_CMD:
1924 		rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1925 				   params->nvm_common.offset,
1926 				   &params->nvm_common.resp,
1927 				   &params->nvm_common.param);
1928 		break;
1929 	case ECORE_MCP_NVM_WR:
1930 		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1931 					  params->nvm_common.offset,
1932 					  &params->nvm_common.resp,
1933 					  &params->nvm_common.param,
1934 					  params->nvm_wr.buf_size,
1935 					  params->nvm_wr.buf);
1936 		break;
1937 	default:
1938 		rc = ECORE_NOTIMPL;
1939 		break;
1940 	}
1941 	return rc;
1942 }
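/*
 * Illustrative parameter setup (not part of the driver): the dispatcher
 * above only routes on params->type, so the caller owns the union layout.
 * A single-buffer NVM read at a hypothetical address nvm_addr would be
 * prepared like this:
 *
 *	struct ecore_mcp_nvm_params params;
 *	u32 buf[MCP_DRV_NVM_BUF_LEN / sizeof(u32)];
 *	u32 buf_size;
 *
 *	OSAL_MEMSET(&params, 0, sizeof(params));
 *	params.type = ECORE_MCP_NVM_RD;
 *	params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
 *	params.nvm_common.offset = nvm_addr;
 *	params.nvm_rd.buf = buf;
 *	params.nvm_rd.buf_size = &buf_size;
 *	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
 */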
1943 
1944 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
1945 				  struct ecore_ptt *p_ptt, u32 personalities)
1946 {
1947 	enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
1948 	struct public_func shmem_info;
1949 	int i, count = 0, num_pfs;
1950 
1951 	num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
1952 
1953 	for (i = 0; i < num_pfs; i++) {
1954 		ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1955 					 MCP_PF_ID_BY_REL(p_hwfn, i));
1956 		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
1957 			continue;
1958 
1959 		if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
1960 					      &protocol) !=
1961 		    ECORE_SUCCESS)
1962 			continue;
1963 
1964 		if ((1 << ((u32)protocol)) & personalities)
1965 			count++;
1966 	}
1967 
1968 	return count;
1969 }
1970 
1971 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
1972 					      struct ecore_ptt *p_ptt,
1973 					      u32 *p_flash_size)
1974 {
1975 	u32 flash_size;
1976 
1977 #ifndef ASIC_ONLY
1978 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1979 		DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
1980 		return ECORE_INVAL;
1981 	}
1982 #endif
1983 
1984 	if (IS_VF(p_hwfn->p_dev))
1985 		return ECORE_INVAL;
1986 
1987 	flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
1988 	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
1989 	    MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
1990 	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
1991 
1992 	*p_flash_size = flash_size;
1993 
1994 	return ECORE_SUCCESS;
1995 }
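/*
 * Worked example of the computation above: the FLASH_SIZE field of
 * MCP_REG_NVM_CFG4 encodes the flash size as a power of two in Mbit, and
 * MCP_BYTES_PER_MBIT_SHIFT (17) converts Mbit to bytes, i.e.
 * flash_size = 2^(field + 17) bytes. A field value of 7, for instance,
 * yields 2^24 bytes = 16 MB of flash.
 */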
1996 
1997 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
1998 						  struct ecore_ptt *p_ptt)
1999 {
2000 	struct ecore_dev *p_dev = p_hwfn->p_dev;
2001 
2002 	if (p_dev->recov_in_prog) {
2003 		DP_NOTICE(p_hwfn, false,
2004 			  "Avoid triggering a recovery since such a process"
2005 			  " is already in progress\n");
2006 		return ECORE_AGAIN;
2007 	}
2008 
2009 	DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2010 	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2011 
2012 	return ECORE_SUCCESS;
2013 }
2014 
2015 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2016 					      struct ecore_ptt *p_ptt,
2017 					      u8 vf_id, u8 num)
2018 {
2019 	u32 resp = 0, param = 0, rc_param = 0;
2020 	enum _ecore_status_t rc;
2021 
2022 	/* Only the leader can configure MSI-X; take CMT into account */
2023 
2024 	if (!IS_LEAD_HWFN(p_hwfn))
2025 		return ECORE_SUCCESS;
2026 	num *= p_hwfn->p_dev->num_hwfns;
2027 
2028 	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
2029 	    DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2030 	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
2031 	    DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2032 
2033 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2034 			   &resp, &rc_param);
2035 
2036 	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2037 		DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2038 			  vf_id);
2039 		rc = ECORE_INVAL;
2040 	} else {
2041 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2042 			   "Requested 0x%02x MSI-X interrupts for VF 0x%02x\n",
2043 			   num, vf_id);
2044 	}
2045 
2046 	return rc;
2047 }
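/*
 * Usage sketch (illustrative): a PF enabling SR-IOV would typically request
 * the per-VF SB budget before starting the VF, for some vf_id/num_sbs pair
 * of its own choosing:
 *
 *	if (ecore_mcp_config_vf_msix(p_hwfn, p_ptt, vf_id,
 *				     num_sbs) != ECORE_SUCCESS)
 *		DP_NOTICE(p_hwfn, false,
 *			  "VF[%d] will start without MSI-X\n", vf_id);
 *
 * Note the scaling performed above on CMT devices: 'num' is multiplied by
 * num_hwfns before being sent, since the MFW counts SBs per device.
 */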
2048 
2049 enum _ecore_status_t
2050 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2051 			   struct ecore_mcp_drv_version *p_ver)
2052 {
2053 	struct ecore_mcp_mb_params mb_params;
2054 	struct drv_version_stc drv_version;
2055 	u32 num_words, i;
2056 	void *p_name;
2057 	OSAL_BE32 val;
2058 	enum _ecore_status_t rc;
2059 
2060 #ifndef ASIC_ONLY
2061 	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2062 		return ECORE_SUCCESS;
2063 #endif
2064 
2065 	OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2066 	drv_version.version = p_ver->version;
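	/* The name field of drv_version_stc spans the version string buffer
	 * minus the leading version dword, hence the (MCP_DRV_VER_STR_SIZE -
	 * 4) bytes copied below.
	 */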
2067 	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2068 	for (i = 0; i < num_words; i++) {
2069 		/* The driver name is expected to be in a big-endian format */
2070 		p_name = &p_ver->name[i * sizeof(u32)];
2071 		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2072 		*(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2073 	}
2074 
2075 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2076 	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2077 	mb_params.p_data_src = &drv_version;
2078 	mb_params.data_src_size = sizeof(drv_version);
2079 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2080 	if (rc != ECORE_SUCCESS)
2081 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2082 
2083 	return rc;
2084 }
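/*
 * Illustrative caller (not part of the driver): p_ver->version is an opaque
 * 32-bit value agreed with the management FW, and p_ver->name must provide
 * at least MCP_DRV_VER_STR_SIZE - 4 readable bytes, since that is how much
 * the loop above copies. my_packed_version is a hypothetical value:
 *
 *	struct ecore_mcp_drv_version drv_ver;
 *
 *	OSAL_MEM_ZERO(&drv_ver, sizeof(drv_ver));
 *	drv_ver.version = my_packed_version;
 *	OSAL_MEMCPY(drv_ver.name, "rte_qede", sizeof("rte_qede"));
 *	rc = ecore_mcp_send_drv_version(p_hwfn, p_ptt, &drv_ver);
 */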
2085 
2086 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2087 				    struct ecore_ptt *p_ptt)
2088 {
2089 	enum _ecore_status_t rc;
2090 	u32 resp = 0, param = 0;
2091 
2092 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2093 			   &param);
2094 	if (rc != ECORE_SUCCESS)
2095 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2096 
2097 	return rc;
2098 }
2099 
2100 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2101 				      struct ecore_ptt *p_ptt)
2102 {
2103 	u32 value, cpu_mode;
2104 
2105 	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2106 
2107 	value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2108 	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2109 	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
2110 	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2111 
2112 	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ?
2113 		ECORE_BUSY : ECORE_SUCCESS;
2113 }
2114 
2115 enum _ecore_status_t
2116 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2117 				   struct ecore_ptt *p_ptt,
2118 				   enum ecore_ov_client client)
2119 {
2120 	enum _ecore_status_t rc;
2121 	u32 resp = 0, param = 0;
2122 	u32 drv_mb_param;
2123 
2124 	switch (client) {
2125 	case ECORE_OV_CLIENT_DRV:
2126 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2127 		break;
2128 	case ECORE_OV_CLIENT_USER:
2129 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2130 		break;
2131 	case ECORE_OV_CLIENT_VENDOR_SPEC:
2132 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2133 		break;
2134 	default:
2135 		DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2136 		return ECORE_INVAL;
2137 	}
2138 
2139 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2140 			   drv_mb_param, &resp, &param);
2141 	if (rc != ECORE_SUCCESS)
2142 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2143 
2144 	return rc;
2145 }
2146 
2147 enum _ecore_status_t
2148 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2149 				 struct ecore_ptt *p_ptt,
2150 				 enum ecore_ov_driver_state drv_state)
2151 {
2152 	enum _ecore_status_t rc;
2153 	u32 resp = 0, param = 0;
2154 	u32 drv_mb_param;
2155 
2156 	switch (drv_state) {
2157 	case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2158 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2159 		break;
2160 	case ECORE_OV_DRIVER_STATE_DISABLED:
2161 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2162 		break;
2163 	case ECORE_OV_DRIVER_STATE_ACTIVE:
2164 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2165 		break;
2166 	default:
2167 		DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
2168 		return ECORE_INVAL;
2169 	}
2170 
2171 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2172 			   drv_mb_param, &resp, &param);
2173 	if (rc != ECORE_SUCCESS)
2174 		DP_ERR(p_hwfn, "Failed to send driver state\n");
2175 
2176 	return rc;
2177 }
2178 
2179 enum _ecore_status_t
2180 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2181 			 struct ecore_fc_npiv_tbl *p_table)
2182 {
2183 	return 0;
2184 }
2185 
2186 enum _ecore_status_t
2187 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
2188 			struct ecore_ptt *p_ptt, u16 mtu)
2189 {
2190 	return 0;
2191 }
2192 
2193 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2194 				       struct ecore_ptt *p_ptt,
2195 				       enum ecore_led_mode mode)
2196 {
2197 	u32 resp = 0, param = 0, drv_mb_param;
2198 	enum _ecore_status_t rc;
2199 
2200 	switch (mode) {
2201 	case ECORE_LED_MODE_ON:
2202 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2203 		break;
2204 	case ECORE_LED_MODE_OFF:
2205 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2206 		break;
2207 	case ECORE_LED_MODE_RESTORE:
2208 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2209 		break;
2210 	default:
2211 		DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2212 		return ECORE_INVAL;
2213 	}
2214 
2215 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2216 			   drv_mb_param, &resp, &param);
2217 	if (rc != ECORE_SUCCESS)
2218 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2219 
2220 	return rc;
2221 }
2222 
2223 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2224 					     struct ecore_ptt *p_ptt,
2225 					     u32 mask_parities)
2226 {
2227 	enum _ecore_status_t rc;
2228 	u32 resp = 0, param = 0;
2229 
2230 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2231 			   mask_parities, &resp, &param);
2232 
2233 	if (rc != ECORE_SUCCESS) {
2234 		DP_ERR(p_hwfn,
2235 		       "MCP response failure for mask parities, aborting\n");
2236 	} else if (resp != FW_MSG_CODE_OK) {
2237 		DP_ERR(p_hwfn,
2238 		       "MCP did not ack mask parity request. Old MFW?\n");
2239 		rc = ECORE_INVAL;
2240 	}
2241 
2242 	return rc;
2243 }
2244 
2245 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
2246 					u8 *p_buf, u32 len)
2247 {
2248 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2249 	u32 bytes_left, offset, bytes_to_copy, buf_size;
2250 	struct ecore_mcp_nvm_params params;
2251 	struct ecore_ptt *p_ptt;
2252 	enum _ecore_status_t rc = ECORE_SUCCESS;
2253 
2254 	p_ptt = ecore_ptt_acquire(p_hwfn);
2255 	if (!p_ptt)
2256 		return ECORE_BUSY;
2257 
2258 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2259 	bytes_left = len;
2260 	offset = 0;
2261 	params.type = ECORE_MCP_NVM_RD;
2262 	params.nvm_rd.buf_size = &buf_size;
2263 	params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
2264 	while (bytes_left > 0) {
2265 		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2266 					   MCP_DRV_NVM_BUF_LEN);
2267 		params.nvm_common.offset = (addr + offset) |
2268 		    (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
2269 		params.nvm_rd.buf = (u32 *)(p_buf + offset);
2270 		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2271 		if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
2272 					    FW_MSG_CODE_NVM_OK)) {
2273 			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2274 			break;
2275 		}
2276 
2277 		/* This can be a lengthy process, and the scheduler may not
2278 		 * be preemptible. Sleep a bit to prevent CPU hogging.
2279 		 */
2280 		if (bytes_left % 0x1000 <
2281 		    (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
2282 			OSAL_MSLEEP(1);
2283 
2284 		offset += *params.nvm_rd.buf_size;
2285 		bytes_left -= *params.nvm_rd.buf_size;
2286 	}
2287 
2288 	p_dev->mcp_nvm_resp = params.nvm_common.resp;
2289 	ecore_ptt_release(p_hwfn, p_ptt);
2290 
2291 	return rc;
2292 }
2293 
2294 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2295 					u32 addr, u8 *p_buf, u32 len)
2296 {
2297 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2298 	struct ecore_mcp_nvm_params params;
2299 	struct ecore_ptt *p_ptt;
2300 	enum _ecore_status_t rc;
2301 
2302 	p_ptt = ecore_ptt_acquire(p_hwfn);
2303 	if (!p_ptt)
2304 		return ECORE_BUSY;
2305 
2306 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2307 	params.type = ECORE_MCP_NVM_RD;
2308 	params.nvm_rd.buf_size = &len;
2309 	params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
2310 	    DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
2311 	params.nvm_common.offset = addr;
2312 	params.nvm_rd.buf = (u32 *)p_buf;
2313 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2314 	if (rc != ECORE_SUCCESS)
2315 		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2316 
2317 	p_dev->mcp_nvm_resp = params.nvm_common.resp;
2318 	ecore_ptt_release(p_hwfn, p_ptt);
2319 
2320 	return rc;
2321 }
2322 
2323 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2324 {
2325 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2327 	struct ecore_ptt *p_ptt;
2328 
2329 	p_ptt = ecore_ptt_acquire(p_hwfn);
2330 	if (!p_ptt)
2331 		return ECORE_BUSY;
2332 
2334 	OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2335 	ecore_ptt_release(p_hwfn, p_ptt);
2336 
2337 	return ECORE_SUCCESS;
2338 }
2339 
2340 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
2341 {
2342 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2343 	struct ecore_mcp_nvm_params params;
2344 	struct ecore_ptt *p_ptt;
2345 	enum _ecore_status_t rc;
2346 
2347 	p_ptt = ecore_ptt_acquire(p_hwfn);
2348 	if (!p_ptt)
2349 		return ECORE_BUSY;
2350 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2351 	params.type = ECORE_MCP_CMD;
2352 	params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
2353 	params.nvm_common.offset = addr;
2354 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2355 	p_dev->mcp_nvm_resp = params.nvm_common.resp;
2356 	ecore_ptt_release(p_hwfn, p_ptt);
2357 
2358 	return rc;
2359 }
2360 
2361 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
2362 						  u32 addr)
2363 {
2364 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2365 	struct ecore_mcp_nvm_params params;
2366 	struct ecore_ptt *p_ptt;
2367 	enum _ecore_status_t rc;
2368 
2369 	p_ptt = ecore_ptt_acquire(p_hwfn);
2370 	if (!p_ptt)
2371 		return ECORE_BUSY;
2372 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2373 	params.type = ECORE_MCP_CMD;
2374 	params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
2375 	params.nvm_common.offset = addr;
2376 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2377 	p_dev->mcp_nvm_resp = params.nvm_common.resp;
2378 	ecore_ptt_release(p_hwfn, p_ptt);
2379 
2380 	return rc;
2381 }
2382 
2383 /* rc is initialized to ECORE_INVAL because the while loop below is never
2384  * entered when len is 0
2385  */
2386 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
2387 					 u32 addr, u8 *p_buf, u32 len)
2388 {
2389 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2390 	enum _ecore_status_t rc = ECORE_INVAL;
2391 	struct ecore_mcp_nvm_params params;
2392 	struct ecore_ptt *p_ptt;
2393 	u32 buf_idx, buf_size;
2394 
2395 	p_ptt = ecore_ptt_acquire(p_hwfn);
2396 	if (!p_ptt)
2397 		return ECORE_BUSY;
2398 
2399 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2400 	params.type = ECORE_MCP_NVM_WR;
2401 	if (cmd == ECORE_PUT_FILE_DATA)
2402 		params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2403 	else
2404 		params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2405 	buf_idx = 0;
2406 	while (buf_idx < len) {
2407 		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2408 				      MCP_DRV_NVM_BUF_LEN);
2409 		params.nvm_common.offset = ((buf_size <<
2410 					     DRV_MB_PARAM_NVM_LEN_SHIFT)
2411 					    | addr) + buf_idx;
2412 		params.nvm_wr.buf_size = buf_size;
2413 		params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
2414 		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2415 		if (rc != ECORE_SUCCESS ||
2416 		    ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
2417 		     (params.nvm_common.resp !=
2418 		      FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK))) {
2419 			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2420 			break;
2421 		}
2420 
2421 		/* This can be a lengthy process, and the scheduler may not
2422 		 * be preemptible. Sleep a bit to prevent CPU hogging.
2423 		 */
2424 		if (buf_idx % 0x1000 >
2425 		    (buf_idx + buf_size) % 0x1000)
2426 			OSAL_MSLEEP(1);
2427 
2428 		buf_idx += buf_size;
2429 	}
2430 
2431 	p_dev->mcp_nvm_resp = params.nvm_common.resp;
2432 	ecore_ptt_release(p_hwfn, p_ptt);
2433 
2434 	return rc;
2435 }
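/*
 * Worked example of the offset packing used above (illustrative): the
 * mailbox 'offset' parameter carries both the chunk length (in the bits at
 * and above DRV_MB_PARAM_NVM_LEN_SHIFT) and the flash address (below it).
 * Writing 0x20 bytes at flash address 0x1000, on the second chunk
 * (buf_idx = 0x20), therefore sends
 *
 *	offset = ((0x20 << DRV_MB_PARAM_NVM_LEN_SHIFT) | 0x1000) + 0x20;
 *
 * buf_idx is added arithmetically after the OR, which is equivalent to
 * OR-ing it in as long as the low address bits do not carry into the
 * length field.
 */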
2436 
2437 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
2438 					 u32 addr, u8 *p_buf, u32 len)
2439 {
2440 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2441 	struct ecore_mcp_nvm_params params;
2442 	struct ecore_ptt *p_ptt;
2443 	enum _ecore_status_t rc;
2444 
2445 	p_ptt = ecore_ptt_acquire(p_hwfn);
2446 	if (!p_ptt)
2447 		return ECORE_BUSY;
2448 
2449 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2450 	params.type = ECORE_MCP_NVM_WR;
2451 	params.nvm_wr.buf_size = len;
2452 	params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
2453 	    DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
2454 	params.nvm_common.offset = addr;
2455 	params.nvm_wr.buf = (u32 *)p_buf;
2456 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2457 	if (rc != ECORE_SUCCESS)
2458 		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2459 	p_dev->mcp_nvm_resp = params.nvm_common.resp;
2460 	ecore_ptt_release(p_hwfn, p_ptt);
2461 
2462 	return rc;
2463 }
2464 
2465 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
2466 						   u32 addr)
2467 {
2468 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2469 	struct ecore_mcp_nvm_params params;
2470 	struct ecore_ptt *p_ptt;
2471 	enum _ecore_status_t rc;
2472 
2473 	p_ptt = ecore_ptt_acquire(p_hwfn);
2474 	if (!p_ptt)
2475 		return ECORE_BUSY;
2476 
2477 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2478 	params.type = ECORE_MCP_CMD;
2479 	params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
2480 	params.nvm_common.offset = addr;
2481 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2482 	p_dev->mcp_nvm_resp = params.nvm_common.resp;
2483 	ecore_ptt_release(p_hwfn, p_ptt);
2484 
2485 	return rc;
2486 }
2487 
2488 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
2489 					    struct ecore_ptt *p_ptt,
2490 					    u32 port, u32 addr, u32 offset,
2491 					    u32 len, u8 *p_buf)
2492 {
2493 	struct ecore_mcp_nvm_params params;
2494 	enum _ecore_status_t rc;
2495 	u32 bytes_left, bytes_to_copy, buf_size;
2496 
2497 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2498 	params.nvm_common.offset =
2499 		(port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
2500 		(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
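	/* From here on, reuse 'addr' as the transceiver offset and 'offset'
	 * as the running index into p_buf.
	 */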
2501 	addr = offset;
2502 	offset = 0;
2503 	bytes_left = len;
2504 	params.type = ECORE_MCP_NVM_RD;
2505 	params.nvm_rd.buf_size = &buf_size;
2506 	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
2507 	while (bytes_left > 0) {
2508 		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2509 					   MAX_I2C_TRANSACTION_SIZE);
2510 		params.nvm_rd.buf = (u32 *)(p_buf + offset);
2511 		params.nvm_common.offset &=
2512 			(DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2513 			 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2514 		params.nvm_common.offset |=
2515 			((addr + offset) <<
2516 			 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
2517 		params.nvm_common.offset |=
2518 			(bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
2519 		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2520 		if (rc != ECORE_SUCCESS)
2521 			return rc;
2522 
2523 		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
2524 		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2525 			return ECORE_NODEV;
2526 		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2527 			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2528 			return ECORE_UNKNOWN_ERROR;
2526 
2527 		offset += *params.nvm_rd.buf_size;
2528 		bytes_left -= *params.nvm_rd.buf_size;
2529 	}
2530 
2531 	return ECORE_SUCCESS;
2532 }
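/*
 * Usage sketch (illustrative): dumping the first bytes of an SFP EEPROM on
 * port 0. The I2C address 0xA0 is the conventional SFP EEPROM address; it
 * is an assumption of this example, not something this file defines:
 *
 *	u8 eeprom[32];
 *
 *	rc = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 0, 0xA0, 0,
 *				    sizeof(eeprom), eeprom);
 *
 * The helper transparently splits the request into chunks of at most
 * MAX_I2C_TRANSACTION_SIZE bytes.
 */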
2533 
2534 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
2535 					     struct ecore_ptt *p_ptt,
2536 					     u32 port, u32 addr, u32 offset,
2537 					     u32 len, u8 *p_buf)
2538 {
2539 	struct ecore_mcp_nvm_params params;
2540 	enum _ecore_status_t rc;
2541 	u32 buf_idx, buf_size;
2542 
2543 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2544 	params.nvm_common.offset =
2545 		(port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
2546 		(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
2547 	params.type = ECORE_MCP_NVM_WR;
2548 	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
2549 	buf_idx = 0;
2550 	while (buf_idx < len) {
2551 		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2552 				      MAX_I2C_TRANSACTION_SIZE);
2553 		params.nvm_common.offset &=
2554 			(DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2555 			 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2556 		params.nvm_common.offset |=
2557 			((offset + buf_idx) <<
2558 			 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
2559 		params.nvm_common.offset |=
2560 			(buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
2561 		params.nvm_wr.buf_size = buf_size;
2562 		params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
2563 		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2564 		if (rc != ECORE_SUCCESS)
2565 			return rc;
2566 
2567 		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
2568 		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2569 			return ECORE_NODEV;
2570 		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2571 			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2572 			return ECORE_UNKNOWN_ERROR;
2570 
2571 		buf_idx += buf_size;
2572 	}
2573 
2574 	return ECORE_SUCCESS;
2575 }
2576 
2577 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
2578 					 struct ecore_ptt *p_ptt,
2579 					 u16 gpio, u32 *gpio_val)
2580 {
2581 	enum _ecore_status_t rc = ECORE_SUCCESS;
2582 	u32 drv_mb_param = 0, rsp;
2583 
2584 	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
2585 
2586 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
2587 			   drv_mb_param, &rsp, gpio_val);
2588 
2589 	if (rc != ECORE_SUCCESS)
2590 		return rc;
2591 
2592 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2593 		return ECORE_UNKNOWN_ERROR;
2594 
2595 	return ECORE_SUCCESS;
2596 }
2597 
2598 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
2599 					  struct ecore_ptt *p_ptt,
2600 					  u16 gpio, u16 gpio_val)
2601 {
2602 	enum _ecore_status_t rc = ECORE_SUCCESS;
2603 	u32 drv_mb_param = 0, param, rsp;
2604 
2605 	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
2606 		(gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
2607 
2608 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
2609 			   drv_mb_param, &rsp, &param);
2610 
2611 	if (rc != ECORE_SUCCESS)
2612 		return rc;
2613 
2614 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2615 		return ECORE_UNKNOWN_ERROR;
2616 
2617 	return ECORE_SUCCESS;
2618 }
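/*
 * Usage sketch (illustrative): toggling some GPIO line gpio_num and reading
 * it back. Both helpers fail with ECORE_UNKNOWN_ERROR when the MFW does not
 * answer with FW_MSG_CODE_GPIO_OK:
 *
 *	u32 val;
 *
 *	if (ecore_mcp_gpio_write(p_hwfn, p_ptt, gpio_num,
 *				 1) == ECORE_SUCCESS &&
 *	    ecore_mcp_gpio_read(p_hwfn, p_ptt, gpio_num,
 *				&val) == ECORE_SUCCESS)
 *		DP_INFO(p_hwfn, "GPIO %u reads back %u\n", gpio_num, val);
 */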
2619 
2620 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
2621 					 struct ecore_ptt *p_ptt,
2622 					 u16 gpio, u32 *gpio_direction,
2623 					 u32 *gpio_ctrl)
2624 {
2625 	u32 drv_mb_param = 0, rsp, val = 0;
2626 	enum _ecore_status_t rc = ECORE_SUCCESS;
2627 
2628 	drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
2629 
2630 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
2631 			   drv_mb_param, &rsp, &val);
2632 	if (rc != ECORE_SUCCESS)
2633 		return rc;
2634 
2635 	*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
2636 			   DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
2637 	*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
2638 		      DRV_MB_PARAM_GPIO_CTRL_SHIFT;
2639 
2640 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2641 		return ECORE_UNKNOWN_ERROR;
2642 
2643 	return ECORE_SUCCESS;
2644 }
2645 
2646 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
2647 						  struct ecore_ptt *p_ptt)
2648 {
2649 	u32 drv_mb_param = 0, rsp, param;
2650 	enum _ecore_status_t rc = ECORE_SUCCESS;
2651 
2652 	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
2653 			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2654 
2655 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2656 			   drv_mb_param, &rsp, &param);
2657 
2658 	if (rc != ECORE_SUCCESS)
2659 		return rc;
2660 
2661 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2662 	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
2663 		rc = ECORE_UNKNOWN_ERROR;
2664 
2665 	return rc;
2666 }
2667 
2668 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
2669 					       struct ecore_ptt *p_ptt)
2670 {
2671 	u32 drv_mb_param, rsp, param;
2672 	enum _ecore_status_t rc = ECORE_SUCCESS;
2673 
2674 	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
2675 			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2676 
2677 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2678 			   drv_mb_param, &rsp, &param);
2679 
2680 	if (rc != ECORE_SUCCESS)
2681 		return rc;
2682 
2683 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2684 	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
2685 		rc = ECORE_UNKNOWN_ERROR;
2686 
2687 	return rc;
2688 }
2689 
2690 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
2691 	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
2692 {
2693 	u32 drv_mb_param = 0, rsp;
2694 	enum _ecore_status_t rc = ECORE_SUCCESS;
2695 
2696 	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
2697 			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2698 
2699 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2700 			   drv_mb_param, &rsp, num_images);
2701 
2702 	if (rc != ECORE_SUCCESS)
2703 		return rc;
2704 
2705 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
2706 		rc = ECORE_UNKNOWN_ERROR;
2707 
2708 	return rc;
2709 }
2710 
2711 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
2712 	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2713 	struct bist_nvm_image_att *p_image_att, u32 image_index)
2714 {
2715 	struct ecore_mcp_nvm_params params;
2716 	enum _ecore_status_t rc;
2717 	u32 buf_size;
2718 
2719 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2720 	params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
2721 				    DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2722 	params.nvm_common.offset |= (image_index <<
2723 				    DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
2724 
2725 	params.type = ECORE_MCP_NVM_RD;
2726 	params.nvm_rd.buf_size = &buf_size;
2727 	params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
2728 	params.nvm_rd.buf = (u32 *)p_image_att;
2729 
2730 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2731 	if (rc != ECORE_SUCCESS)
2732 		return rc;
2733 
2734 	if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2735 	    (p_image_att->return_code != 1))
2736 		rc = ECORE_UNKNOWN_ERROR;
2737 
2738 	return rc;
2739 }
2740 
2741 enum _ecore_status_t
2742 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
2743 			       struct ecore_ptt *p_ptt,
2744 			       struct ecore_temperature_info *p_temp_info)
2745 {
2746 	struct ecore_temperature_sensor *p_temp_sensor;
2747 	struct temperature_status_stc mfw_temp_info;
2748 	struct ecore_mcp_mb_params mb_params;
2749 	u32 val;
2750 	enum _ecore_status_t rc;
2751 	u8 i;
2752 
2753 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2754 	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
2755 	mb_params.p_data_dst = &mfw_temp_info;
2756 	mb_params.data_dst_size = sizeof(mfw_temp_info);
2757 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2758 	if (rc != ECORE_SUCCESS)
2759 		return rc;
2760 
2761 	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
2762 	p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
2763 					      ECORE_MAX_NUM_OF_SENSORS);
2764 	for (i = 0; i < p_temp_info->num_sensors; i++) {
2765 		val = mfw_temp_info.sensor[i];
2766 		p_temp_sensor = &p_temp_info->sensors[i];
2767 		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
2768 						 SENSOR_LOCATION_SHIFT;
2769 		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
2770 						THRESHOLD_HIGH_SHIFT;
2771 		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
2772 					  CRITICAL_TEMPERATURE_SHIFT;
2773 		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
2774 					      CURRENT_TEMP_SHIFT;
2775 	}
2776 
2777 	return ECORE_SUCCESS;
2778 }
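/*
 * Illustrative consumer (not part of the driver): each decoded entry
 * carries the sensor location and thresholds, so a monitoring loop can be
 * as simple as:
 *
 *	struct ecore_temperature_info ti;
 *	u8 i;
 *
 *	if (ecore_mcp_get_temperature_info(p_hwfn, p_ptt,
 *					   &ti) == ECORE_SUCCESS)
 *		for (i = 0; i < ti.num_sensors; i++)
 *			DP_INFO(p_hwfn,
 *				"sensor %u: %u C (high %u, critical %u)\n",
 *				ti.sensors[i].sensor_location,
 *				ti.sensors[i].current_temp,
 *				ti.sensors[i].threshold_high,
 *				ti.sensors[i].critical);
 */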
2779 
2780 enum _ecore_status_t ecore_mcp_get_mba_versions(
2781 	struct ecore_hwfn *p_hwfn,
2782 	struct ecore_ptt *p_ptt,
2783 	struct ecore_mba_vers *p_mba_vers)
2784 {
2785 	struct ecore_mcp_nvm_params params;
2786 	enum _ecore_status_t rc;
2787 	u32 buf_size;
2788 
2789 	OSAL_MEM_ZERO(&params, sizeof(params));
2790 	params.type = ECORE_MCP_NVM_RD;
2791 	params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
2792 	params.nvm_common.offset = 0;
2793 	params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
2794 	params.nvm_rd.buf_size = &buf_size;
2795 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2796 
2797 	if (rc != ECORE_SUCCESS)
2798 		return rc;
2799 
2800 	if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2801 	    FW_MSG_CODE_NVM_OK)
2802 		rc = ECORE_UNKNOWN_ERROR;
2803 
2804 	if (buf_size != MCP_DRV_NVM_BUF_LEN)
2805 		rc = ECORE_UNKNOWN_ERROR;
2806 
2807 	return rc;
2808 }
2809 
2810 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
2811 					      struct ecore_ptt *p_ptt,
2812 					      u64 *num_events)
2813 {
2814 	u32 rsp;
2815 
2816 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
2817 			     0, &rsp, (u32 *)num_events);
2818 }
2819 
2820 static enum resource_id_enum
2821 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
2822 {
2823 	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
2824 
2825 	switch (res_id) {
2826 	case ECORE_SB:
2827 		mfw_res_id = RESOURCE_NUM_SB_E;
2828 		break;
2829 	case ECORE_L2_QUEUE:
2830 		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
2831 		break;
2832 	case ECORE_VPORT:
2833 		mfw_res_id = RESOURCE_NUM_VPORT_E;
2834 		break;
2835 	case ECORE_RSS_ENG:
2836 		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
2837 		break;
2838 	case ECORE_PQ:
2839 		mfw_res_id = RESOURCE_NUM_PQ_E;
2840 		break;
2841 	case ECORE_RL:
2842 		mfw_res_id = RESOURCE_NUM_RL_E;
2843 		break;
2844 	case ECORE_MAC:
2845 	case ECORE_VLAN:
2846 		/* Each VFC resource can accommodate both a MAC and a VLAN */
2847 		mfw_res_id = RESOURCE_VFC_FILTER_E;
2848 		break;
2849 	case ECORE_ILT:
2850 		mfw_res_id = RESOURCE_ILT_E;
2851 		break;
2852 	case ECORE_LL2_QUEUE:
2853 		mfw_res_id = RESOURCE_LL2_QUEUE_E;
2854 		break;
2855 	case ECORE_RDMA_CNQ_RAM:
2856 	case ECORE_CMDQS_CQS:
2857 		/* CNQ/CMDQS are the same resource */
2858 		mfw_res_id = RESOURCE_CQS_E;
2859 		break;
2860 	case ECORE_RDMA_STATS_QUEUE:
2861 		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
2862 		break;
2863 	case ECORE_BDQ:
2864 		mfw_res_id = RESOURCE_BDQ_E;
2865 		break;
2866 	default:
2867 		break;
2868 	}
2869 
2870 	return mfw_res_id;
2871 }
2872 
2873 #define ECORE_RESC_ALLOC_VERSION_MAJOR	2
2874 #define ECORE_RESC_ALLOC_VERSION_MINOR	0
2875 #define ECORE_RESC_ALLOC_VERSION				\
2876 	((ECORE_RESC_ALLOC_VERSION_MAJOR <<			\
2877 	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) |	\
2878 	 (ECORE_RESC_ALLOC_VERSION_MINOR <<			\
2879 	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
2880 
2881 struct ecore_resc_alloc_in_params {
2882 	u32 cmd;
2883 	enum ecore_resources res_id;
2884 	u32 resc_max_val;
2885 };
2886 
2887 struct ecore_resc_alloc_out_params {
2888 	u32 mcp_resp;
2889 	u32 mcp_param;
2890 	u32 resc_num;
2891 	u32 resc_start;
2892 	u32 vf_resc_num;
2893 	u32 vf_resc_start;
2894 	u32 flags;
2895 };
2896 
2897 static enum _ecore_status_t
2898 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
2899 			      struct ecore_ptt *p_ptt,
2900 			      struct ecore_resc_alloc_in_params *p_in_params,
2901 			      struct ecore_resc_alloc_out_params *p_out_params)
2902 {
2903 	struct ecore_mcp_mb_params mb_params;
2904 	struct resource_info mfw_resc_info;
2905 	enum _ecore_status_t rc;
2906 
2907 	OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
2908 
2909 	mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
2910 	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
2911 		DP_ERR(p_hwfn,
2912 		       "Failed to match resource %d [%s] with the MFW resources\n",
2913 		       p_in_params->res_id,
2914 		       ecore_hw_get_resc_name(p_in_params->res_id));
2915 		return ECORE_INVAL;
2916 	}
2917 
2918 	switch (p_in_params->cmd) {
2919 	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
2920 		mfw_resc_info.size = p_in_params->resc_max_val;
2921 		/* Fallthrough */
2922 	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
2923 		break;
2924 	default:
2925 		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
2926 		       p_in_params->cmd);
2927 		return ECORE_INVAL;
2928 	}
2929 
2930 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2931 	mb_params.cmd = p_in_params->cmd;
2932 	mb_params.param = ECORE_RESC_ALLOC_VERSION;
2933 	mb_params.p_data_src = &mfw_resc_info;
2934 	mb_params.data_src_size = sizeof(mfw_resc_info);
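	/* The request and the response share the same buffer */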
2935 	mb_params.p_data_dst = mb_params.p_data_src;
2936 	mb_params.data_dst_size = mb_params.data_src_size;
2937 
2938 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2939 		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
2940 		   p_in_params->cmd, p_in_params->res_id,
2941 		   ecore_hw_get_resc_name(p_in_params->res_id),
2942 		   ECORE_MFW_GET_FIELD(mb_params.param,
2943 			   DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
2944 		   ECORE_MFW_GET_FIELD(mb_params.param,
2945 			   DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
2946 		   p_in_params->resc_max_val);
2947 
2948 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2949 	if (rc != ECORE_SUCCESS)
2950 		return rc;
2951 
2952 	p_out_params->mcp_resp = mb_params.mcp_resp;
2953 	p_out_params->mcp_param = mb_params.mcp_param;
2954 	p_out_params->resc_num = mfw_resc_info.size;
2955 	p_out_params->resc_start = mfw_resc_info.offset;
2956 	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
2957 	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
2958 	p_out_params->flags = mfw_resc_info.flags;
2959 
2960 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2961 		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
2962 		   ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
2963 			   FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
2964 		   ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
2965 			   FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
2966 		   p_out_params->resc_num, p_out_params->resc_start,
2967 		   p_out_params->vf_resc_num, p_out_params->vf_resc_start,
2968 		   p_out_params->flags);
2969 
2970 	return ECORE_SUCCESS;
2971 }
2972 
2973 enum _ecore_status_t
2974 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2975 			   enum ecore_resources res_id, u32 resc_max_val,
2976 			   u32 *p_mcp_resp)
2977 {
2978 	struct ecore_resc_alloc_out_params out_params;
2979 	struct ecore_resc_alloc_in_params in_params;
2980 	enum _ecore_status_t rc;
2981 
2982 	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
2983 	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
2984 	in_params.res_id = res_id;
2985 	in_params.resc_max_val = resc_max_val;
2986 	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
2987 	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
2988 					   &out_params);
2989 	if (rc != ECORE_SUCCESS)
2990 		return rc;
2991 
2992 	*p_mcp_resp = out_params.mcp_resp;
2993 
2994 	return ECORE_SUCCESS;
2995 }
2996 
2997 enum _ecore_status_t
2998 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2999 			enum ecore_resources res_id, u32 *p_mcp_resp,
3000 			u32 *p_resc_num, u32 *p_resc_start)
3001 {
3002 	struct ecore_resc_alloc_out_params out_params;
3003 	struct ecore_resc_alloc_in_params in_params;
3004 	enum _ecore_status_t rc;
3005 
3006 	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3007 	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3008 	in_params.res_id = res_id;
3009 	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3010 	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3011 					   &out_params);
3012 	if (rc != ECORE_SUCCESS)
3013 		return rc;
3014 
3015 	*p_mcp_resp = out_params.mcp_resp;
3016 
3017 	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3018 		*p_resc_num = out_params.resc_num;
3019 		*p_resc_start = out_params.resc_start;
3020 	}
3021 
3022 	return ECORE_SUCCESS;
3023 }
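/*
 * Usage sketch (illustrative): querying the MFW for the L2-queue range
 * allocated to this PF. The outputs are only valid when the MFW answers
 * FW_MSG_CODE_RESOURCE_ALLOC_OK:
 *
 *	u32 resp = 0, num = 0, start = 0;
 *
 *	if (ecore_mcp_get_resc_info(p_hwfn, p_ptt, ECORE_L2_QUEUE, &resp,
 *				    &num, &start) == ECORE_SUCCESS &&
 *	    resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
 *		DP_INFO(p_hwfn, "L2 queues: %u starting at %u\n",
 *			num, start);
 */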
3024 
3025 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3026 					       struct ecore_ptt *p_ptt)
3027 {
3028 	u32 mcp_resp, mcp_param;
3029 
3030 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3031 			     &mcp_resp, &mcp_param);
3032 }
3033 
3034 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3035 						   struct ecore_ptt *p_ptt,
3036 						   u32 param, u32 *p_mcp_resp,
3037 						   u32 *p_mcp_param)
3038 {
3039 	enum _ecore_status_t rc;
3040 
3041 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3042 			   p_mcp_resp, p_mcp_param);
3043 	if (rc != ECORE_SUCCESS)
3044 		return rc;
3045 
3046 	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3047 		DP_INFO(p_hwfn,
3048 			"The resource command is unsupported by the MFW\n");
3049 		return ECORE_NOTIMPL;
3050 	}
3051 
3052 	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3053 		u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3054 
3055 		DP_NOTICE(p_hwfn, false,
3056 			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3057 			  param, opcode);
3058 		return ECORE_INVAL;
3059 	}
3060 
3061 	return rc;
3062 }
3063 
3064 enum _ecore_status_t
3065 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3066 		      struct ecore_resc_lock_params *p_params)
3067 {
3068 	u32 param = 0, mcp_resp, mcp_param;
3069 	u8 opcode;
3070 	enum _ecore_status_t rc;
3071 
3072 	switch (p_params->timeout) {
3073 	case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3074 		opcode = RESOURCE_OPCODE_REQ;
3075 		p_params->timeout = 0;
3076 		break;
3077 	case ECORE_MCP_RESC_LOCK_TO_NONE:
3078 		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3079 		p_params->timeout = 0;
3080 		break;
3081 	default:
3082 		opcode = RESOURCE_OPCODE_REQ_W_AGING;
3083 		break;
3084 	}
3085 
3086 	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3087 	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3088 	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3089 
3090 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3091 		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3092 		   param, p_params->timeout, opcode, p_params->resource);
3093 
3094 	/* Attempt to acquire the resource */
3095 	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3096 				    &mcp_param);
3097 	if (rc != ECORE_SUCCESS)
3098 		return rc;
3099 
3100 	/* Analyze the response */
3101 	p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
3102 					      RESOURCE_CMD_RSP_OWNER);
3103 	opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3104 
3105 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3106 		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3107 		   mcp_param, opcode, p_params->owner);
3108 
3109 	switch (opcode) {
3110 	case RESOURCE_OPCODE_GNT:
3111 		p_params->b_granted = true;
3112 		break;
3113 	case RESOURCE_OPCODE_BUSY:
3114 		p_params->b_granted = false;
3115 		break;
3116 	default:
3117 		DP_NOTICE(p_hwfn, false,
3118 			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3119 			  mcp_param, opcode);
3120 		return ECORE_INVAL;
3121 	}
3122 
3123 	return ECORE_SUCCESS;
3124 }
3125 
3126 enum _ecore_status_t
3127 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3128 		    struct ecore_resc_lock_params *p_params)
3129 {
3130 	u32 retry_cnt = 0;
3131 	enum _ecore_status_t rc;
3132 
3133 	do {
3134 		/* No need for an interval before the first iteration */
3135 		if (retry_cnt) {
3136 			if (p_params->sleep_b4_retry) {
3137 				u16 retry_interval_in_ms =
3138 					DIV_ROUND_UP(p_params->retry_interval,
3139 						     1000);
3140 
3141 				OSAL_MSLEEP(retry_interval_in_ms);
3142 			} else {
3143 				OSAL_UDELAY(p_params->retry_interval);
3144 			}
3145 		}
3146 
3147 		rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3148 		if (rc != ECORE_SUCCESS)
3149 			return rc;
3150 
3151 		if (p_params->b_granted)
3152 			break;
3153 	} while (retry_cnt++ < p_params->retry_num);
3154 
3155 	return ECORE_SUCCESS;
3156 }
3157 
3158 enum _ecore_status_t
3159 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3160 		      struct ecore_resc_unlock_params *p_params)
3161 {
3162 	u32 param = 0, mcp_resp, mcp_param;
3163 	u8 opcode;
3164 	enum _ecore_status_t rc;
3165 
3166 	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3167 				   : RESOURCE_OPCODE_RELEASE;
3168 	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3169 	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3170 
3171 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3172 		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3173 		   param, opcode, p_params->resource);
3174 
3175 	/* Attempt to release the resource */
3176 	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3177 				    &mcp_param);
3178 	if (rc != ECORE_SUCCESS)
3179 		return rc;
3180 
3181 	/* Analyze the response */
3182 	opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3183 
3184 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3185 		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3186 		   mcp_param, opcode);
3187 
3188 	switch (opcode) {
3189 	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3190 		DP_INFO(p_hwfn,
3191 			"Resource unlock request for an already released resource [%d]\n",
3192 			p_params->resource);
3193 		/* Fallthrough */
3194 	case RESOURCE_OPCODE_RELEASED:
3195 		p_params->b_released = true;
3196 		break;
3197 	case RESOURCE_OPCODE_WRONG_OWNER:
3198 		p_params->b_released = false;
3199 		break;
3200 	default:
3201 		DP_NOTICE(p_hwfn, false,
3202 			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3203 			  mcp_param, opcode);
3204 		return ECORE_INVAL;
3205 	}
3206 
3207 	return ECORE_SUCCESS;
3208 }
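/*
 * End-to-end sketch of the lock/unlock pair above (illustrative): a caller
 * serializing access to a resource shared with other PFs would do roughly
 * the following, with my_resc standing in for a real resource identifier:
 *
 *	struct ecore_resc_lock_params lock_params;
 *	struct ecore_resc_unlock_params unlock_params;
 *
 *	OSAL_MEM_ZERO(&lock_params, sizeof(lock_params));
 *	lock_params.resource = my_resc;
 *	lock_params.timeout = ECORE_MCP_RESC_LOCK_TO_DEFAULT;
 *	lock_params.retry_num = 10;
 *	lock_params.retry_interval = 10000;
 *	lock_params.sleep_b4_retry = true;
 *
 *	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
 *	if (rc == ECORE_SUCCESS && lock_params.b_granted) {
 *		(critical section)
 *		OSAL_MEM_ZERO(&unlock_params, sizeof(unlock_params));
 *		unlock_params.resource = my_resc;
 *		rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
 *	}
 *
 * retry_interval is in microseconds (cf. OSAL_UDELAY above). Note that
 * ecore_mcp_resc_lock() returns ECORE_SUCCESS even when the lock was not
 * granted; b_granted carries the actual verdict.
 */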
3209