/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "nvm_cfg.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "ecore_sp_commands.h"
#include "ecore_cxt.h"

#define GRCBASE_MCP	0xe00000

#define ECORE_MCP_RESP_ITER_US		10
#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)	/* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000)	/* Account for 500 msec */

#ifndef ASIC_ONLY
/* Non-ASIC:
 * The waiting interval is multiplied by 100 to reduce the impact of the
 * built-in delay of 100usec in each ecore_rd().
 * In addition, a factor of 4 compared to ASIC is applied.
 */
#define ECORE_EMUL_MCP_RESP_ITER_US	(ECORE_MCP_RESP_ITER_US * 100)
#define ECORE_EMUL_DRV_MB_MAX_RETRIES	((ECORE_DRV_MB_MAX_RETRIES / 100) * 4)
#define ECORE_EMUL_MCP_RESET_RETRIES	((ECORE_MCP_RESET_RETRIES / 100) * 4)
#endif

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
	DRV_ID_PDA_COMP_VER_OFFSET)

#define MCP_BYTES_PER_MBIT_OFFSET 17

#ifndef ASIC_ONLY
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
#endif

bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	OSAL_BE32 tmp;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;
#endif

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
		    OSAL_BE32_TO_CPU(tmp);
	}
}

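/* A single in-flight mailbox command. Commands are tracked on
 * mcp_info->cmd_list and are matched to MFW responses by their expected
 * sequence number.
 */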
struct ecore_mcp_cmd_elem {
	osal_list_entry_t list;
	struct ecore_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
		       struct ecore_mcp_mb_params *p_mb_params,
		       u16 expected_seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
				 sizeof(*p_cmd_elem));
	if (!p_cmd_elem) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
		goto out;
	}

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}

141 /* Must be called while cmd_lock is acquired */
ecore_mcp_cmd_del_elem(struct ecore_hwfn * p_hwfn,struct ecore_mcp_cmd_elem * p_cmd_elem)142 static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
143 				   struct ecore_mcp_cmd_elem *p_cmd_elem)
144 {
145 	OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
146 	OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
147 }
148 
/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
				 struct ecore_mcp_cmd_elem) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return OSAL_NULL;
}

enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;

		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
					      &p_hwfn->mcp_info->cmd_list, list,
					      struct ecore_mcp_cmd_elem) {
			ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

#ifdef CONFIG_ECORE_LOCK_ALLOC
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
	}

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);

	return ECORE_SUCCESS;
}

/* Maximum of 1 sec to wait for the SHMEM ready indication */
#define ECORE_MCP_SHMEM_RDY_MAX_RETRIES	20
#define ECORE_MCP_SHMEM_RDY_ITER_MS	50

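/* Read the SHMEM section offsets from the MCP scratch-pad - the public
 * base and the MFW/driver mailbox addresses - and sample the initial
 * mailbox/pulse sequence numbers and the MCP reset counter.
 */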
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize, val;
	u8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES;
	u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	val = ecore_rd(p_hwfn, p_ptt, MCP_REG_CACHE_PAGING_ENABLE);
	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base) {
		DP_NOTICE(p_hwfn, false,
			  "The address of the MCP scratch-pad is not configured\n");
#ifndef ASIC_ONLY
		/* Zeroed "public_base" implies no MFW */
		if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
			DP_INFO(p_hwfn, "Emulation: Assume no MFW\n");
#endif
		return ECORE_INVAL;
	}

	p_info->public_base |= GRCBASE_MCP;

	/* Get the MFW MB address and number of supported messages */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
				  PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* @@@TBD:
	 * The driver can notify that there was an MCP reset, and read the SHMEM
	 * values before the MFW has completed initializing them.
	 * As a temporary solution, the "sup_msgs" field is used as a data ready
	 * indication.
	 * This should be replaced with an actual indication when it is provided
	 * by the MFW.
	 */
	while (!p_info->mfw_mb_length && cnt--) {
		OSAL_MSLEEP(msec);
		p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
						      p_info->mfw_mb_addr);
	}

	if (!cnt) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to get the SHMEM ready notification after %d msec\n",
			  ECORE_MCP_SHMEM_RDY_MAX_RETRIES * msec);
		return ECORE_TIMEOUT;
	}

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x,"
		   " mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
	    DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
	    DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
			sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
		return ECORE_NOMEM;
	}
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
		return ECORE_NOMEM;
	}
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
		OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
		return ECORE_NOMEM;
	}
#endif
	OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
	OSAL_SPIN_LOCK_INIT(&p_info->link_lock);

	OSAL_LIST_INIT(&p_info->cmd_list);

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since "public_base" indicates that
		 * the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}

static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between init
	 * time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}

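/* Request an MCP reset via the mailbox, and poll the MCP history register
 * (MISCS_REG_GENERIC_POR_0) until its value changes, which indicates that
 * the MCP has gone through the reset.
 */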
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 prev_generic_por_0, seq, delay = ECORE_MCP_RESP_ITER_US, cnt = 0;
	u32 retries = ECORE_MCP_RESET_RETRIES;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		delay = ECORE_EMUL_MCP_RESP_ITER_US;
		retries = ECORE_EMUL_MCP_RESET_RETRIES;
	}
#endif
	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return ECORE_ABORTED;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

	prev_generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	/* Give the MFW up to 500 msec (50*1000*10usec) to resume */
	do {
		OSAL_UDELAY(delay);

		if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) !=
		    prev_generic_por_0)
			break;
	} while (cnt++ < retries);

	if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) !=
	    prev_generic_por_0) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}

#ifndef ASIC_ONLY
static void ecore_emul_mcp_load_req(struct ecore_hwfn *p_hwfn,
				    struct ecore_mcp_mb_params *p_mb_params)
{
	if (GET_MFW_FIELD(p_mb_params->param, DRV_ID_MCP_HSI_VER) !=
	    1 /* ECORE_LOAD_REQ_HSI_VER_1 */) {
		p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1;
		return;
	}

	if (!loaded)
		p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE;
	else if (!loaded_port[p_hwfn->port_id])
		p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_PORT;
	else
		p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	/* On CMT, always tell that it's engine */
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE;

	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: 0x%08x load cnt: 0x%x port id=%d port_load=%d\n",
		   p_mb_params->mcp_resp, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}

static void ecore_emul_mcp_unload_req(struct ecore_hwfn *p_hwfn)
{
	loaded--;
	loaded_port[p_hwfn->port_id]--;
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n", loaded);
}

static enum _ecore_status_t
ecore_emul_mcp_cmd(struct ecore_hwfn *p_hwfn,
		   struct ecore_mcp_mb_params *p_mb_params)
{
	if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_INVAL;

	switch (p_mb_params->cmd) {
	case DRV_MSG_CODE_LOAD_REQ:
		ecore_emul_mcp_load_req(p_hwfn, p_mb_params);
		break;
	case DRV_MSG_CODE_UNLOAD_REQ:
		ecore_emul_mcp_unload_req(p_hwfn);
		break;
	case DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT:
	case DRV_MSG_CODE_RESOURCE_CMD:
	case DRV_MSG_CODE_MDUMP_CMD:
	case DRV_MSG_CODE_GET_ENGINE_CONFIG:
	case DRV_MSG_CODE_GET_PPFID_BITMAP:
		return ECORE_NOTIMPL;
	default:
		break;
	}

	return ECORE_SUCCESS;
}
#endif

/* Must be called while cmd_lock is acquired */
static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	/* There is at most one pending command at any given time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
						   struct ecore_mcp_cmd_elem,
						   list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}

/* Must be called while cmd_lock is acquired */
static enum _ecore_status_t
ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params *p_mb_params;
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return ECORE_AGAIN;

	p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return ECORE_UNKNOWN_ERROR;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != OSAL_NULL &&
	    p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      OFFSETOF(struct public_drv_mb,
					       union_data);
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return ECORE_SUCCESS;
}

/* Must be called while cmd_lock is acquired */
static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_mb_params *p_mb_params,
				      u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);
	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
			    p_mb_params->data_src_size);
	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
			sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}

static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
				       bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}

void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
	u32 delay = ECORE_MCP_RESP_ITER_US;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = ECORE_EMUL_MCP_RESP_ITER_US;
#endif
	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(delay);
	cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(delay);
	cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn, false,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}

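/* Send a mailbox command and wait for its completion in two phases:
 * first wait for the mailbox to become non-occupied, then post the
 * command and poll for the MFW response. If the MFW fails to respond,
 * block any further mailbox commands and notify the upper layer.
 */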
static enum _ecore_status_t
_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mcp_mb_params *p_mb_params,
			 u32 max_retries, u32 delay)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 cnt = 0;
	u16 seq_num;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (!ecore_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_UDELAY(delay);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_AGAIN;
	}

	/* Send the mailbox command */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = ECORE_NOMEM;
		goto err;
	}

	__ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		OSAL_UDELAY(delay);
		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		ecore_mcp_print_cpu_info(p_hwfn, p_ptt);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

		ecore_mcp_cmd_set_blocking(p_hwfn, true);
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
		return ECORE_AGAIN;
	}

	ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp, p_mb_params->mcp_param,
		   (cnt * delay) / 1000, (cnt * delay) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return ECORE_SUCCESS;

err:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}

static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct ecore_mcp_mb_params *p_mb_params)
{
	osal_size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 usecs = ECORE_MCP_RESP_ITER_US;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn))
		return ecore_emul_mcp_cmd(p_hwfn, p_mb_params);

	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		max_retries = ECORE_EMUL_DRV_MB_MAX_RETRIES;
		usecs = ECORE_EMUL_MCP_RESP_ITER_US;
	}
#endif
	if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
		max_retries = DIV_ROUND_UP(max_retries, 1000);
		usecs *= 1000;
	}

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size, p_mb_params->data_dst_size,
		       union_data_size);
		return ECORE_INVAL;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_ABORTED;
	}

	return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
					usecs);
}

enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size, u32 *i_buf)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size, u32 *o_buf)
{
	struct ecore_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	/* @DPDK */
	OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));

	return ECORE_SUCCESS;
}

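/* Decide whether a force load can be sent, either per the client's
 * override or per the default policy - an OS driver may replace a preboot
 * driver, and a kdump driver may replace an OS driver.
 */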
static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
			 enum ecore_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}

static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			   &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

#define CONFIG_ECORE_L2_BITMAP_IDX	(0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX	(0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX	(0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX	(0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX	(0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX	(0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX	(0x1 << 6)

static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}

struct ecore_load_req_in_params {
	u8 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
#define ECORE_LOAD_REQ_HSI_VER_1	1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

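/* Send a single LOAD_REQ mailbox command and parse the MFW response,
 * including the role and versions of an already loaded driver when the
 * MFW provides them.
 */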
static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
		      p_in_params->timeout_val);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
		      p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}

static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
				   u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case ECORE_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case ECORE_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	}
}

enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};

static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
				    u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case ECORE_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case ECORE_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case ECORE_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}

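/* Negotiate a load request with the MFW. If the MFW refuses due to an HSI
 * mismatch, resend the request using the legacy interface; if it requires
 * a force load, either resend the request with LOAD_REQ_FORCE_ALL or
 * cancel it, per the force-load policy.
 */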
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role = 0, mfw_force_cmd;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role,
					     p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
						&mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
		return rc;
	}

	/* Check if there is a DID mismatch between nvm-cfg/efuse */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		DP_NOTICE(p_hwfn, false,
			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	u32 wol_param, mcp_resp, mcp_param;

	/* @DPDK */
	wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
			     &mcp_resp, &mcp_param);
}

enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params mb_params;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

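/* Read the bitmap of FLR-ed VFs from the path section of the SHMEM and
 * let the IOV code mark them for cleanup.
 */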
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
	int i;

	OSAL_MEM_ZERO(disabled_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x],"
		   " path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}

enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;
	u16 i;

	for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = (u8)VF_BITMAP_SIZE_IN_BYTES;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
				     &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	return rc;
}

static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port,
					      transceiver_data));

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw"
		   " [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    OFFSETOF(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_MFW_FIELD(transceiver_state,
					  ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");

	OSAL_TRANSCEIVER_UPDATE(p_hwfn);
}

static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
}

static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct public_func *p_data,
				    int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	OSAL_MEM_ZERO(p_data, sizeof(*p_data));

	size = OSAL_MIN_T(u32, sizeof(*p_data),
			  SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
					      func_addr + (i << 2));

	return size;
}

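/* Process a link-change notification from the MFW: parse the link status
 * from the port section of the SHMEM (or the per-function virtual link
 * when supported), re-apply the bandwidth limits and propagate the update
 * to the upper layer.
 */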
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw"
			   " [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					  OFFSETOF(struct public_port,
						   link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init) {
		/* Link indication with modern MFW arrives as per-PF
		 * indication.
		 */
		if (p_hwfn->mcp_info->capabilities &
		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
			struct public_func shmem_info;

			ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
						 MCP_PF_ID(p_hwfn));
			p_link->link_up = !!(shmem_info.status &
					     FUNC_STATUS_VIRTUAL_LINK_UP);
		} else {
			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
		}
	} else {
		p_link->link_up = false;
	}

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* Preserve the full line speed in line_speed, since p_link->speed
	 * may later be adjusted according to the bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					 LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
	    !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
	    !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	OSAL_LINK_UPDATE(p_hwfn);
out:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}

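/* Translate the cached link configuration into an eth_phy_cfg and send an
 * INIT_PHY/LINK_RESET mailbox command, then mimic a link-change attention
 * since the MFW might not trigger one.
 */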
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (b_up)
			OSAL_LINK_UPDATE(p_hwfn);
		return ECORE_SUCCESS;
	}
#endif

	/* Set the shmem configuration according to params */
	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* Some MFWs advertise this capability regardless of whether it is
	 * actually feasible. Given that at the very least adv_caps would be
	 * set internally by ecore, we want to make sure LFA would still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
	    params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
					EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 *  - On reset, there's no guarantee MFW would trigger
	 *    an attention.
	 *  - On initialization, older MFWs might not indicate link change
	 *    during LFA, so we'll never get an UP indication.
	 */
	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return ECORE_SUCCESS;
}

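/* Read the per-path process-kill counter that the MFW maintains in the
 * PUBLIC_PATH shmem section; it counts how many times a process-kill
 * (recovery) flow was triggered on this engine.
 */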
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
	    PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}

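/* Handle a process-kill indication from the MFW: disable interrupts, mark
 * the device as being in recovery, and schedule the OS-specific recovery
 * handler. In CMT mode only the leading hwfn carries this out.
 */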
static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery"
			  " process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}

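/* Collect protocol statistics from the OS layer and send them to the MFW
 * over the mailbox. Only LAN statistics are supported here; other stats
 * requests are rejected as invalid.
 */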
static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
{
	enum ecore_mcp_protocol_type stats_type;
	union ecore_mcp_protocol_stats stats;
	struct ecore_mcp_mb_params mb_params;
	u32 hsi_param;
	enum _ecore_status_t rc;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	default:
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Invalid protocol type %d\n", type);
		return;
	}

	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}

static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
				    struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100, as well
	 * as some indication that the feature is disabled. Until MFW/qlediag
	 * enforce those limitations, assume there is always a limit, and clamp
	 * the value to min 1 / max 100 if it isn't in range.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
	    FUNC_MF_CFG_MIN_BW_OFFSET;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
	    FUNC_MF_CFG_MAX_BW_OFFSET;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		      &param);
}

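/* Handle an S-tag (outer VLAN) update from the MFW: refresh the cached
 * ovlan value, reprogram the NIG/DORQ registers when outer-VLAN
 * classification is in use, and acknowledge the notification.
 */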
static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
				 MCP_PF_ID(p_hwfn));

	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
						 FUNC_MF_CFG_OV_STAG_MASK;
	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
	if (OSAL_GET_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) {
		if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) {
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
				 p_hwfn->hw_info.ovlan);
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);

			/* Configure DB to add external vlan to EDPM packets */
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID,
				 p_hwfn->hw_info.ovlan);
		} else {
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);

			/* Configure DB to not add external vlan to EDPM
			 * packets
			 */
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID, 0);
		}

		ecore_sp_pf_update_stag(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
	OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
		      &resp, &param);
}

static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	DP_NOTICE(p_hwfn, false,
		  "Fan failure was detected on the network interface card"
		  " and it's going to be shut down.\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}

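/* Generic wrapper for the MDUMP mailbox commands. Each sub-command is
 * carried in the mailbox param field, with optional input/output buffers
 * described by the fields below.
 */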
struct ecore_mdump_cmd_params {
	u32 cmd;
	void *p_data_src;
	u8 data_src_size;
	void *p_data_dst;
	u8 data_dst_size;
	u32 mcp_resp;
};

static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    struct ecore_mdump_cmd_params *p_mdump_cmd_params)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
	mb_params.param = p_mdump_cmd_params->cmd;
	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;

	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
		DP_INFO(p_hwfn,
			"The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
			p_mdump_cmd_params->cmd);
		rc = ECORE_NOTIMPL;
	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The mdump command is not supported by the MFW\n");
		rc = ECORE_NOTIMPL;
	}

	return rc;
}

static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 epoch)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
	mdump_cmd_params.p_data_src = &epoch;
	mdump_cmd_params.data_src_size = sizeof(epoch);

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct mdump_config_stc *p_mdump_config)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
	mdump_cmd_params.p_data_dst = p_mdump_config;
	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);

	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
			mdump_cmd_params.mcp_resp);
		rc = ECORE_UNKNOWN_ERROR;
	}

	return rc;
}

enum _ecore_status_t
ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mdump_info *p_mdump_info)
{
	u32 addr, global_offsize, global_addr;
	struct mdump_config_stc mdump_config;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
		DP_INFO(p_hwfn, "Emulation: Can't get mdump info\n");
		return ECORE_NOTIMPL;
	}
#endif

	OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));

	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
				    PUBLIC_GLOBAL);
	global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	global_addr = SECTION_ADDR(global_offsize, 0);
	p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
					global_addr +
					OFFSETOF(struct public_global,
						 mdump_reason));

	if (p_mdump_info->reason) {
		rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_mdump_info->version = mdump_config.version;
		p_mdump_info->config = mdump_config.config;
		p_mdump_info->epoch = mdump_config.epoc;
		p_mdump_info->num_of_logs = mdump_config.num_of_logs;
		p_mdump_info->valid_logs = mdump_config.valid_logs;

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
			   p_mdump_info->reason, p_mdump_info->version,
			   p_mdump_info->config, p_mdump_info->epoch,
			   p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d\n", p_mdump_info->reason);
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t
ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mdump_retain_data *p_mdump_retain)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;
	struct mdump_retain_data_stc mfw_mdump_retain;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
	mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
	mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);

	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to get the mdump retained data [mcp_resp 0x%x]\n",
			mdump_cmd_params.mcp_resp);
		return ECORE_UNKNOWN_ERROR;
	}

	p_mdump_retain->valid = mfw_mdump_retain.valid;
	p_mdump_retain->epoch = mfw_mdump_retain.epoch;
	p_mdump_retain->pf = mfw_mdump_retain.pf;
	p_mdump_retain->status = mfw_mdump_retain.status;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_retain_data mdump_retain;
	enum _ecore_status_t rc;

	/* In CMT mode - no need for more than a single acknowledgment to the
	 * MFW, and no more than a single notification to the upper driver.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
	if (rc == ECORE_SUCCESS && mdump_retain.valid) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
			  mdump_retain.epoch, mdump_retain.pf,
			  mdump_retain.status);
	} else {
		DP_NOTICE(p_hwfn, false,
			  "The MFW notified that a critical error occurred in the device\n");
	}

	if (p_hwfn->p_dev->allow_mdump) {
		DP_NOTICE(p_hwfn, false,
			  "Not acknowledging the notification to allow the MFW crash dump\n");
		return;
	}

	DP_NOTICE(p_hwfn, false,
		  "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
	ecore_mcp_mdump_ack(p_hwfn, p_ptt);
	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
}

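/* Read the UFP (Unified Fabric Port) configuration from shmem: the channel
 * type, the scheduling mode (ETS vs. vNIC BW) and the host priority control,
 * and cache them in p_hwfn->ufp_info.
 */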
void
ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 port_cfg, val;

	if (!OSAL_GET_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
		return;

	OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
	port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			    OFFSETOF(struct public_port, oem_cfg_port));
	val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
		DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n",
			  val);

	val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
	if (val == OEM_CFG_SCHED_TYPE_ETS)
		p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
	else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
		p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
	else
		DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
			  val);

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
				 MCP_PF_ID(p_hwfn));
	val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
	p_hwfn->ufp_info.tc = (u8)val;
	val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
			    OEM_CFG_FUNC_HOST_PRI_CTRL);
	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
		p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
	else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
		p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
	else
		DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
			  val);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
		   p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
		   p_hwfn->ufp_info.pri_type);
}

static enum _ecore_status_t
ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	ecore_mcp_read_ufp_config(p_hwfn, p_ptt);

	if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
		p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;

		ecore_qm_reconf(p_hwfn, p_ptt);
	} else {
		/* Merge UFP TC with the dcbx TC data */
		ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
					    ECORE_DCBX_OPERATIONAL_MIB);
	}

	/* update storm FW with negotiation results */
	ecore_sp_pf_update_ufp(p_hwfn);

	/* update stag pcp value */
	ecore_sp_pf_update_stag(p_hwfn);

	return ECORE_SUCCESS;
}

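/* Dispatch pending MFW -> driver mailbox messages. Each changed dword in the
 * MFW mailbox is matched against the known message types, handled, and then
 * acknowledged back to the MFW in big-endian format.
 */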
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *info = p_hwfn->mcp_info;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	ecore_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_OPERATIONAL_MIB);
			/* clear the user-config cache */
			OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
				    sizeof(struct ecore_dcbx_set));
			break;
		case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
			ecore_lldp_mib_update_event(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_OEM_CFG_UPDATE:
			ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			ecore_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_S_TAG_UPDATE:
			ecore_mcp_update_stag(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			ecore_mcp_handle_fan_failure(p_hwfn);
			break;
		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
			ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = ECORE_INVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

		/* The MFW expects the answer in BE, so force the write in
		 * that format.
		 */
		ecore_wr(p_hwfn, p_ptt,
			 info->mfw_mb_addr + sizeof(u32) +
			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
			 sizeof(u32) + i * sizeof(u32), val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn, false,
			  "Received an MFW message indication but no"
			  " new message!\n");
		rc = ECORE_INVAL;
	}

	/* Copy the new mfw messages into the shadow */
	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}

enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt,
					   u32 *p_mfw_ver,
					   u32 *p_running_bundle_id)
{
	u32 global_offsize;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
		DP_INFO(p_hwfn, "Emulation: Can't get MFW version\n");
		return ECORE_NOTIMPL;
	}
#endif

	if (IS_VF(p_hwfn->p_dev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return ECORE_SUCCESS;
		} else {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return ECORE_INVAL;
		}
	}

	global_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
						       public_base,
						       PUBLIC_GLOBAL));
	*p_mfw_ver =
	    ecore_rd(p_hwfn, p_ptt,
		     SECTION_ADDR(global_offsize,
				  0) + OFFSETOF(struct public_global, mfw_ver));

	if (p_running_bundle_id != OSAL_NULL) {
		*p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
						SECTION_ADDR(global_offsize,
							     0) +
						OFFSETOF(struct public_global,
							 running_bundle_id));
	}

	return ECORE_SUCCESS;
}

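/* Read the MBI image version from the nvm_cfg1 global section in the MCP
 * scratchpad. Returns -EINVAL for VFs or when shmem is not initialized.
 */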
int ecore_mcp_get_mbi_ver(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt, u32 *p_mbi_ver)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
		DP_INFO(p_hwfn, "Emulation: Can't get MBI version\n");
		return -EOPNOTSUPP;
	}
#endif

	if (IS_VF(p_hwfn->p_dev))
		return -EINVAL;

	/* Read the address of the nvm_cfg */
	nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read the offset of nvm_cfg1 */
	nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	    offsetof(struct nvm_cfg1, glob) + offsetof(struct nvm_cfg1_glob,
						       mbi_version);
	*p_mbi_ver =
	    ecore_rd(p_hwfn, p_ptt,
		     mbi_ver_addr) & (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
				      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
				      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);

	return 0;
}

enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u32 *p_media_type)
{
	*p_media_type = MEDIA_UNSPECIFIED;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	if (!ecore_mcp_is_init(p_hwfn)) {
#ifndef ASIC_ONLY
		if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
			DP_INFO(p_hwfn, "Emulation: Can't get media type\n");
			return ECORE_NOTIMPL;
		}
#endif
		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (!p_ptt)
		return ECORE_INVAL;

	*p_media_type = ecore_rd(p_hwfn, p_ptt,
				 p_hwfn->mcp_info->port_addr +
				 OFFSETOF(struct public_port, media_type));

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt,
						    u32 *p_transceiver_state,
						    u32 *p_transceiver_type)
{
	u32 transceiver_info;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	*p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
	*p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;

	transceiver_info = ecore_rd(p_hwfn, p_ptt,
				    p_hwfn->mcp_info->port_addr +
				    offsetof(struct public_port,
				    transceiver_data));

	*p_transceiver_state = GET_MFW_FIELD(transceiver_info,
					     ETH_TRANSCEIVER_STATE);

	if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) {
		*p_transceiver_type = GET_MFW_FIELD(transceiver_info,
					    ETH_TRANSCEIVER_TYPE);
	} else {
		*p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
	}

	return rc;
}

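/* A transceiver is considered ready when it is present, not in the middle of
 * an update, and of a known type.
 */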
static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type)
{
	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
	    ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
	    (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
		return 1;

	return 0;
}

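/* Derive the supported speed mask from the transceiver type reported by the
 * MFW. Unknown types fall back to a permissive 0xff mask.
 */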
enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 *p_speed_mask)
{
	u32 transceiver_type = ETH_TRANSCEIVER_TYPE_NONE, transceiver_state;

	ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
				       &transceiver_type);

	if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
		return ECORE_INVAL;

	switch (transceiver_type) {
	case ETH_TRANSCEIVER_TYPE_1G_LX:
	case ETH_TRANSCEIVER_TYPE_1G_SX:
	case ETH_TRANSCEIVER_TYPE_1G_PCC:
	case ETH_TRANSCEIVER_TYPE_1G_ACC:
	case ETH_TRANSCEIVER_TYPE_1000BASET:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;

	case ETH_TRANSCEIVER_TYPE_10G_SR:
	case ETH_TRANSCEIVER_TYPE_10G_LR:
	case ETH_TRANSCEIVER_TYPE_10G_LRM:
	case ETH_TRANSCEIVER_TYPE_10G_ER:
	case ETH_TRANSCEIVER_TYPE_10G_PCC:
	case ETH_TRANSCEIVER_TYPE_10G_ACC:
	case ETH_TRANSCEIVER_TYPE_4x10G:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;

	case ETH_TRANSCEIVER_TYPE_40G_LR4:
	case ETH_TRANSCEIVER_TYPE_40G_SR4:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;

	case ETH_TRANSCEIVER_TYPE_100G_AOC:
	case ETH_TRANSCEIVER_TYPE_100G_SR4:
	case ETH_TRANSCEIVER_TYPE_100G_LR4:
	case ETH_TRANSCEIVER_TYPE_100G_ER4:
	case ETH_TRANSCEIVER_TYPE_100G_ACC:
		*p_speed_mask =
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		break;

	case ETH_TRANSCEIVER_TYPE_25G_SR:
	case ETH_TRANSCEIVER_TYPE_25G_LR:
	case ETH_TRANSCEIVER_TYPE_25G_AOC:
	case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
	case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
	case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		break;

	case ETH_TRANSCEIVER_TYPE_25G_CA_N:
	case ETH_TRANSCEIVER_TYPE_25G_CA_S:
	case ETH_TRANSCEIVER_TYPE_25G_CA_L:
	case ETH_TRANSCEIVER_TYPE_4x25G_CR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;

	case ETH_TRANSCEIVER_TYPE_40G_CR4:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;

	case ETH_TRANSCEIVER_TYPE_100G_CR4:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
		*p_speed_mask =
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;

	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
		*p_speed_mask =
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;

	case ETH_TRANSCEIVER_TYPE_XLPPI:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		break;

	case ETH_TRANSCEIVER_TYPE_10G_BASET:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;

	default:
		DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
			transceiver_type);
		*p_speed_mask = 0xff;
		break;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 *p_board_config)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}
	if (!p_ptt) {
		*p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
		rc = ECORE_INVAL;
	} else {
		nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
					MISC_REG_GEN_PURP_CR0);
		nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,
					   nvm_cfg_addr + 4);
		port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
		*p_board_config = ecore_rd(p_hwfn, p_ptt,
					   port_cfg_addr +
					   offsetof(struct nvm_cfg1_port,
					   board_cfg));
	}

	return rc;
}

/* @DPDK */
/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
				 enum ecore_pci_personality *p_proto)
{
	*p_proto = ECORE_PCI_ETH;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32)*p_proto);
}

/* @DPDK */
static enum _ecore_status_t
ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      enum ecore_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32)*p_proto, resp, param);
	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
			  struct public_func *p_info,
			  struct ecore_ptt *p_ptt,
			  enum ecore_pci_personality *p_proto)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
		    ECORE_SUCCESS)
			ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	default:
		rc = ECORE_INVAL;
	}

	return rc;
}

enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *info;
	struct public_func shmem_info;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				      &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return ECORE_INVAL;
	}

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		/* TODO - are there protocols for which there's no MAC? */
		DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
	}

	/* TODO - are these calculations true for a BE machine? */
	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	/* Fall back to a default MTU if shmem provides none */
	info->mtu = (u16)shmem_info.mtu_size;
	if (info->mtu == 0)
		info->mtu = 1500;

	DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x"
		    " protocol %02x BW [%02x - %02x]"
		    " MAC " RTE_ETHER_ADDR_PRT_FMT " wwn port %lx"
		    " node %lx ovlan %04x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   (unsigned long)info->wwn_port,
		   (unsigned long)info->wwn_node, info->ovlan);

	return ECORE_SUCCESS;
}

struct ecore_mcp_link_params
*ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct ecore_mcp_link_state
*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
		p_hwfn->mcp_info->link_output.link_up = true;
	}
#endif

	return &p_hwfn->mcp_info->link_output;
}

struct ecore_mcp_link_capabilities
*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt,
			   DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	OSAL_MSLEEP(1020);

	return rc;
}

const struct ecore_mcp_function_info
*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->func_info;
}

int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt, u32 personalities)
{
	enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
	struct public_func shmem_info;
	int i, count = 0, num_pfs;

	num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);

	for (i = 0; i < num_pfs; i++) {
		ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
					 MCP_PF_ID_BY_REL(p_hwfn, i));
		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
					      &protocol) !=
		    ECORE_SUCCESS)
			continue;

		if ((1 << ((u32)protocol)) & personalities)
			count++;
	}

	return count;
}

enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u32 *p_flash_size)
{
	u32 flash_size;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
		DP_INFO(p_hwfn, "Emulation: Can't get flash size\n");
		return ECORE_NOTIMPL;
	}
#endif

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));

	*p_flash_size = flash_size;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Avoid triggering a recovery since such a process"
			  " is already in progress\n");
		return ECORE_AGAIN;
	}

	DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);

	return ECORE_SUCCESS;
}

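/* On BB (CMT-capable) devices only the leading hwfn may configure VF MSI-X,
 * and the requested SB count is scaled by the number of hwfns.
 */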
static enum _ecore_status_t
ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	enum _ecore_status_t rc;

	/* Only the leader can configure MSIX, and it needs to take CMT into
	 * account.
	 */
	if (!IS_LEAD_HWFN(p_hwfn))
		return ECORE_SUCCESS;
	num *= p_hwfn->p_dev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
	    DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
	    DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			   &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
			  vf_id);
		rc = ECORE_INVAL;
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts for VF 0x%02x\n",
			    num, vf_id);
	}

	return rc;
}

static enum _ecore_status_t
ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u8 num)
{
	u32 resp = 0, param = num, rc_param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
			   param, &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
		DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
		rc = ECORE_INVAL;
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts for VFs\n",
			   num);
	}

	return rc;
}

enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u8 vf_id, u8 num)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
		DP_INFO(p_hwfn,
			"Emulation: Avoid sending the %s mailbox command\n",
			ECORE_IS_BB(p_hwfn->p_dev) ? "CFG_VF_MSIX" :
						     "CFG_PF_VFS_MSIX");
		return ECORE_SUCCESS;
	}
#endif

	if (ECORE_IS_BB(p_hwfn->p_dev))
		return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
	else
		return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
}

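/* Send the driver version string to the MFW; the name is packed into the
 * mailbox union as big-endian dwords. Illustrative caller sketch (hedged:
 * "major"/"minor"/"rev"/"eng" and the "qede rte" string below are
 * hypothetical, not part of this API):
 *
 *	struct ecore_mcp_drv_version ver = { 0 };
 *
 *	ver.version = (major << 24) | (minor << 16) | (rev << 8) | eng;
 *	OSAL_MEMCPY(ver.name, "qede rte", sizeof("qede rte"));
 *	rc = ecore_mcp_send_drv_version(p_hwfn, p_ptt, &ver);
 */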
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mcp_drv_version *p_ver)
{
	struct ecore_mcp_mb_params mb_params;
	struct drv_version_stc drv_version;
	u32 num_words, i;
	void *p_name;
	OSAL_BE32 val;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
	drv_version.version = p_ver->version;
	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
	for (i = 0; i < num_words; i++) {
		/* The driver name is expected to be in a big-endian format */
		p_name = &p_ver->name[i * sizeof(u32)];
		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
		*(u32 *)&drv_version.name[i * sizeof(u32)] = val;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &drv_version;
	mb_params.data_src_size = sizeof(drv_version);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

/* Wait at most 100 msec for the MCP to halt */
#define ECORE_MCP_HALT_SLEEP_MS		10
#define ECORE_MCP_HALT_MAX_RETRIES	10

enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0, cpu_state, cnt = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	do {
		OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
		cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
			break;
	} while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);

	if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
		return ECORE_BUSY;
	}

	ecore_mcp_cmd_set_blocking(p_hwfn, true);

	return ECORE_SUCCESS;
}

#define ECORE_MCP_RESUME_SLEEP_MS	10

enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state;

	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);

	OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);

	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  cpu_mode, cpu_state);
		return ECORE_BUSY;
	}

	ecore_mcp_cmd_set_blocking(p_hwfn, false);

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   enum ecore_ov_client client)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	enum _ecore_status_t rc;

	switch (client) {
	case ECORE_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case ECORE_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case ECORE_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

enum _ecore_status_t
ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 enum ecore_ov_driver_state drv_state)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	enum _ecore_status_t rc;

	switch (drv_state) {
	case ECORE_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case ECORE_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case ECORE_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}

enum _ecore_status_t
ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_fc_npiv_tbl *p_table)
{
	return 0;
}

enum _ecore_status_t
ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			u16 mtu)
{
	u32 resp = 0, param = 0, drv_mb_param = 0;
	enum _ecore_status_t rc;

	SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_OV_MTU_SIZE, (u32)mtu);
	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);

	return rc;
}

enum _ecore_status_t
ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			u8 *mac)
{
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
	SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_VMAC_TYPE,
		      DRV_MSG_CODE_VMAC_TYPE_MAC);
	mb_params.param |= MCP_PF_ID(p_hwfn);
	OSAL_MEMCPY(&union_data.raw_data, mac, ETH_ALEN);
	mb_params.p_data_src = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);

	return rc;
}

enum _ecore_status_t
ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			    enum ecore_ov_eswitch eswitch)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;
	u32 drv_mb_param;

	switch (eswitch) {
	case ECORE_OV_ESWITCH_NONE:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
		break;
	case ECORE_OV_ESWITCH_VEB:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
		break;
	case ECORE_OV_ESWITCH_VEPA:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);

	return rc;
}

3071 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
3072 				       struct ecore_ptt *p_ptt,
3073 				       enum ecore_led_mode mode)
3074 {
3075 	u32 resp = 0, param = 0, drv_mb_param;
3076 	enum _ecore_status_t rc;
3077 
3078 	switch (mode) {
3079 	case ECORE_LED_MODE_ON:
3080 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
3081 		break;
3082 	case ECORE_LED_MODE_OFF:
3083 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
3084 		break;
3085 	case ECORE_LED_MODE_RESTORE:
3086 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
3087 		break;
3088 	default:
3089 		DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
3090 		return ECORE_INVAL;
3091 	}
3092 
3093 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
3094 			   drv_mb_param, &resp, &param);
3095 	if (rc != ECORE_SUCCESS)
3096 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
3097 
3098 	return rc;
3099 }
3100 
3101 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
3102 					     struct ecore_ptt *p_ptt,
3103 					     u32 mask_parities)
3104 {
3105 	u32 resp = 0, param = 0;
3106 	enum _ecore_status_t rc;
3107 
3108 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
3109 			   mask_parities, &resp, &param);
3110 
3111 	if (rc != ECORE_SUCCESS) {
3112 		DP_ERR(p_hwfn,
3113 		       "MCP response failure for mask parities, aborting\n");
3114 	} else if (resp != FW_MSG_CODE_OK) {
3115 		DP_ERR(p_hwfn,
3116 		       "MCP did not ack mask parity request. Old MFW?\n");
3117 		rc = ECORE_INVAL;
3118 	}
3119 
3120 	return rc;
3121 }
3122 
3123 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
3124 					u8 *p_buf, u32 len)
3125 {
3126 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3127 	u32 bytes_left, offset, bytes_to_copy, buf_size;
3128 	u32 nvm_offset, resp, param;
3129 	struct ecore_ptt *p_ptt;
3130 	enum _ecore_status_t rc = ECORE_SUCCESS;
3131 
3132 	p_ptt = ecore_ptt_acquire(p_hwfn);
3133 	if (!p_ptt)
3134 		return ECORE_BUSY;
3135 
3136 	bytes_left = len;
3137 	offset = 0;
3138 	while (bytes_left > 0) {
3139 		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3140 					   MCP_DRV_NVM_BUF_LEN);
3141 		nvm_offset = (addr + offset) | (bytes_to_copy <<
3142 						DRV_MB_PARAM_NVM_LEN_OFFSET);
3143 		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3144 					  DRV_MSG_CODE_NVM_READ_NVRAM,
3145 					  nvm_offset, &resp, &param, &buf_size,
3146 					  (u32 *)(p_buf + offset));
3147 		if (rc != ECORE_SUCCESS) {
3148 			DP_NOTICE(p_dev, false,
3149 				  "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
3150 				  rc);
3151 			resp = FW_MSG_CODE_ERROR;
3152 			break;
3153 		}
3154 
3155 		if (resp != FW_MSG_CODE_NVM_OK) {
3156 			DP_NOTICE(p_dev, false,
3157 				  "nvm read failed, resp = 0x%08x\n", resp);
3158 			rc = ECORE_UNKNOWN_ERROR;
3159 			break;
3160 		}
3161 
3162 		/* This can be a lengthy process, and the scheduler may not be
3163 		 * preemptible. Sleep a bit to prevent CPU hogging.
3164 		 */
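		/* Editor's note (added): the check below detects a 4KB
		 * boundary crossing - subtracting buf_size wraps the value
		 * modulo 0x1000. E.g. bytes_left = 0x1010 and buf_size = 0x20
		 * gives 0x010 < 0xff0, so the driver yields for 1 msec.
		 */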
3165 		if (bytes_left % 0x1000 <
3166 		    (bytes_left - buf_size) % 0x1000)
3167 			OSAL_MSLEEP(1);
3168 
3169 		offset += buf_size;
3170 		bytes_left -= buf_size;
3171 	}
3172 
3173 	p_dev->mcp_nvm_resp = resp;
3174 	ecore_ptt_release(p_hwfn, p_ptt);
3175 
3176 	return rc;
3177 }
3178 
3179 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
3180 					u32 addr, u8 *p_buf, u32 *p_len)
3181 {
3182 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3183 	struct ecore_ptt *p_ptt;
3184 	u32 resp = 0, param;
3185 	enum _ecore_status_t rc;
3186 
3187 	p_ptt = ecore_ptt_acquire(p_hwfn);
3188 	if (!p_ptt)
3189 		return ECORE_BUSY;
3190 
3191 	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3192 				  (cmd == ECORE_PHY_CORE_READ) ?
3193 				  DRV_MSG_CODE_PHY_CORE_READ :
3194 				  DRV_MSG_CODE_PHY_RAW_READ,
3195 				  addr, &resp, &param, p_len, (u32 *)p_buf);
3196 	if (rc != ECORE_SUCCESS)
3197 		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3198 
3199 	p_dev->mcp_nvm_resp = resp;
3200 	ecore_ptt_release(p_hwfn, p_ptt);
3201 
3202 	return rc;
3203 }
3204 
3205 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
3206 {
3207 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3208 	struct ecore_ptt *p_ptt;
3209 
3210 	p_ptt = ecore_ptt_acquire(p_hwfn);
3211 	if (!p_ptt)
3212 		return ECORE_BUSY;
3213 
3214 	OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
3215 	ecore_ptt_release(p_hwfn, p_ptt);
3216 
3217 	return ECORE_SUCCESS;
3218 }
3219 
3220 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
3221 {
3222 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3223 	struct ecore_ptt *p_ptt;
3224 	u32 resp = 0, param;
3225 	enum _ecore_status_t rc;
3226 
3227 	p_ptt = ecore_ptt_acquire(p_hwfn);
3228 	if (!p_ptt)
3229 		return ECORE_BUSY;
3230 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
3231 			   &resp, &param);
3232 	p_dev->mcp_nvm_resp = resp;
3233 	ecore_ptt_release(p_hwfn, p_ptt);
3234 
3235 	return rc;
3236 }
3237 
3238 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
3239 						  u32 addr)
3240 {
3241 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3242 	struct ecore_ptt *p_ptt;
3243 	u32 resp = 0, param;
3244 	enum _ecore_status_t rc;
3245 
3246 	p_ptt = ecore_ptt_acquire(p_hwfn);
3247 	if (!p_ptt)
3248 		return ECORE_BUSY;
3249 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
3250 			   &resp, &param);
3251 	p_dev->mcp_nvm_resp = resp;
3252 	ecore_ptt_release(p_hwfn, p_ptt);
3253 
3254 	return rc;
3255 }
3256 
3257 /* rc defaults to ECORE_INVAL because the while loop below
3258  * is never entered when len is 0
3259  */
3260 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
3261 					 u32 addr, u8 *p_buf, u32 len)
3262 {
3263 	u32 buf_idx, buf_size, nvm_cmd, nvm_offset;
3264 	u32 resp = FW_MSG_CODE_ERROR, param;
3265 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3266 	enum _ecore_status_t rc = ECORE_INVAL;
3267 	struct ecore_ptt *p_ptt;
3268 
3269 	p_ptt = ecore_ptt_acquire(p_hwfn);
3270 	if (!p_ptt)
3271 		return ECORE_BUSY;
3272 
3273 	switch (cmd) {
3274 	case ECORE_PUT_FILE_DATA:
3275 		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
3276 		break;
3277 	case ECORE_NVM_WRITE_NVRAM:
3278 		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
3279 		break;
3280 	case ECORE_EXT_PHY_FW_UPGRADE:
3281 		nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
3282 		break;
3283 	default:
3284 		DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
3285 			  cmd);
3286 		rc = ECORE_INVAL;
3287 		goto out;
3288 	}
3289 
3290 	buf_idx = 0;
3291 	while (buf_idx < len) {
3292 		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3293 				      MCP_DRV_NVM_BUF_LEN);
3294 		nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
3295 			      addr) +
3296 			     buf_idx;
3297 		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
3298 					  &resp, &param, buf_size,
3299 					  (u32 *)&p_buf[buf_idx]);
3300 		if (rc != ECORE_SUCCESS) {
3301 			DP_NOTICE(p_dev, false,
3302 				  "ecore_mcp_nvm_write() failed, rc = %d\n",
3303 				  rc);
3304 			resp = FW_MSG_CODE_ERROR;
3305 			break;
3306 		}
3307 
3308 		if (resp != FW_MSG_CODE_OK &&
3309 		    resp != FW_MSG_CODE_NVM_OK &&
3310 		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
3311 			DP_NOTICE(p_dev, false,
3312 				  "nvm write failed, resp = 0x%08x\n", resp);
3313 			rc = ECORE_UNKNOWN_ERROR;
3314 			break;
3315 		}
3316 
3317 		/* This can be a lengthy process, and the scheduler may not be
3318 		 * preemptible. Sleep a bit to prevent CPU hogging.
3319 		 */
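		/* Editor's note (added): here the offset grows, so a 4KB
		 * boundary is crossed when the remainder wraps downward.
		 * E.g. buf_idx = 0xff0 and buf_size = 0x20 gives
		 * 0xff0 > 0x010, so the driver yields for 1 msec.
		 */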
3320 		if (buf_idx % 0x1000 >
3321 		    (buf_idx + buf_size) % 0x1000)
3322 			OSAL_MSLEEP(1);
3323 
3324 		buf_idx += buf_size;
3325 	}
3326 
3327 	p_dev->mcp_nvm_resp = resp;
3328 out:
3329 	ecore_ptt_release(p_hwfn, p_ptt);
3330 
3331 	return rc;
3332 }
3333 
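/* Editor's illustrative sketch (not upstream code; the guard macro and the
 * example_* name are hypothetical): the put-file flow implied by the helpers
 * above - announce the file address first, then stream the data, which
 * ecore_mcp_nvm_write() splits into MCP_DRV_NVM_BUF_LEN chunks.
 */
#ifdef ECORE_EDITOR_EXAMPLES
static enum _ecore_status_t
example_nvm_put_file(struct ecore_dev *p_dev, u32 addr, u8 *p_data, u32 len)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_nvm_put_file_begin(p_dev, addr);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_mcp_nvm_write(p_dev, ECORE_PUT_FILE_DATA, addr,
				   p_data, len);
}
#endif
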
3334 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
3335 					 u32 addr, u8 *p_buf, u32 len)
3336 {
3337 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3338 	u32 resp = 0, param, nvm_cmd;
3339 	struct ecore_ptt *p_ptt;
3340 	enum _ecore_status_t rc;
3341 
3342 	p_ptt = ecore_ptt_acquire(p_hwfn);
3343 	if (!p_ptt)
3344 		return ECORE_BUSY;
3345 
3346 	nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
3347 			DRV_MSG_CODE_PHY_RAW_WRITE;
3348 	rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
3349 				  &resp, &param, len, (u32 *)p_buf);
3350 	if (rc != ECORE_SUCCESS)
3351 		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3352 	p_dev->mcp_nvm_resp = resp;
3353 	ecore_ptt_release(p_hwfn, p_ptt);
3354 
3355 	return rc;
3356 }
3357 
3358 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
3359 						   u32 addr)
3360 {
3361 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3362 	struct ecore_ptt *p_ptt;
3363 	u32 resp = 0, param;
3364 	enum _ecore_status_t rc;
3365 
3366 	p_ptt = ecore_ptt_acquire(p_hwfn);
3367 	if (!p_ptt)
3368 		return ECORE_BUSY;
3369 
3370 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
3371 			   &resp, &param);
3372 	p_dev->mcp_nvm_resp = resp;
3373 	ecore_ptt_release(p_hwfn, p_ptt);
3374 
3375 	return rc;
3376 }
3377 
3378 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
3379 					    struct ecore_ptt *p_ptt,
3380 					    u32 port, u32 addr, u32 offset,
3381 					    u32 len, u8 *p_buf)
3382 {
3383 	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
3384 	u32 resp, param;
3385 	enum _ecore_status_t rc;
3386 
3387 	nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3388 			(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3389 	addr = offset;
3390 	offset = 0;
3391 	bytes_left = len;
3392 	while (bytes_left > 0) {
3393 		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3394 					   MAX_I2C_TRANSACTION_SIZE);
3395 		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3396 			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3397 		nvm_offset |= ((addr + offset) <<
3398 				DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3399 		nvm_offset |= (bytes_to_copy <<
3400 			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3401 		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3402 					  DRV_MSG_CODE_TRANSCEIVER_READ,
3403 					  nvm_offset, &resp, &param, &buf_size,
3404 					  (u32 *)(p_buf + offset));
3405 		if (rc != ECORE_SUCCESS) {
3406 			DP_NOTICE(p_hwfn, false,
3407 				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
3408 				  rc);
3409 			return rc;
3410 		}
3411 
3412 		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3413 			return ECORE_NODEV;
3414 		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3415 			return ECORE_UNKNOWN_ERROR;
3416 
3417 		offset += buf_size;
3418 		bytes_left -= buf_size;
3419 	}
3420 
3421 	return ECORE_SUCCESS;
3422 }
3423 
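/* Editor's illustrative sketch (not upstream code; the guard macro and the
 * example_* name are hypothetical): reading the first identification bytes
 * of a transceiver EEPROM at the conventional I2C address 0xA0 with the
 * helper above, which packs port/address/offset/size into the mailbox param
 * and loops in MAX_I2C_TRANSACTION_SIZE chunks.
 */
#ifdef ECORE_EDITOR_EXAMPLES
static enum _ecore_status_t
example_read_sfp_id(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    u8 *p_id_buf)
{
	/* Port 0, I2C address 0xA0, EEPROM offset 0, 4 bytes */
	return ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 0, 0xA0, 0, 4, p_id_buf);
}
#endif
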
3424 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
3425 					     struct ecore_ptt *p_ptt,
3426 					     u32 port, u32 addr, u32 offset,
3427 					     u32 len, u8 *p_buf)
3428 {
3429 	u32 buf_idx, buf_size, nvm_offset, resp, param;
3430 	enum _ecore_status_t rc;
3431 
3432 	nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3433 			(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3434 	buf_idx = 0;
3435 	while (buf_idx < len) {
3436 		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3437 				      MAX_I2C_TRANSACTION_SIZE);
3438 		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3439 				 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3440 		nvm_offset |= ((offset + buf_idx) <<
3441 				 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3442 		nvm_offset |= (buf_size <<
3443 			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3444 		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
3445 					  DRV_MSG_CODE_TRANSCEIVER_WRITE,
3446 					  nvm_offset, &resp, &param, buf_size,
3447 					  (u32 *)&p_buf[buf_idx]);
3448 		if (rc != ECORE_SUCCESS) {
3449 			DP_NOTICE(p_hwfn, false,
3450 				  "Failed to send a transceiver write command to the MFW. rc = %d.\n",
3451 				  rc);
3452 			return rc;
3453 		}
3454 
3455 		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3456 			return ECORE_NODEV;
3457 		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3458 			return ECORE_UNKNOWN_ERROR;
3459 
3460 		buf_idx += buf_size;
3461 	}
3462 
3463 	return ECORE_SUCCESS;
3464 }
3465 
3466 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
3467 					 struct ecore_ptt *p_ptt,
3468 					 u16 gpio, u32 *gpio_val)
3469 {
3470 	enum _ecore_status_t rc = ECORE_SUCCESS;
3471 	u32 drv_mb_param = 0, rsp = 0;
3472 
3473 	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
3474 
3475 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
3476 			   drv_mb_param, &rsp, gpio_val);
3477 
3478 	if (rc != ECORE_SUCCESS)
3479 		return rc;
3480 
3481 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3482 		return ECORE_UNKNOWN_ERROR;
3483 
3484 	return ECORE_SUCCESS;
3485 }
3486 
3487 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
3488 					  struct ecore_ptt *p_ptt,
3489 					  u16 gpio, u16 gpio_val)
3490 {
3491 	enum _ecore_status_t rc = ECORE_SUCCESS;
3492 	u32 drv_mb_param = 0, param, rsp = 0;
3493 
3494 	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
3495 		(gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
3496 
3497 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
3498 			   drv_mb_param, &rsp, &param);
3499 
3500 	if (rc != ECORE_SUCCESS)
3501 		return rc;
3502 
3503 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3504 		return ECORE_UNKNOWN_ERROR;
3505 
3506 	return ECORE_SUCCESS;
3507 }
3508 
3509 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
3510 					 struct ecore_ptt *p_ptt,
3511 					 u16 gpio, u32 *gpio_direction,
3512 					 u32 *gpio_ctrl)
3513 {
3514 	u32 drv_mb_param = 0, rsp, val = 0;
3515 	enum _ecore_status_t rc = ECORE_SUCCESS;
3516 
3517 	drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
3518 
3519 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
3520 			   drv_mb_param, &rsp, &val);
3521 	if (rc != ECORE_SUCCESS)
3522 		return rc;
3523 
3524 	*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
3525 			   DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
3526 	*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
3527 		      DRV_MB_PARAM_GPIO_CTRL_OFFSET;
3528 
3529 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3530 		return ECORE_UNKNOWN_ERROR;
3531 
3532 	return ECORE_SUCCESS;
3533 }
3534 
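/* Editor's illustrative sketch (not upstream code; the guard macro and the
 * example_* name are hypothetical): toggling a GPIO through the MFW with the
 * helpers above - read the current value, then write back its complement.
 */
#ifdef ECORE_EDITOR_EXAMPLES
static enum _ecore_status_t
example_gpio_toggle(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    u16 gpio)
{
	u32 gpio_val = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_gpio_read(p_hwfn, p_ptt, gpio, &gpio_val);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_mcp_gpio_write(p_hwfn, p_ptt, gpio,
				    (u16)(gpio_val ? 0 : 1));
}
#endif
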
3535 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
3536 						  struct ecore_ptt *p_ptt)
3537 {
3538 	u32 drv_mb_param = 0, rsp, param;
3539 	enum _ecore_status_t rc = ECORE_SUCCESS;
3540 
3541 	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3542 			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3543 
3544 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3545 			   drv_mb_param, &rsp, &param);
3546 
3547 	if (rc != ECORE_SUCCESS)
3548 		return rc;
3549 
3550 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3551 	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3552 		rc = ECORE_UNKNOWN_ERROR;
3553 
3554 	return rc;
3555 }
3556 
3557 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
3558 					       struct ecore_ptt *p_ptt)
3559 {
3560 	u32 drv_mb_param, rsp, param;
3561 	enum _ecore_status_t rc = ECORE_SUCCESS;
3562 
3563 	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3564 			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3565 
3566 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3567 			   drv_mb_param, &rsp, &param);
3568 
3569 	if (rc != ECORE_SUCCESS)
3570 		return rc;
3571 
3572 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3573 	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3574 		rc = ECORE_UNKNOWN_ERROR;
3575 
3576 	return rc;
3577 }
3578 
3579 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3580 	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3581 {
3582 	u32 drv_mb_param = 0, rsp = 0;
3583 	enum _ecore_status_t rc = ECORE_SUCCESS;
3584 
3585 	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3586 			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3587 
3588 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3589 			   drv_mb_param, &rsp, num_images);
3590 
3591 	if (rc != ECORE_SUCCESS)
3592 		return rc;
3593 
3594 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
3595 		rc = ECORE_UNKNOWN_ERROR;
3596 
3597 	return rc;
3598 }
3599 
3600 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3601 	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3602 	struct bist_nvm_image_att *p_image_att, u32 image_index)
3603 {
3604 	u32 buf_size, nvm_offset, resp, param;
3605 	enum _ecore_status_t rc;
3606 
3607 	nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3608 				    DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3609 	nvm_offset |= (image_index <<
3610 		       DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
3611 	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3612 				  nvm_offset, &resp, &param, &buf_size,
3613 				  (u32 *)p_image_att);
3614 	if (rc != ECORE_SUCCESS)
3615 		return rc;
3616 
3617 	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3618 	    (p_image_att->return_code != 1))
3619 		rc = ECORE_UNKNOWN_ERROR;
3620 
3621 	return rc;
3622 }
3623 
3624 enum _ecore_status_t
3625 ecore_mcp_bist_nvm_get_num_images(struct ecore_hwfn *p_hwfn,
3626 				  struct ecore_ptt *p_ptt, u32 *num_images)
3627 {
3628 	u32 drv_mb_param = 0, rsp;
3629 	enum _ecore_status_t rc = ECORE_SUCCESS;
3630 
3631 	SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_BIST_TEST_INDEX,
3632 		      DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES);
3633 
3634 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3635 			   drv_mb_param, &rsp, num_images);
3636 	if (rc != ECORE_SUCCESS)
3637 		return rc;
3638 
3639 	if (rsp == FW_MSG_CODE_UNSUPPORTED)
3640 		rc = ECORE_NOTIMPL;
3641 	else if (rsp != FW_MSG_CODE_OK)
3642 		rc = ECORE_UNKNOWN_ERROR;
3643 
3644 	return rc;
3645 }
3646 
3647 enum _ecore_status_t
3648 ecore_mcp_bist_nvm_get_image_att(struct ecore_hwfn *p_hwfn,
3649 				 struct ecore_ptt *p_ptt,
3650 				 struct bist_nvm_image_att *p_image_att,
3651 				 u32 image_index)
3652 {
3653 	u32 buf_size, nvm_offset = 0, resp, param;
3654 	enum _ecore_status_t rc;
3655 
3656 	SET_MFW_FIELD(nvm_offset, DRV_MB_PARAM_BIST_TEST_INDEX,
3657 		      DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX);
3658 	SET_MFW_FIELD(nvm_offset, DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX,
3659 		      image_index);
3660 	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3661 				  nvm_offset, &resp, &param, &buf_size,
3662 				  (u32 *)p_image_att);
3663 	if (rc != ECORE_SUCCESS)
3664 		return rc;
3665 
3666 	if (resp == FW_MSG_CODE_UNSUPPORTED)
3667 		rc = ECORE_NOTIMPL;
3668 	else if ((resp != FW_MSG_CODE_OK) || (p_image_att->return_code != 1))
3669 		rc = ECORE_UNKNOWN_ERROR;
3670 
3671 	return rc;
3672 }
3673 
3674 enum _ecore_status_t ecore_mcp_nvm_info_populate(struct ecore_hwfn *p_hwfn)
3675 {
3676 	struct ecore_nvm_image_info nvm_info;
3677 	struct ecore_ptt *p_ptt;
3678 	enum _ecore_status_t rc;
3679 	u32 i;
3680 
3681 	if (p_hwfn->nvm_info.valid)
3682 		return ECORE_SUCCESS;
3683 
3684 #ifndef ASIC_ONLY
3685 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ||
3686 	    CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
3687 		return ECORE_SUCCESS;
3688 #endif
3689 
3690 	p_ptt = ecore_ptt_acquire(p_hwfn);
3691 	if (!p_ptt) {
3692 		DP_ERR(p_hwfn, "failed to acquire ptt\n");
3693 		return ECORE_BUSY;
3694 	}
3695 
3696 	/* Query the MFW for the number of available images */
3697 	OSAL_MEM_ZERO(&nvm_info, sizeof(nvm_info));
3698 	rc = ecore_mcp_bist_nvm_get_num_images(p_hwfn, p_ptt,
3699 					       &nvm_info.num_images);
3700 	if (rc == ECORE_NOTIMPL) {
3701 		DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
3702 		goto out;
3703 	} else if ((rc != ECORE_SUCCESS) || (nvm_info.num_images == 0)) {
3704 		DP_ERR(p_hwfn, "Failed getting number of images\n");
3705 		goto err0;
3706 	}
3707 
3708 	nvm_info.image_att = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
3709 					 nvm_info.num_images *
3710 					 sizeof(struct bist_nvm_image_att));
3711 	if (!nvm_info.image_att) {
3712 		rc = ECORE_NOMEM;
3713 		goto err0;
3714 	}
3715 
3716 	/* Iterate over images and get their attributes */
3717 	for (i = 0; i < nvm_info.num_images; i++) {
3718 		rc = ecore_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
3719 						      &nvm_info.image_att[i],
3720 						      i);
3721 		if (rc != ECORE_SUCCESS) {
3722 			DP_ERR(p_hwfn,
3723 			       "Failed getting image index %d attributes\n",
3724 			       i);
3725 			goto err1;
3726 		}
3727 
3728 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "image index %d, size %x\n", i,
3729 			   nvm_info.image_att[i].len);
3730 	}
3731 out:
3732 	/* Update hwfn's nvm_info */
3733 	if (nvm_info.num_images) {
3734 		p_hwfn->nvm_info.num_images = nvm_info.num_images;
3735 		if (p_hwfn->nvm_info.image_att)
3736 			OSAL_FREE(p_hwfn->p_dev, p_hwfn->nvm_info.image_att);
3737 		p_hwfn->nvm_info.image_att = nvm_info.image_att;
3738 		p_hwfn->nvm_info.valid = true;
3739 	}
3740 
3741 	ecore_ptt_release(p_hwfn, p_ptt);
3742 	return ECORE_SUCCESS;
3743 
3744 err1:
3745 	OSAL_FREE(p_hwfn->p_dev, nvm_info.image_att);
3746 err0:
3747 	ecore_ptt_release(p_hwfn, p_ptt);
3748 	return rc;
3749 }
3750 
3751 enum _ecore_status_t
3752 ecore_mcp_get_nvm_image_att(struct ecore_hwfn *p_hwfn,
3753 			    enum ecore_nvm_images image_id,
3754 			    struct ecore_nvm_image_att *p_image_att)
3755 {
3756 	enum nvm_image_type type;
3757 	u32 i;
3758 
3759 	/* Translate image_id into MFW definitions */
3760 	switch (image_id) {
3761 	case ECORE_NVM_IMAGE_ISCSI_CFG:
3762 		type = NVM_TYPE_ISCSI_CFG;
3763 		break;
3764 	case ECORE_NVM_IMAGE_FCOE_CFG:
3765 		type = NVM_TYPE_FCOE_CFG;
3766 		break;
3767 	case ECORE_NVM_IMAGE_MDUMP:
3768 		type = NVM_TYPE_MDUMP;
3769 		break;
3770 	case ECORE_NVM_IMAGE_NVM_CFG1:
3771 		type = NVM_TYPE_NVM_CFG1;
3772 		break;
3773 	case ECORE_NVM_IMAGE_DEFAULT_CFG:
3774 		type = NVM_TYPE_DEFAULT_CFG;
3775 		break;
3776 	case ECORE_NVM_IMAGE_NVM_META:
3777 		type = NVM_TYPE_META;
3778 		break;
3779 	default:
3780 		DP_NOTICE(p_hwfn, false, "Unknown image_id %08x\n",
3781 			  image_id);
3782 		return ECORE_INVAL;
3783 	}
3784 
3785 	ecore_mcp_nvm_info_populate(p_hwfn);
3786 	for (i = 0; i < p_hwfn->nvm_info.num_images; i++) {
3787 		if (type == p_hwfn->nvm_info.image_att[i].image_type)
3788 			break;
3789 	}
3790 	if (i == p_hwfn->nvm_info.num_images) {
3791 		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3792 			   "Failed to find nvram image of type %08x\n",
3793 			   image_id);
3794 		return ECORE_NOENT;
3795 	}
3796 
3797 	p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
3798 	p_image_att->length = p_hwfn->nvm_info.image_att[i].len;
3799 
3800 	return ECORE_SUCCESS;
3801 }
3802 
3803 enum _ecore_status_t ecore_mcp_get_nvm_image(struct ecore_hwfn *p_hwfn,
3804 					     enum ecore_nvm_images image_id,
3805 					     u8 *p_buffer, u32 buffer_len)
3806 {
3807 	struct ecore_nvm_image_att image_att;
3808 	enum _ecore_status_t rc;
3809 
3810 	OSAL_MEM_ZERO(p_buffer, buffer_len);
3811 
3812 	rc = ecore_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
3813 	if (rc != ECORE_SUCCESS)
3814 		return rc;
3815 
3816 	/* Validate sizes - both the image's and the supplied buffer's */
3817 	if (image_att.length <= 4) {
3818 		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3819 			   "Image [%d] is too small - only %d bytes\n",
3820 			   image_id, image_att.length);
3821 		return ECORE_INVAL;
3822 	}
3823 
3824 	if (image_att.length > buffer_len) {
3825 		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3826 			   "Image [%d] is too big - %08x bytes where only %08x are available\n",
3827 			   image_id, image_att.length, buffer_len);
3828 		return ECORE_NOMEM;
3829 	}
3830 
3831 	return ecore_mcp_nvm_read(p_hwfn->p_dev, image_att.start_addr,
3832 				  (u8 *)p_buffer, image_att.length);
3833 }
3834 
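/* Editor's illustrative sketch (not upstream code; the guard macro and the
 * example_* name are hypothetical): the two-step pattern served by the
 * helpers above - query the image attributes to validate the caller's
 * buffer, then read the image contents from NVM.
 */
#ifdef ECORE_EDITOR_EXAMPLES
static enum _ecore_status_t
example_read_default_cfg(struct ecore_hwfn *p_hwfn, u8 *p_buf, u32 buf_len)
{
	struct ecore_nvm_image_att att;
	enum _ecore_status_t rc;

	rc = ecore_mcp_get_nvm_image_att(p_hwfn, ECORE_NVM_IMAGE_DEFAULT_CFG,
					 &att);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (att.length > buf_len)
		return ECORE_NOMEM;

	return ecore_mcp_get_nvm_image(p_hwfn, ECORE_NVM_IMAGE_DEFAULT_CFG,
				       p_buf, buf_len);
}
#endif
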
3835 enum _ecore_status_t
3836 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3837 			       struct ecore_ptt *p_ptt,
3838 			       struct ecore_temperature_info *p_temp_info)
3839 {
3840 	struct ecore_temperature_sensor *p_temp_sensor;
3841 	struct temperature_status_stc mfw_temp_info;
3842 	struct ecore_mcp_mb_params mb_params;
3843 	u32 val;
3844 	enum _ecore_status_t rc;
3845 	u8 i;
3846 
3847 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3848 	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3849 	mb_params.p_data_dst = &mfw_temp_info;
3850 	mb_params.data_dst_size = sizeof(mfw_temp_info);
3851 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3852 	if (rc != ECORE_SUCCESS)
3853 		return rc;
3854 
3855 	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3856 	p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3857 					      ECORE_MAX_NUM_OF_SENSORS);
3858 	for (i = 0; i < p_temp_info->num_sensors; i++) {
3859 		val = mfw_temp_info.sensor[i];
3860 		p_temp_sensor = &p_temp_info->sensors[i];
3861 		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3862 						 SENSOR_LOCATION_OFFSET;
3863 		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3864 						THRESHOLD_HIGH_OFFSET;
3865 		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3866 					  CRITICAL_TEMPERATURE_OFFSET;
3867 		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3868 					      CURRENT_TEMP_OFFSET;
3869 	}
3870 
3871 	return ECORE_SUCCESS;
3872 }
3873 
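/* Editor's illustrative sketch (not upstream code; the guard macro and the
 * example_* name are hypothetical, and the exact field types in
 * struct ecore_temperature_info are assumed): logging the per-sensor
 * readings decoded by the helper above.
 */
#ifdef ECORE_EDITOR_EXAMPLES
static void example_log_temperatures(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	struct ecore_temperature_info temp_info;
	u32 i;

	if (ecore_mcp_get_temperature_info(p_hwfn, p_ptt, &temp_info) !=
	    ECORE_SUCCESS)
		return;

	for (i = 0; i < temp_info.num_sensors; i++)
		DP_INFO(p_hwfn,
			"sensor %u: current %d, high %d, critical %d\n",
			i, temp_info.sensors[i].current_temp,
			temp_info.sensors[i].threshold_high,
			temp_info.sensors[i].critical);
}
#endif
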
3874 enum _ecore_status_t ecore_mcp_get_mba_versions(
3875 	struct ecore_hwfn *p_hwfn,
3876 	struct ecore_ptt *p_ptt,
3877 	struct ecore_mba_vers *p_mba_vers)
3878 {
3879 	u32 buf_size, resp, param;
3880 	enum _ecore_status_t rc;
3881 
3882 	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
3883 				  0, &resp, &param, &buf_size,
3884 				  &p_mba_vers->mba_vers[0]);
3885 
3886 	if (rc != ECORE_SUCCESS)
3887 		return rc;
3888 
3889 	if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3890 		rc = ECORE_UNKNOWN_ERROR;
3891 
3892 	if (buf_size != MCP_DRV_NVM_BUF_LEN)
3893 		rc = ECORE_UNKNOWN_ERROR;
3894 
3895 	return rc;
3896 }
3897 
3898 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3899 					      struct ecore_ptt *p_ptt,
3900 					      u64 *num_events)
3901 {
3902 	u32 rsp;
3903 
3904 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
3905 			     0, &rsp, (u32 *)num_events);
3906 }
3907 
3908 static enum resource_id_enum
3909 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3910 {
3911 	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3912 
3913 	switch (res_id) {
3914 	case ECORE_SB:
3915 		mfw_res_id = RESOURCE_NUM_SB_E;
3916 		break;
3917 	case ECORE_L2_QUEUE:
3918 		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3919 		break;
3920 	case ECORE_VPORT:
3921 		mfw_res_id = RESOURCE_NUM_VPORT_E;
3922 		break;
3923 	case ECORE_RSS_ENG:
3924 		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3925 		break;
3926 	case ECORE_PQ:
3927 		mfw_res_id = RESOURCE_NUM_PQ_E;
3928 		break;
3929 	case ECORE_RL:
3930 		mfw_res_id = RESOURCE_NUM_RL_E;
3931 		break;
3932 	case ECORE_MAC:
3933 	case ECORE_VLAN:
3934 		/* Each VFC resource can accommodate both a MAC and a VLAN */
3935 		mfw_res_id = RESOURCE_VFC_FILTER_E;
3936 		break;
3937 	case ECORE_ILT:
3938 		mfw_res_id = RESOURCE_ILT_E;
3939 		break;
3940 	case ECORE_LL2_QUEUE:
3941 		mfw_res_id = RESOURCE_LL2_QUEUE_E;
3942 		break;
3943 	case ECORE_RDMA_CNQ_RAM:
3944 	case ECORE_CMDQS_CQS:
3945 		/* CNQ/CMDQS are the same resource */
3946 		mfw_res_id = RESOURCE_CQS_E;
3947 		break;
3948 	case ECORE_RDMA_STATS_QUEUE:
3949 		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3950 		break;
3951 	case ECORE_BDQ:
3952 		mfw_res_id = RESOURCE_BDQ_E;
3953 		break;
3954 	default:
3955 		break;
3956 	}
3957 
3958 	return mfw_res_id;
3959 }
3960 
3961 #define ECORE_RESC_ALLOC_VERSION_MAJOR	2
3962 #define ECORE_RESC_ALLOC_VERSION_MINOR	0
3963 #define ECORE_RESC_ALLOC_VERSION				\
3964 	((ECORE_RESC_ALLOC_VERSION_MAJOR <<			\
3965 	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) |	\
3966 	 (ECORE_RESC_ALLOC_VERSION_MINOR <<			\
3967 	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
3968 
3969 struct ecore_resc_alloc_in_params {
3970 	u32 cmd;
3971 	enum ecore_resources res_id;
3972 	u32 resc_max_val;
3973 };
3974 
3975 struct ecore_resc_alloc_out_params {
3976 	u32 mcp_resp;
3977 	u32 mcp_param;
3978 	u32 resc_num;
3979 	u32 resc_start;
3980 	u32 vf_resc_num;
3981 	u32 vf_resc_start;
3982 	u32 flags;
3983 };
3984 
3985 #define ECORE_RECOVERY_PROLOG_SLEEP_MS	100
3986 
3987 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
3988 {
3989 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3990 	struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
3991 	enum _ecore_status_t rc;
3992 
3993 	/* Allow ongoing PCIe transactions to complete */
3994 	OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
3995 
3996 	/* Clear the PF's internal FID_enable in the PXP */
3997 	rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
3998 	if (rc != ECORE_SUCCESS)
3999 		DP_NOTICE(p_hwfn, false,
4000 			  "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
4001 			  rc);
4002 
4003 	return rc;
4004 }
4005 
4006 static enum _ecore_status_t
4007 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
4008 			      struct ecore_ptt *p_ptt,
4009 			      struct ecore_resc_alloc_in_params *p_in_params,
4010 			      struct ecore_resc_alloc_out_params *p_out_params)
4011 {
4012 	struct ecore_mcp_mb_params mb_params;
4013 	struct resource_info mfw_resc_info;
4014 	enum _ecore_status_t rc;
4015 
4016 	OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
4017 
4018 	mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
4019 	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
4020 		DP_ERR(p_hwfn,
4021 		       "Failed to match resource %d [%s] with the MFW resources\n",
4022 		       p_in_params->res_id,
4023 		       ecore_hw_get_resc_name(p_in_params->res_id));
4024 		return ECORE_INVAL;
4025 	}
4026 
4027 	switch (p_in_params->cmd) {
4028 	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
4029 		mfw_resc_info.size = p_in_params->resc_max_val;
4030 		/* Fallthrough */
4031 	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
4032 		break;
4033 	default:
4034 		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
4035 		       p_in_params->cmd);
4036 		return ECORE_INVAL;
4037 	}
4038 
4039 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4040 	mb_params.cmd = p_in_params->cmd;
4041 	mb_params.param = ECORE_RESC_ALLOC_VERSION;
4042 	mb_params.p_data_src = &mfw_resc_info;
4043 	mb_params.data_src_size = sizeof(mfw_resc_info);
4044 	mb_params.p_data_dst = mb_params.p_data_src;
4045 	mb_params.data_dst_size = mb_params.data_src_size;
4046 
4047 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4048 		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
4049 		   p_in_params->cmd, p_in_params->res_id,
4050 		   ecore_hw_get_resc_name(p_in_params->res_id),
4051 		   GET_MFW_FIELD(mb_params.param,
4052 				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
4053 		   GET_MFW_FIELD(mb_params.param,
4054 				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
4055 		   p_in_params->resc_max_val);
4056 
4057 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4058 	if (rc != ECORE_SUCCESS)
4059 		return rc;
4060 
4061 	p_out_params->mcp_resp = mb_params.mcp_resp;
4062 	p_out_params->mcp_param = mb_params.mcp_param;
4063 	p_out_params->resc_num = mfw_resc_info.size;
4064 	p_out_params->resc_start = mfw_resc_info.offset;
4065 	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
4066 	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
4067 	p_out_params->flags = mfw_resc_info.flags;
4068 
4069 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4070 		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
4071 		   GET_MFW_FIELD(p_out_params->mcp_param,
4072 				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
4073 		   GET_MFW_FIELD(p_out_params->mcp_param,
4074 				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
4075 		   p_out_params->resc_num, p_out_params->resc_start,
4076 		   p_out_params->vf_resc_num, p_out_params->vf_resc_start,
4077 		   p_out_params->flags);
4078 
4079 	return ECORE_SUCCESS;
4080 }
4081 
4082 enum _ecore_status_t
4083 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4084 			   enum ecore_resources res_id, u32 resc_max_val,
4085 			   u32 *p_mcp_resp)
4086 {
4087 	struct ecore_resc_alloc_out_params out_params;
4088 	struct ecore_resc_alloc_in_params in_params;
4089 	enum _ecore_status_t rc;
4090 
4091 	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
4092 	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
4093 	in_params.res_id = res_id;
4094 	in_params.resc_max_val = resc_max_val;
4095 	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
4096 	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
4097 					   &out_params);
4098 	if (rc != ECORE_SUCCESS)
4099 		return rc;
4100 
4101 	*p_mcp_resp = out_params.mcp_resp;
4102 
4103 	return ECORE_SUCCESS;
4104 }
4105 
4106 enum _ecore_status_t
4107 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4108 			enum ecore_resources res_id, u32 *p_mcp_resp,
4109 			u32 *p_resc_num, u32 *p_resc_start)
4110 {
4111 	struct ecore_resc_alloc_out_params out_params;
4112 	struct ecore_resc_alloc_in_params in_params;
4113 	enum _ecore_status_t rc;
4114 
4115 	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
4116 	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
4117 	in_params.res_id = res_id;
4118 	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
4119 	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
4120 					   &out_params);
4121 	if (rc != ECORE_SUCCESS)
4122 		return rc;
4123 
4124 	*p_mcp_resp = out_params.mcp_resp;
4125 
4126 	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
4127 		*p_resc_num = out_params.resc_num;
4128 		*p_resc_start = out_params.resc_start;
4129 	}
4130 
4131 	return ECORE_SUCCESS;
4132 }
4133 
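/* Editor's illustrative sketch (not upstream code; the guard macro and the
 * example_* name are hypothetical): querying the L2-queue range the MFW
 * allocated to this PF via the wrapper above. The outputs are valid only
 * when the MFW answers FW_MSG_CODE_RESOURCE_ALLOC_OK.
 */
#ifdef ECORE_EDITOR_EXAMPLES
static enum _ecore_status_t
example_get_l2_queue_alloc(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   u32 *p_num, u32 *p_start)
{
	u32 mcp_resp = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_get_resc_info(p_hwfn, p_ptt, ECORE_L2_QUEUE,
				     &mcp_resp, p_num, p_start);
	if (rc != ECORE_SUCCESS)
		return rc;

	return (mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) ? ECORE_SUCCESS
							   : ECORE_INVAL;
}
#endif
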
4134 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
4135 					       struct ecore_ptt *p_ptt)
4136 {
4137 	u32 mcp_resp, mcp_param;
4138 
4139 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
4140 			     &mcp_resp, &mcp_param);
4141 }
4142 
4143 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
4144 						   struct ecore_ptt *p_ptt,
4145 						   u32 param, u32 *p_mcp_resp,
4146 						   u32 *p_mcp_param)
4147 {
4148 	enum _ecore_status_t rc;
4149 
4150 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
4151 			   p_mcp_resp, p_mcp_param);
4152 	if (rc != ECORE_SUCCESS)
4153 		return rc;
4154 
4155 	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4156 		DP_INFO(p_hwfn,
4157 			"The resource command is unsupported by the MFW\n");
4158 		return ECORE_NOTIMPL;
4159 	}
4160 
4161 	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
4162 		u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
4163 
4164 		DP_NOTICE(p_hwfn, false,
4165 			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
4166 			  param, opcode);
4167 		return ECORE_INVAL;
4168 	}
4169 
4170 	return rc;
4171 }
4172 
4173 enum _ecore_status_t
4174 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4175 		      struct ecore_resc_lock_params *p_params)
4176 {
4177 	u32 param = 0, mcp_resp = 0, mcp_param = 0;
4178 	u8 opcode;
4179 	enum _ecore_status_t rc;
4180 
4181 	switch (p_params->timeout) {
4182 	case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
4183 		opcode = RESOURCE_OPCODE_REQ;
4184 		p_params->timeout = 0;
4185 		break;
4186 	case ECORE_MCP_RESC_LOCK_TO_NONE:
4187 		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
4188 		p_params->timeout = 0;
4189 		break;
4190 	default:
4191 		opcode = RESOURCE_OPCODE_REQ_W_AGING;
4192 		break;
4193 	}
4194 
4195 	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
4196 	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
4197 	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
4198 
4199 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4200 		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
4201 		   param, p_params->timeout, opcode, p_params->resource);
4202 
4203 	/* Attempt to acquire the resource */
4204 	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
4205 				    &mcp_param);
4206 	if (rc != ECORE_SUCCESS)
4207 		return rc;
4208 
4209 	/* Analyze the response */
4210 	p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
4211 	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
4212 
4213 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4214 		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
4215 		   mcp_param, opcode, p_params->owner);
4216 
4217 	switch (opcode) {
4218 	case RESOURCE_OPCODE_GNT:
4219 		p_params->b_granted = true;
4220 		break;
4221 	case RESOURCE_OPCODE_BUSY:
4222 		p_params->b_granted = false;
4223 		break;
4224 	default:
4225 		DP_NOTICE(p_hwfn, false,
4226 			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
4227 			  mcp_param, opcode);
4228 		return ECORE_INVAL;
4229 	}
4230 
4231 	return ECORE_SUCCESS;
4232 }
4233 
4234 enum _ecore_status_t
4235 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4236 		    struct ecore_resc_lock_params *p_params)
4237 {
4238 	u32 retry_cnt = 0;
4239 	enum _ecore_status_t rc;
4240 
4241 	do {
4242 		/* No need for an interval before the first iteration */
4243 		if (retry_cnt) {
4244 			if (p_params->sleep_b4_retry) {
4245 				u16 retry_interval_in_ms =
4246 					DIV_ROUND_UP(p_params->retry_interval,
4247 						     1000);
4248 
4249 				OSAL_MSLEEP(retry_interval_in_ms);
4250 			} else {
4251 				OSAL_UDELAY(p_params->retry_interval);
4252 			}
4253 		}
4254 
4255 		rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
4256 		if (rc != ECORE_SUCCESS)
4257 			return rc;
4258 
4259 		if (p_params->b_granted)
4260 			break;
4261 	} while (retry_cnt++ < p_params->retry_num);
4262 
4263 	return ECORE_SUCCESS;
4264 }
4265 
4266 void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
4267 				      struct ecore_resc_unlock_params *p_unlock,
4268 				      enum ecore_resc_lock resource,
4269 				      bool b_is_permanent)
4270 {
4271 	if (p_lock != OSAL_NULL) {
4272 		OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
4273 
4274 		/* Permanent resources don't require aging, and there's no
4275 		 * point in trying to acquire them more than once, since it
4276 		 * is not expected that another entity would release them.
4277 		 */
4278 		if (b_is_permanent) {
4279 			p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
4280 		} else {
4281 			p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
4282 			p_lock->retry_interval =
4283 					ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
4284 			p_lock->sleep_b4_retry = true;
4285 		}
4286 
4287 		p_lock->resource = resource;
4288 	}
4289 
4290 	if (p_unlock != OSAL_NULL) {
4291 		OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
4292 		p_unlock->resource = resource;
4293 	}
4294 }
4295 
4296 enum _ecore_status_t
4297 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4298 		      struct ecore_resc_unlock_params *p_params)
4299 {
4300 	u32 param = 0, mcp_resp, mcp_param;
4301 	u8 opcode;
4302 	enum _ecore_status_t rc;
4303 
4304 	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
4305 				   : RESOURCE_OPCODE_RELEASE;
4306 	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
4307 	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
4308 
4309 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4310 		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
4311 		   param, opcode, p_params->resource);
4312 
4313 	/* Attempt to release the resource */
4314 	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
4315 				    &mcp_param);
4316 	if (rc != ECORE_SUCCESS)
4317 		return rc;
4318 
4319 	/* Analyze the response */
4320 	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
4321 
4322 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4323 		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
4324 		   mcp_param, opcode);
4325 
4326 	switch (opcode) {
4327 	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
4328 		DP_INFO(p_hwfn,
4329 			"Resource unlock request for an already released resource [%d]\n",
4330 			p_params->resource);
4331 		/* Fallthrough */
4332 	case RESOURCE_OPCODE_RELEASED:
4333 		p_params->b_released = true;
4334 		break;
4335 	case RESOURCE_OPCODE_WRONG_OWNER:
4336 		p_params->b_released = false;
4337 		break;
4338 	default:
4339 		DP_NOTICE(p_hwfn, false,
4340 			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
4341 			  mcp_param, opcode);
4342 		return ECORE_INVAL;
4343 	}
4344 
4345 	return ECORE_SUCCESS;
4346 }
4347 
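/* Editor's illustrative sketch (not upstream code; the guard macro and the
 * example_* name are hypothetical): the intended usage pattern of the
 * resource-lock helpers above - initialize default lock/unlock params,
 * acquire the resource, do the cross-PF critical work, then release it.
 */
#ifdef ECORE_EDITOR_EXAMPLES
static enum _ecore_status_t
example_with_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		       enum ecore_resc_lock resource)
{
	struct ecore_resc_lock_params lock_params;
	struct ecore_resc_unlock_params unlock_params;
	enum _ecore_status_t rc;

	ecore_mcp_resc_lock_default_init(&lock_params, &unlock_params,
					 resource, false);

	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
	if (rc != ECORE_SUCCESS)
		return rc;
	if (!lock_params.b_granted)
		return ECORE_BUSY;

	/* ... critical section protected across PFs goes here ... */

	return ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
}
#endif
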
4348 bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
4349 {
4350 	return !!(p_hwfn->mcp_info->capabilities &
4351 		  FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
4352 }
4353 
4354 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
4355 						struct ecore_ptt *p_ptt)
4356 {
4357 	u32 mcp_resp;
4358 	enum _ecore_status_t rc;
4359 
4360 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
4361 			   0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
4362 	if (rc == ECORE_SUCCESS)
4363 		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
4364 			   "MFW supported features: %08x\n",
4365 			   p_hwfn->mcp_info->capabilities);
4366 
4367 	return rc;
4368 }
4369 
4370 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
4371 						struct ecore_ptt *p_ptt)
4372 {
4373 	u32 mcp_resp, mcp_param, features;
4374 
4375 	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
4376 		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
4377 		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
4378 
4379 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
4380 			     features, &mcp_resp, &mcp_param);
4381 }
4382 
4383 enum _ecore_status_t
4384 ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4385 			struct ecore_mcp_drv_attr *p_drv_attr)
4386 {
4387 	struct attribute_cmd_write_stc attr_cmd_write;
4388 	enum _attribute_commands_e mfw_attr_cmd;
4389 	struct ecore_mcp_mb_params mb_params;
4390 	enum _ecore_status_t rc;
4391 
4392 	switch (p_drv_attr->attr_cmd) {
4393 	case ECORE_MCP_DRV_ATTR_CMD_READ:
4394 		mfw_attr_cmd = ATTRIBUTE_CMD_READ;
4395 		break;
4396 	case ECORE_MCP_DRV_ATTR_CMD_WRITE:
4397 		mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
4398 		break;
4399 	case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
4400 		mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
4401 		break;
4402 	case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
4403 		mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
4404 		break;
4405 	default:
4406 		DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
4407 			  p_drv_attr->attr_cmd);
4408 		return ECORE_INVAL;
4409 	}
4410 
4411 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4412 	mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
4413 	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
4414 		      p_drv_attr->attr_num);
4415 	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
4416 		      mfw_attr_cmd);
4417 	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
4418 		OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
4419 		attr_cmd_write.val = p_drv_attr->val;
4420 		attr_cmd_write.mask = p_drv_attr->mask;
4421 		attr_cmd_write.offset = p_drv_attr->offset;
4422 
4423 		mb_params.p_data_src = &attr_cmd_write;
4424 		mb_params.data_src_size = sizeof(attr_cmd_write);
4425 	}
4426 
4427 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4428 	if (rc != ECORE_SUCCESS)
4429 		return rc;
4430 
4431 	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4432 		DP_INFO(p_hwfn,
4433 			"The attribute command is not supported by the MFW\n");
4434 		return ECORE_NOTIMPL;
4435 	} else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
4436 		DP_INFO(p_hwfn,
4437 			"Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
4438 			mb_params.mcp_resp, p_drv_attr->attr_cmd,
4439 			p_drv_attr->attr_num);
4440 		return ECORE_INVAL;
4441 	}
4442 
4443 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4444 		   "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
4445 		   p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
4446 		   p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
4447 		   mb_params.mcp_param);
4448 
4449 	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
4450 	    p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
4451 		p_drv_attr->val = mb_params.mcp_param;
4452 
4453 	return ECORE_SUCCESS;
4454 }
4455 
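/* Editor's illustrative sketch (not upstream code; the guard macro and the
 * example_* name are hypothetical, and the attr_num member type is
 * assumed): reading a single attribute with the helper above; for
 * ECORE_MCP_DRV_ATTR_CMD_READ the result is returned in p_drv_attr->val.
 */
#ifdef ECORE_EDITOR_EXAMPLES
static enum _ecore_status_t
example_read_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		       u32 attr_num, u32 *p_val)
{
	struct ecore_mcp_drv_attr drv_attr;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&drv_attr, sizeof(drv_attr));
	drv_attr.attr_cmd = ECORE_MCP_DRV_ATTR_CMD_READ;
	drv_attr.attr_num = attr_num;

	rc = ecore_mcp_drv_attribute(p_hwfn, p_ptt, &drv_attr);
	if (rc == ECORE_SUCCESS)
		*p_val = drv_attr.val;

	return rc;
}
#endif
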
4456 enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn,
4457 						 struct ecore_ptt *p_ptt)
4458 {
4459 	struct ecore_dev *p_dev = p_hwfn->p_dev;
4460 	struct ecore_mcp_mb_params mb_params;
4461 	u8 fir_valid, l2_valid;
4462 	enum _ecore_status_t rc;
4463 
4464 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4465 	mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
4466 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4467 	if (rc != ECORE_SUCCESS)
4468 		return rc;
4469 
4470 	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4471 		DP_INFO(p_hwfn,
4472 			"The get_engine_config command is unsupported by the MFW\n");
4473 		return ECORE_NOTIMPL;
4474 	}
4475 
4476 	fir_valid = GET_MFW_FIELD(mb_params.mcp_param,
4477 				  FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
4478 	if (fir_valid)
4479 		p_dev->fir_affin =
4480 			GET_MFW_FIELD(mb_params.mcp_param,
4481 				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
4482 
4483 	l2_valid = GET_MFW_FIELD(mb_params.mcp_param,
4484 				 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
4485 	if (l2_valid)
4486 		p_dev->l2_affin_hint =
4487 			GET_MFW_FIELD(mb_params.mcp_param,
4488 				      FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
4489 
4490 	DP_INFO(p_hwfn,
4491 		"Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
4492 		fir_valid, p_dev->fir_affin, l2_valid, p_dev->l2_affin_hint);
4493 
4494 	return ECORE_SUCCESS;
4495 }
4496 
4497 enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
4498 						struct ecore_ptt *p_ptt)
4499 {
4500 	struct ecore_dev *p_dev = p_hwfn->p_dev;
4501 	struct ecore_mcp_mb_params mb_params;
4502 	enum _ecore_status_t rc;
4503 
4504 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4505 	mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
4506 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4507 	if (rc != ECORE_SUCCESS)
4508 		return rc;
4509 
4510 	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4511 		DP_INFO(p_hwfn,
4512 			"The get_ppfid_bitmap command is unsupported by the MFW\n");
4513 		return ECORE_NOTIMPL;
4514 	}
4515 
4516 	p_dev->ppfid_bitmap = GET_MFW_FIELD(mb_params.mcp_param,
4517 					    FW_MB_PARAM_PPFID_BITMAP);
4518 
4519 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "PPFID bitmap 0x%hhx\n",
4520 		   p_dev->ppfid_bitmap);
4521 
4522 	return ECORE_SUCCESS;
4523 }
4524 
4525 void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4526 		      u32 offset, u32 val)
4527 {
4528 	struct ecore_mcp_mb_params mb_params;
4529 	u32 dword = val;
4530 	enum _ecore_status_t rc;
4531 
4532 	OSAL_MEMSET(&mb_params, 0, sizeof(struct ecore_mcp_mb_params));
4533 	mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
4534 	mb_params.param = offset;
4535 	mb_params.p_data_src = &dword;
4536 	mb_params.data_src_size = sizeof(dword);
4537 
4538 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4539 	if (rc != ECORE_SUCCESS) {
4540 		DP_NOTICE(p_hwfn, false,
4541 			  "Failed to send WoL write request, rc = %d\n", rc);
4542 		return;
4543 	}
4544 
4545 	if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
4546 		DP_NOTICE(p_hwfn, false,
4547 			  "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
4548 			  val, offset, mb_params.mcp_resp);
4549 	}
4550 }
4551