xref: /dpdk/drivers/net/qede/qede_debug.c (revision 721ef3f54578e33905f75c7196439981dc264289)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2020 Marvell Semiconductor Inc.
3  * All rights reserved.
4  * www.marvell.com
5  */
6 
7 #include <rte_common.h>
8 #include "base/bcm_osal.h"
9 #include "base/ecore.h"
10 #include "base/ecore_cxt.h"
11 #include "base/ecore_hsi_common.h"
12 #include "base/ecore_hw.h"
13 #include "base/ecore_mcp.h"
14 #include "base/reg_addr.h"
15 #include "qede_debug.h"
16 
/* Memory groups enum.
 * NOTE: order and count must be kept in sync with s_mem_group_names below —
 * the name table is indexed by these values.
 */
enum mem_groups {
	MEM_GROUP_PXP_MEM,
	MEM_GROUP_DMAE_MEM,
	MEM_GROUP_CM_MEM,
	MEM_GROUP_QM_MEM,
	MEM_GROUP_DORQ_MEM,
	MEM_GROUP_BRB_RAM,
	MEM_GROUP_BRB_MEM,
	MEM_GROUP_PRS_MEM,
	MEM_GROUP_SDM_MEM,
	MEM_GROUP_PBUF,
	MEM_GROUP_IOR,
	MEM_GROUP_RAM,
	MEM_GROUP_BTB_RAM,
	MEM_GROUP_RDIF_CTX,
	MEM_GROUP_TDIF_CTX,
	MEM_GROUP_CFC_MEM,
	MEM_GROUP_CONN_CFC_MEM,
	MEM_GROUP_CAU_PI,
	MEM_GROUP_CAU_MEM,
	MEM_GROUP_CAU_MEM_EXT,
	MEM_GROUP_PXP_ILT,
	MEM_GROUP_MULD_MEM,
	MEM_GROUP_BTB_MEM,
	MEM_GROUP_IGU_MEM,
	MEM_GROUP_IGU_MSIX,
	MEM_GROUP_CAU_SB,
	MEM_GROUP_BMB_RAM,
	MEM_GROUP_BMB_MEM,
	MEM_GROUP_TM_MEM,
	MEM_GROUP_TASK_CFC_MEM,
	MEM_GROUPS_NUM		/* Must be last */
};
51 
/* Memory groups names, indexed by enum mem_groups.
 * NOTE: must be kept in the exact same order as the enum above.
 */
static const char * const s_mem_group_names[] = {
	"PXP_MEM",
	"DMAE_MEM",
	"CM_MEM",
	"QM_MEM",
	"DORQ_MEM",
	"BRB_RAM",
	"BRB_MEM",
	"PRS_MEM",
	"SDM_MEM",
	"PBUF",
	"IOR",
	"RAM",
	"BTB_RAM",
	"RDIF_CTX",
	"TDIF_CTX",
	"CFC_MEM",
	"CONN_CFC_MEM",
	"CAU_PI",
	"CAU_MEM",
	"CAU_MEM_EXT",
	"PXP_ILT",
	"MULD_MEM",
	"BTB_MEM",
	"IGU_MEM",
	"IGU_MSIX",
	"CAU_SB",
	"BMB_RAM",
	"BMB_MEM",
	"TM_MEM",
	"TASK_CFC_MEM",
};
85 
86 /* Idle check conditions */
87 
/* Idle check condition: both masked register values differ from their
 * expected immediates.
 */
static u32 cond5(const u32 *r, const u32 *imm)
{
	u32 mismatch0 = (r[0] & imm[0]) != imm[1];
	u32 mismatch1 = (r[1] & imm[2]) != imm[3];

	return mismatch0 && mismatch1;
}
92 
/* Idle check condition: the bit field extracted from r[0] differs from the
 * expected immediate.
 */
static u32 cond7(const u32 *r, const u32 *imm)
{
	u32 field = (r[0] >> imm[0]) & imm[1];

	return field != imm[2];
}
97 
/* Idle check condition: the masked register value differs from the expected
 * immediate.
 */
static u32 cond6(const u32 *r, const u32 *imm)
{
	u32 masked = r[0] & imm[0];

	return masked != imm[1];
}
102 
/* Idle check condition: a field of r[0] differs from a value assembled out
 * of other fields of r[0] and r[1].
 */
static u32 cond9(const u32 *r, const u32 *imm)
{
	u32 lhs = (r[0] & imm[0]) >> imm[1];
	u32 rhs = ((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]);

	return lhs != rhs;
}
108 
/* Idle check condition: a shifted field of r[0] differs from another masked
 * part of the same register.
 */
static u32 cond10(const u32 *r, const u32 *imm)
{
	u32 shifted = (r[0] & imm[0]) >> imm[1];

	return shifted != (r[0] & imm[2]);
}
113 
/* Idle check condition: the bits of r[0] outside the imm[0] mask differ from
 * the expected immediate.
 */
static u32 cond4(const u32 *r, const u32 *imm)
{
	u32 outside_mask = r[0] & ~imm[0];

	return outside_mask != imm[1];
}
118 
/* Idle check condition: the bits of r[0] not covered by the r[1] mask differ
 * from the expected immediate.
 */
static u32 cond0(const u32 *r, const u32 *imm)
{
	u32 unmasked = r[0] & ~r[1];

	return unmasked != imm[0];
}
123 
/* Idle check condition: the register value differs from the expected
 * immediate.
 */
static u32 cond1(const u32 *r, const u32 *imm)
{
	return (r[0] == imm[0]) ? 0 : 1;
}
128 
/* Idle check condition: r[0] and r[1] differ while r[2] equals the expected
 * immediate.
 */
static u32 cond11(const u32 *r, const u32 *imm)
{
	if (r[0] == r[1])
		return 0;

	return r[2] == imm[0];
}
133 
/* Idle check condition: r[0] and r[1] differ while r[2] exceeds the expected
 * immediate.
 */
static u32 cond12(const u32 *r, const u32 *imm)
{
	if (r[0] == r[1])
		return 0;

	return r[2] > imm[0];
}
138 
/* Idle check condition: the two register values differ (no immediates). */
static u32 cond3(const u32 *r, const __rte_unused u32 *imm)
{
	return (r[0] == r[1]) ? 0 : 1;
}
143 
/* Idle check condition: any bit of the imm[0] mask is set in r[0].
 * Returns the raw masked value (truthy when non-zero), not a normalized 0/1.
 */
static u32 cond13(const u32 *r, const u32 *imm)
{
	u32 masked = r[0] & imm[0];

	return masked;
}
148 
/* Idle check condition: r[0] is below r[1] reduced by a margin of imm[0].
 * NOTE(review): unsigned subtraction wraps if imm[0] > r[1]; assumed
 * intentional for the generated rule data — confirm against FW tools.
 */
static u32 cond8(const u32 *r, const u32 *imm)
{
	u32 threshold = r[1] - imm[0];

	return r[0] < threshold;
}
153 
/* Idle check condition: the register value exceeds the expected immediate. */
static u32 cond2(const u32 *r, const u32 *imm)
{
	return (r[0] > imm[0]) ? 1 : 0;
}
158 
/* Array of Idle Check conditions, indexed by the condition ID carried in the
 * idle-check rule data (presumably emitted by the FW debug tools — verify).
 * The order must therefore never be changed.
 */
static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
	cond0,
	cond1,
	cond2,
	cond3,
	cond4,
	cond5,
	cond6,
	cond7,
	cond8,
	cond9,
	cond10,
	cond11,
	cond12,
	cond13,
};
176 
/* Number of physical HW blocks */
#define NUM_PHYS_BLOCKS 84

/* Number of debug reset registers */
#define NUM_DBG_RESET_REGS 8
180 
181 /******************************* Data Types **********************************/
182 
/* HW platform types; only ASIC is supported, the remaining values are
 * reserved placeholders.
 */
enum hw_types {
	HW_TYPE_ASIC,		/* Real silicon */
	PLATFORM_RESERVED,
	PLATFORM_RESERVED2,
	PLATFORM_RESERVED3,
	PLATFORM_RESERVED4,
	MAX_HW_TYPES
};
191 
/* CM context types; used to index storm_defs.cm_ctx_rd_addr and
 * cm_ctx_lid_sizes (e.g. CM_CTX_CONN_AG maps to *CM_REG_AGG_CON_CTX).
 */
enum cm_ctx_types {
	CM_CTX_CONN_AG,		/* Connection aggregation context */
	CM_CTX_CONN_ST,		/* Connection state-machine context */
	CM_CTX_TASK_AG,		/* Task aggregation context */
	CM_CTX_TASK_ST,		/* Task state-machine context */
	NUM_CM_CTX_TYPES
};
200 
/* Debug bus frame modes: how the 4 (or 8) dwords of a debug-bus frame are
 * split between Storm data and HW block data.
 */
enum dbg_bus_frame_modes {
	DBG_BUS_FRAME_MODE_4ST = 0,	/* 4 Storm dwords (no HW) */
	DBG_BUS_FRAME_MODE_2ST_2HW = 1,	/* 2 Storm dwords, 2 HW dwords */
	DBG_BUS_FRAME_MODE_1ST_3HW = 2,	/* 1 Storm dwords, 3 HW dwords */
	DBG_BUS_FRAME_MODE_4HW = 3,	/* 4 HW dwords (no Storms) */
	DBG_BUS_FRAME_MODE_8HW = 4,	/* 8 HW dwords (no Storms) */
	DBG_BUS_NUM_FRAME_MODES
};
210 
/* Chip constant definitions */
struct chip_defs {
	const char *name;	/* Short chip name, e.g. "bb" / "ah" */
	u32 num_ilt_pages;	/* Number of ILT pages */
};

/* HW type constant definitions */
struct hw_type_defs {
	const char *name;	/* HW type name, e.g. "asic" */
	u32 delay_factor;	/* Multiplier applied to delays */
	u32 dmae_thresh;	/* Size threshold for using DMAE */
	u32 log_thresh;		/* Size threshold for logging */
};

/* RBC reset definitions */
struct rbc_reset_defs {
	u32 reset_reg_addr;		/* GRC address of the reset register */
	u32 reset_val[MAX_CHIP_IDS];	/* Reset value per chip */
};

/* Storm constant definitions.
 * Addresses are in bytes, sizes are in quad-regs.
 */
struct storm_defs {
	char letter;		/* Single-letter Storm name (T/M/U/X/Y/P) */
	enum block_id sem_block_id;
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	bool has_vfc;		/* True if this Storm has a VFC */
	u32 sem_fast_mem_addr;
	u32 sem_frame_mode_addr;
	u32 sem_slow_enable_addr;
	u32 sem_slow_mode_addr;
	u32 sem_slow_mode1_conf_addr;
	u32 sem_sync_dbg_empty_addr;
	u32 sem_gpre_vect_addr;
	u32 cm_ctx_wr_addr;
	/* CM context read registers, indexed by enum cm_ctx_types */
	u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
	/* CM context LID sizes per chip, indexed by enum cm_ctx_types */
	u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
};

/* Debug Bus Constraint operation constant definitions */
struct dbg_bus_constraint_op_defs {
	u8 hw_op_val;		/* Operation value written to HW */
	bool is_cyclic;
};

/* Storm Mode definitions */
struct storm_mode_defs {
	const char *name;
	bool is_fast_dbg;
	u8 id_in_hw;
	u32 src_disable_reg_addr;
	u32 src_enable_val;
	bool exists[MAX_CHIP_IDS];	/* Mode availability per chip */
};

struct grc_param_defs {
	u32 default_val[MAX_CHIP_IDS];	/* Default value per chip */
	u32 min;			/* Minimum allowed value */
	u32 max;			/* Maximum allowed value */
	bool is_preset;			/* True for preset params */
	bool is_persistent;		/* Survives params reset if true */
	u32 exclude_all_preset_val;	/* Value under EXCLUDE_ALL preset */
	u32 crash_preset_val[MAX_CHIP_IDS];	/* Value under CRASH preset */
};

/* Address is in 128b units. Width is in bits. */
struct rss_mem_defs {
	const char *mem_name;
	const char *type_name;
	u32 addr;
	u32 entry_width;
	u32 num_entries[MAX_CHIP_IDS];
};

struct vfc_ram_defs {
	const char *mem_name;
	const char *type_name;
	u32 base_row;
	u32 num_rows;
};

struct big_ram_defs {
	const char *instance_name;
	enum mem_groups mem_group_id;
	enum mem_groups ram_mem_group_id;
	enum dbg_grc_params grc_param;	/* GRC param controlling this dump */
	u32 addr_reg_addr;
	u32 data_reg_addr;
	u32 is_256b_reg_addr;
	u32 is_256b_bit_offset[MAX_CHIP_IDS];
	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
};

struct phy_defs {
	const char *phy_name;

	/* PHY base GRC address */
	u32 base_addr;

	/* Relative address of indirect TBUS address register (bits 0..7) */
	u32 tbus_addr_lo_addr;

	/* Relative address of indirect TBUS address register (bits 8..10) */
	u32 tbus_addr_hi_addr;

	/* Relative address of indirect TBUS data register (bits 0..7) */
	u32 tbus_data_lo_addr;

	/* Relative address of indirect TBUS data register (bits 8..11) */
	u32 tbus_data_hi_addr;
};

/* Split type definitions */
struct split_type_defs {
	const char *name;	/* Split type name used in dump params */
};
328 
/******************************** Constants **********************************/

#define BYTES_IN_DWORD			sizeof(u32)
/* In the macros below, size and offset are specified in bits */
#define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
#define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
#define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
#define FIELD_DWORD_OFFSET(type, field) \
	 (int)(FIELD_BIT_OFFSET(type, field) / 32)
#define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
#define FIELD_BIT_MASK(type, field) \
	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
	 FIELD_DWORD_SHIFT(type, field))

/* Writes 'val' into the named bit field of the dword array 'var'.
 * NOTE: 'val' is NOT masked to the field width — callers must pass an
 * in-range value. 'var' is evaluated more than once.
 */
#define SET_VAR_FIELD(var, type, field, val) \
	do { \
		var[FIELD_DWORD_OFFSET(type, field)] &=	\
		(~FIELD_BIT_MASK(type, field));	\
		var[FIELD_DWORD_OFFSET(type, field)] |= \
		(val) << FIELD_DWORD_SHIFT(type, field); \
	} while (0)

/* Writes an array of dwords to the same GRC address.
 * NOTE: relies on a loop variable 'i' declared in the calling scope.
 */
#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
	do { \
		for (i = 0; i < (arr_size); i++) \
			ecore_wr(dev, ptt, addr,	(arr)[i]); \
	} while (0)

#define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
#define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)

/* extra lines include a signature line + optional latency events line */
#define NUM_EXTRA_DBG_LINES(block) \
	(GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
#define NUM_DBG_LINES(block) \
	((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))

#define USE_DMAE			true
#define PROTECT_WIDE_BUS		true

#define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))

#define REG_DUMP_LEN_SHIFT		24
#define MEM_DUMP_ENTRY_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))

#define IDLE_CHK_RULE_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))

#define IDLE_CHK_RESULT_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))

#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))

#define PAGE_MEM_DESC_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))

#define IDLE_CHK_MAX_ENTRIES_SIZE	32

/* The sizes and offsets below are specified in bits */
#define VFC_CAM_CMD_STRUCT_SIZE		64
#define VFC_CAM_CMD_ROW_OFFSET		48
#define VFC_CAM_CMD_ROW_SIZE		9
#define VFC_CAM_ADDR_STRUCT_SIZE	16
#define VFC_CAM_ADDR_OP_OFFSET		0
#define VFC_CAM_ADDR_OP_SIZE		4
#define VFC_CAM_RESP_STRUCT_SIZE	256
#define VFC_RAM_ADDR_STRUCT_SIZE	16
#define VFC_RAM_ADDR_OP_OFFSET		0
#define VFC_RAM_ADDR_OP_SIZE		2
#define VFC_RAM_ADDR_ROW_OFFSET		2
#define VFC_RAM_ADDR_ROW_SIZE		10
#define VFC_RAM_RESP_STRUCT_SIZE	256

#define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)

#define NUM_VFC_RAM_TYPES		4

#define VFC_CAM_NUM_ROWS		512

#define VFC_OPCODE_CAM_RD		14
#define VFC_OPCODE_RAM_RD		0

#define NUM_RSS_MEM_TYPES		5

#define NUM_BIG_RAM_TYPES		3
#define BIG_RAM_NAME_LEN		3

#define NUM_PHY_TBUS_ADDRESSES		2048
#define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)

#define RESET_REG_UNRESET_OFFSET	4

#define STALL_DELAY_MS			500

#define STATIC_DEBUG_LINE_DWORDS	9

#define NUM_COMMON_GLOBAL_PARAMS	11

#define MAX_RECURSION_DEPTH		10

#define FW_IMG_MAIN			1

#define REG_FIFO_ELEMENT_DWORDS		2
#define REG_FIFO_DEPTH_ELEMENTS		32
#define REG_FIFO_DEPTH_DWORDS \
	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)

#define IGU_FIFO_ELEMENT_DWORDS		4
#define IGU_FIFO_DEPTH_ELEMENTS		64
#define IGU_FIFO_DEPTH_DWORDS \
	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)

#define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)

#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
	(MCP_REG_SCRATCH + \
	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))

/* NOTE: "PLTAFORM" typo is kept — the macro name is referenced elsewhere */
#define MAX_SW_PLTAFORM_STR_SIZE	64

#define EMPTY_FW_VERSION_STR		"???_???_???_???"
#define EMPTY_FW_IMAGE_STR		"???????????????"
464 
/***************************** Constant Arrays *******************************/

/* Chip constant definitions array, indexed by enum chip_ids ({bb, k2}, as
 * set in qed_dbg_dev_init).
 */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
	{"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
	{"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
};

/* Storm constant definitions array; field order follows struct storm_defs */
static struct storm_defs s_storm_defs[] = {
	/* Tstorm */
	{'T', BLOCK_TSEM,
		{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
		true,
		TSEM_REG_FAST_MEMORY,
		TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
		TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
		TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
		TCM_REG_CTX_RBC_ACCS,
		{TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
		 TCM_REG_SM_TASK_CTX},
		{{4, 16, 2, 4}, {4, 16, 2, 4} } /* {bb} {k2} */
	},

	/* Mstorm */
	{'M', BLOCK_MSEM,
		{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
		false,
		MSEM_REG_FAST_MEMORY,
		MSEM_REG_DBG_FRAME_MODE,
		MSEM_REG_SLOW_DBG_ACTIVE,
		MSEM_REG_SLOW_DBG_MODE,
		MSEM_REG_DBG_MODE1_CFG,
		MSEM_REG_SYNC_DBG_EMPTY,
		MSEM_REG_DBG_GPRE_VECT,
		MCM_REG_CTX_RBC_ACCS,
		{MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
		 MCM_REG_SM_TASK_CTX },
		{{1, 10, 2, 7}, {1, 10, 2, 7} } /* {bb} {k2}*/
	},

	/* Ustorm */
	{'U', BLOCK_USEM,
		{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
		false,
		USEM_REG_FAST_MEMORY,
		USEM_REG_DBG_FRAME_MODE,
		USEM_REG_SLOW_DBG_ACTIVE,
		USEM_REG_SLOW_DBG_MODE,
		USEM_REG_DBG_MODE1_CFG,
		USEM_REG_SYNC_DBG_EMPTY,
		USEM_REG_DBG_GPRE_VECT,
		UCM_REG_CTX_RBC_ACCS,
		{UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
		 UCM_REG_SM_TASK_CTX},
		{{2, 13, 3, 3}, {2, 13, 3, 3} } /* {bb} {k2} */
	},

	/* Xstorm */
	{'X', BLOCK_XSEM,
		{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
		false,
		XSEM_REG_FAST_MEMORY,
		XSEM_REG_DBG_FRAME_MODE,
		XSEM_REG_SLOW_DBG_ACTIVE,
		XSEM_REG_SLOW_DBG_MODE,
		XSEM_REG_DBG_MODE1_CFG,
		XSEM_REG_SYNC_DBG_EMPTY,
		XSEM_REG_DBG_GPRE_VECT,
		XCM_REG_CTX_RBC_ACCS,
		{XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
		{{9, 15, 0, 0}, {9, 15,	0, 0} } /* {bb} {k2} */
	},

	/* Ystorm */
	{'Y', BLOCK_YSEM,
		{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
		false,
		YSEM_REG_FAST_MEMORY,
		YSEM_REG_DBG_FRAME_MODE,
		YSEM_REG_SLOW_DBG_ACTIVE,
		YSEM_REG_SLOW_DBG_MODE,
		YSEM_REG_DBG_MODE1_CFG,
		YSEM_REG_SYNC_DBG_EMPTY,
		YSEM_REG_DBG_GPRE_VECT,
		YCM_REG_CTX_RBC_ACCS,
		{YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
		 YCM_REG_SM_TASK_CTX},
		{{2, 3, 2, 12}, {2, 3, 2, 12} } /* {bb} {k2} */
	},

	/* Pstorm */
	{'P', BLOCK_PSEM,
		{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
		true,
		PSEM_REG_FAST_MEMORY,
		PSEM_REG_DBG_FRAME_MODE,
		PSEM_REG_SLOW_DBG_ACTIVE,
		PSEM_REG_SLOW_DBG_MODE,
		PSEM_REG_DBG_MODE1_CFG,
		PSEM_REG_SYNC_DBG_EMPTY,
		PSEM_REG_DBG_GPRE_VECT,
		PCM_REG_CTX_RBC_ACCS,
		{0, PCM_REG_SM_CON_CTX, 0, 0},
		{{0, 10, 0, 0}, {0, 10, 0, 0} } /* {bb} {k2} */
	},
};

/* HW type definitions, indexed by enum hw_types.
 * Fields: {name, delay_factor, dmae_thresh, log_thresh}.
 */
static struct hw_type_defs s_hw_type_defs[] = {
	/* HW_TYPE_ASIC */
	{"asic", 1, 256, 32768},
	{"reserved", 0, 0, 0},
	{"reserved2", 0, 0, 0},
	{"reserved3", 0, 0, 0}
};
580 
/* GRC parameter definitions, indexed by enum dbg_grc_params.
 * Entry layout (see struct grc_param_defs):
 * {{default_val[bb, k2]}, min, max, is_preset, is_persistent,
 *  exclude_all_preset_val, {crash_preset_val[bb, k2]}}
 */
static struct grc_param_defs s_grc_param_defs[] = {
	/* DBG_GRC_PARAM_DUMP_TSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_MSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_USTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_XSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_YSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_PSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_REGS */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_RAM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_PBUF */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_IOR */
	{{0, 0}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_VFC */
	{{0, 0}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_CM_CTX */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_ILT */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_RSS */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_CAU */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_QM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_MCP */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_DORQ */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_CFC */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_IGU */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_BRB */
	{{0, 0}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_BTB */
	{{0, 0}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_BMB */
	{{0, 0}, 0, 1, false, false, 0, {0, 0} },

	/* DBG_GRC_PARAM_RESERVED1 */
	{{0, 0}, 0, 1, false, false, 0, {0, 0} },

	/* DBG_GRC_PARAM_DUMP_MULD */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_PRS */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_DMAE */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_TM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_SDM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_DIF */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_STATIC */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_UNSTALL */
	{{0, 0}, 0, 1, false, false, 0, {0, 0} },

	/* DBG_GRC_PARAM_RESERVED2 */
	{{0, 0}, 0, 1, false, false, 0, {0, 0} },

	/* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
	{{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0} },

	/* DBG_GRC_PARAM_EXCLUDE_ALL */
	{{0, 0}, 0, 1, true, false, 0, {0, 0} },

	/* DBG_GRC_PARAM_CRASH */
	{{0, 0}, 0, 1, true, false, 0, {0, 0} },

	/* DBG_GRC_PARAM_PARITY_SAFE */
	{{0, 0}, 0, 1, false, false, 0, {0, 0} },

	/* DBG_GRC_PARAM_DUMP_CM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1} },

	/* DBG_GRC_PARAM_DUMP_PHY */
	{{0, 0}, 0, 1, false, false, 0, {0, 0} },

	/* DBG_GRC_PARAM_NO_MCP */
	{{0, 0}, 0, 1, false, false, 0, {0, 0} },

	/* DBG_GRC_PARAM_NO_FW_VER */
	{{0, 0}, 0, 1, false, false, 0, {0, 0} },

	/* DBG_GRC_PARAM_RESERVED3 */
	{{0, 0}, 0, 1, false, false, 0, {0, 0} },

	/* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
	{{0, 1}, 0, 1, false, false, 0, {0, 1} },

	/* DBG_GRC_PARAM_DUMP_ILT_CDUC */
	{{1, 1}, 0, 1, false, false, 0, {0, 0} },

	/* DBG_GRC_PARAM_DUMP_ILT_CDUT */
	{{1, 1}, 0, 1, false, false, 0, {0, 0} },

	/* DBG_GRC_PARAM_DUMP_CAU_EXT */
	{{0, 0}, 0, 1, false, false, 0, {1, 1} }
};
720 
/* RSS memory definitions.
 * Fields: {mem_name, type_name, addr (128b units), entry_width (bits),
 * num_entries[bb, k2]}.
 */
static struct rss_mem_defs s_rss_mem_defs[] = {
	{"rss_mem_cid", "rss_cid", 0, 32,
	 {256, 320} },

	{"rss_mem_key_msb", "rss_key", 1024, 256,
	 {128, 208} },

	{"rss_mem_key_lsb", "rss_key", 2048, 64,
	 {128, 208} },

	{"rss_mem_info", "rss_info", 3072, 16,
	 {128, 208} },

	{"rss_mem_ind", "rss_ind", 4096, 16,
	 {16384, 26624} }
};

/* VFC RAM definitions. Fields: {mem_name, type_name, base_row, num_rows}. */
static struct vfc_ram_defs s_vfc_ram_defs[] = {
	{"vfc_ram_tt1", "vfc_ram", 0, 512},
	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
	{"vfc_ram_stt2", "vfc_ram", 640, 32},
	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
};

/* Big RAM definitions; field order follows struct big_ram_defs, with
 * per-chip values ordered {bb, k2}.
 */
static struct big_ram_defs s_big_ram_defs[] = {
	{"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
	 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
	 MISC_REG_BLOCK_256B_EN, {0, 0},
	 {153600, 180224} },

	{"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
	 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
	 MISC_REG_BLOCK_256B_EN, {0, 1},
	 {92160, 117760} },

	{"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
	 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
	 MISCS_REG_BLOCK_256B_EN, {0, 0},
	 {36864, 36864} }
};

/* RBC reset registers and their per-chip ({bb, k2}) reset values */
static struct rbc_reset_defs s_rbc_reset_defs[] = {
	{MISCS_REG_RESET_PL_HV,
	 {0x0, 0x400} },
	{MISC_REG_RESET_PL_PDA_VMAIN_1,
	 {0x4404040, 0x4404040} },
	{MISC_REG_RESET_PL_PDA_VMAIN_2,
	 {0x7, 0x7c00007} },
	{MISC_REG_RESET_PL_PDA_VAUX,
	 {0x2, 0x2} },
};

/* PHY definitions (K2 registers); field order follows struct phy_defs */
static struct phy_defs s_phy_defs[] = {
	{"nw_phy", NWS_REG_NWS_CMU_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
	{"sgmii_phy", MS_REG_MS_CMU_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
};

/* Split type names, indexed by enum dbg_grc_params split types */
static struct split_type_defs s_split_type_defs[] = {
	/* SPLIT_TYPE_NONE */
	{"eng"},

	/* SPLIT_TYPE_PORT */
	{"port"},

	/* SPLIT_TYPE_PF */
	{"pf"},

	/* SPLIT_TYPE_PORT_PF */
	{"port"},

	/* SPLIT_TYPE_VF */
	{"vf"}
};
812 
813 /******************************** Variables *********************************/
814 
/**
 * The version of the calling app (0 if it was never set by the app).
 */
static u32 s_app_ver;
819 
820 /**************************** Private Functions ******************************/
821 
822 /* Reads and returns a single dword from the specified unaligned buffer */
qed_read_unaligned_dword(u8 * buf)823 static u32 qed_read_unaligned_dword(u8 *buf)
824 {
825 	u32 dword;
826 
827 	memcpy((u8 *)&dword, buf, sizeof(dword));
828 	return dword;
829 }
830 
831 /* Sets the value of the specified GRC param */
qed_grc_set_param(struct ecore_hwfn * p_hwfn,enum dbg_grc_params grc_param,u32 val)832 static void qed_grc_set_param(struct ecore_hwfn *p_hwfn,
833 			      enum dbg_grc_params grc_param, u32 val)
834 {
835 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
836 
837 	dev_data->grc.param_val[grc_param] = val;
838 }
839 
840 /* Returns the value of the specified GRC param */
qed_grc_get_param(struct ecore_hwfn * p_hwfn,enum dbg_grc_params grc_param)841 static u32 qed_grc_get_param(struct ecore_hwfn *p_hwfn,
842 			     enum dbg_grc_params grc_param)
843 {
844 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
845 
846 	return dev_data->grc.param_val[grc_param];
847 }
848 
849 /* Initializes the GRC parameters */
qed_dbg_grc_init_params(struct ecore_hwfn * p_hwfn)850 static void qed_dbg_grc_init_params(struct ecore_hwfn *p_hwfn)
851 {
852 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
853 
854 	if (!dev_data->grc.params_initialized) {
855 		qed_dbg_grc_set_params_default(p_hwfn);
856 		dev_data->grc.params_initialized = 1;
857 	}
858 }
859 
860 /* Sets pointer and size for the specified binary buffer type */
qed_set_dbg_bin_buf(struct ecore_hwfn * p_hwfn,enum bin_dbg_buffer_type buf_type,const u32 * ptr,u32 size)861 static void qed_set_dbg_bin_buf(struct ecore_hwfn *p_hwfn,
862 				enum bin_dbg_buffer_type buf_type,
863 				const u32 *ptr, u32 size)
864 {
865 	struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type];
866 
867 	buf->ptr = (void *)(osal_uintptr_t)ptr;
868 	buf->size = size;
869 }
870 
/* Initializes debug data for the specified device: detects the chip and HW
 * type, records port/PF/VF counts, and initializes the GRC parameters.
 * Idempotent — only the first call does any work.
 * Returns DBG_STATUS_OK, or DBG_STATUS_UNKNOWN_CHIP for unsupported chips.
 */
static enum dbg_status qed_dbg_dev_init(struct ecore_hwfn *p_hwfn)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 num_pfs = 0, max_pfs_per_port = 0;

	if (dev_data->initialized)
		return DBG_STATUS_OK;

	/* Set chip */
	if (ECORE_IS_K2(p_hwfn->p_dev)) {
		dev_data->chip_id = CHIP_K2;
		dev_data->mode_enable[MODE_K2] = 1;
		dev_data->num_vfs = MAX_NUM_VFS_K2;
		num_pfs = MAX_NUM_PFS_K2;
		max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
	} else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
		dev_data->chip_id = CHIP_BB;
		dev_data->mode_enable[MODE_BB] = 1;
		dev_data->num_vfs = MAX_NUM_VFS_BB;
		num_pfs = MAX_NUM_PFS_BB;
		max_pfs_per_port = MAX_NUM_PFS_BB;
	} else {
		return DBG_STATUS_UNKNOWN_CHIP;
	}

	/* Set HW type */
	dev_data->hw_type = HW_TYPE_ASIC;
	dev_data->mode_enable[MODE_ASIC] = 1;

	/* Set port mode */
	switch (p_hwfn->p_dev->num_ports_in_engine) {
	case 1:
		dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
		break;
	case 2:
		dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
		break;
	case 4:
		dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
		break;
	}

	/* Set 100G mode */
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		dev_data->mode_enable[MODE_100G] = 1;

	/* Set number of ports */
	if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
	    dev_data->mode_enable[MODE_100G])
		dev_data->num_ports = 1;
	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
		dev_data->num_ports = 2;
	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
		dev_data->num_ports = 4;

	/* Set number of PFs per port.
	 * NOTE(review): if num_ports_in_engine is not 1/2/4 and 100G mode is
	 * off, num_ports keeps its prior value — presumably 0 on a fresh
	 * dev_data, which would make this a division by zero; confirm that
	 * num_ports_in_engine is always one of the handled values.
	 */
	dev_data->num_pfs_per_port = OSAL_MIN_T(u32,
						num_pfs / dev_data->num_ports,
						max_pfs_per_port);

	/* Initializes the GRC parameters */
	qed_dbg_grc_init_params(p_hwfn);

	dev_data->use_dmae = true;
	dev_data->initialized = 1;

	return DBG_STATUS_OK;
}
940 
get_dbg_block(struct ecore_hwfn * p_hwfn,enum block_id block_id)941 static const struct dbg_block *get_dbg_block(struct ecore_hwfn *p_hwfn,
942 					     enum block_id block_id)
943 {
944 	const struct dbg_block *dbg_block;
945 
946 	dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;
947 	return dbg_block + block_id;
948 }
949 
qed_get_dbg_block_per_chip(struct ecore_hwfn * p_hwfn,enum block_id block_id)950 static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct ecore_hwfn
951 							       *p_hwfn,
952 							       enum block_id
953 							       block_id)
954 {
955 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
956 
957 	return (const struct dbg_block_chip *)
958 	    p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
959 	    block_id * MAX_CHIP_IDS + dev_data->chip_id;
960 }
961 
/* Returns the per-chip dbg_reset_reg entry for the given reset register ID.
 * The binary buffer holds one entry per (reset reg, chip) pair, chip-major
 * within each reset register.
 */
static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct ecore_hwfn
							 *p_hwfn,
							 u8 reset_reg_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_reset_reg *reset_regs =
	    p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr;

	return &reset_regs[reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id];
}
972 
/* Reads the FW info structure for the specified Storm from the chip,
 * and writes it to the specified fw_info pointer.
 * On failure (invalid size read from the RAM), *fw_info is left zeroed.
 */
static void qed_read_storm_fw_info(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   u8 storm_id, struct fw_info *fw_info)
{
	struct storm_defs *storm = &s_storm_defs[storm_id];
	struct fw_info_location fw_info_location;
	u32 addr, i, *dest;

	memset(&fw_info_location, 0, sizeof(fw_info_location));
	memset(fw_info, 0, sizeof(*fw_info));

	/* Read first the address that points to fw_info location.
	 * The address is located in the last line of the Storm RAM.
	 */
	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
	    DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
	    sizeof(fw_info_location);

	dest = (u32 *)&fw_info_location;

	/* Read the location descriptor dword by dword via GRC */
	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
	     i++, addr += BYTES_IN_DWORD)
		dest[i] = ecore_rd(p_hwfn, p_ptt, addr);

	/* Read FW version info from Storm RAM, but only if the advertised
	 * size is sane (non-zero and no larger than the destination struct).
	 */
	if (fw_info_location.size > 0 && fw_info_location.size <=
	    sizeof(*fw_info)) {
		addr = fw_info_location.grc_addr;
		dest = (u32 *)fw_info;
		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
		     i++, addr += BYTES_IN_DWORD)
			dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
	}
}
1010 
1011 /* Dumps the specified string to the specified buffer.
1012  * Returns the dumped size in bytes.
1013  */
qed_dump_str(char * dump_buf,bool dump,const char * str)1014 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1015 {
1016 	if (dump)
1017 		strcpy(dump_buf, str);
1018 
1019 	return (u32)strlen(str) + 1;
1020 }
1021 
1022 /* Dumps zeros to align the specified buffer to dwords.
1023  * Returns the dumped size in bytes.
1024  */
qed_dump_align(char * dump_buf,bool dump,u32 byte_offset)1025 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1026 {
1027 	u8 offset_in_dword, align_size;
1028 
1029 	offset_in_dword = (u8)(byte_offset & 0x3);
1030 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1031 
1032 	if (dump && align_size)
1033 		memset(dump_buf, 0, align_size);
1034 
1035 	return align_size;
1036 }
1037 
1038 /* Writes the specified string param to the specified buffer.
1039  * Returns the dumped size in dwords.
1040  */
static u32 qed_dump_str_param(u32 *dump_buf,
			      bool dump,
			      const char *param_name, const char *param_val)
{
	char *byte_buf = (char *)dump_buf;
	u32 byte_offset = 0;

	/* Dump the param name (NUL-terminated) */
	byte_offset += qed_dump_str(byte_buf + byte_offset, dump, param_name);

	/* A type byte of 1 marks the value as a string */
	if (dump)
		byte_buf[byte_offset] = 1;
	byte_offset++;

	/* Dump the param value (NUL-terminated) */
	byte_offset += qed_dump_str(byte_buf + byte_offset, dump, param_val);

	/* Zero-pad up to the next dword boundary */
	byte_offset += qed_dump_align(byte_buf + byte_offset, dump,
				      byte_offset);

	return BYTES_TO_DWORDS(byte_offset);
}
1064 
1065 /* Writes the specified numeric param to the specified buffer.
1066  * Returns the dumped size in dwords.
1067  */
static u32 qed_dump_num_param(u32 *dump_buf,
			      bool dump, const char *param_name, u32 param_val)
{
	char *byte_buf = (char *)dump_buf;
	u32 pos = 0;

	/* Dump the param name (NUL-terminated) */
	pos += qed_dump_str(byte_buf + pos, dump, param_name);

	/* A type byte of 0 marks the value as numeric */
	if (dump)
		byte_buf[pos] = 0;
	pos++;

	/* Zero-pad up to the next dword boundary */
	pos += qed_dump_align(byte_buf + pos, dump, pos);

	/* Dump the numeric value as a full dword; from here on the offset
	 * is tracked in dwords rather than bytes.
	 */
	pos = BYTES_TO_DWORDS(pos);
	if (dump)
		dump_buf[pos] = param_val;
	pos++;

	return pos;
}
1093 
1094 /* Reads the FW version and writes it as a param to the specified buffer.
1095  * Returns the dumped size in dwords.
1096  */
static u32 qed_dump_fw_ver_param(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u32 *dump_buf, bool dump)
{
	/* Placeholders are dumped when not reading the real version */
	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
	struct fw_info fw_info = { {0}, {0} };
	u32 offset = 0;

	/* Only read the chip when dumping and unless NO_FW_VER was requested */
	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		/* Read FW info from chip */
		qed_read_fw_info(p_hwfn, p_ptt, &fw_info);

		/* Create FW version/image strings */
		if (snprintf(fw_ver_str, sizeof(fw_ver_str),
			     "%d_%d_%d_%d", fw_info.ver.num.major,
			     fw_info.ver.num.minor, fw_info.ver.num.rev,
			     fw_info.ver.num.eng) < 0)
			DP_NOTICE(p_hwfn, false,
				  "Unexpected debug error: invalid FW version string\n");
		switch (fw_info.ver.image_id) {
		case FW_IMG_MAIN:
			strcpy(fw_img_str, "main");
			break;
		default:
			strcpy(fw_img_str, "unknown");
			break;
		}
	}

	/* Dump FW version, image and timestamp params (timestamp is 0 when
	 * FW info wasn't read).
	 */
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-version", fw_ver_str);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-image", fw_img_str);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "fw-timestamp", fw_info.ver.timestamp);

	return offset;
}
1138 
1139 /* Reads the MFW version and writes it as a param to the specified buffer.
1140  * Returns the dumped size in dwords.
1141  */
static u32 qed_dump_mfw_ver_param(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  u32 *dump_buf, bool dump)
{
	/* Placeholder dumped when the real version is not read */
	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;

	if (dump &&
	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		u32 global_section_offsize, global_section_addr, mfw_ver;
		u32 public_data_addr, global_section_offsize_addr;

		/* Find MCP public data GRC address. Needs to be ORed with
		 * MCP_REG_SCRATCH due to a HW bug.
		 */
		public_data_addr = ecore_rd(p_hwfn,
					  p_ptt,
					  MISC_REG_SHARED_MEM_ADDR) |
				   MCP_REG_SCRATCH;

		/* Find MCP public global section offset */
		global_section_offsize_addr = public_data_addr +
					      offsetof(struct mcp_public_data,
						       sections) +
					      sizeof(offsize_t) * PUBLIC_GLOBAL;
		global_section_offsize = ecore_rd(p_hwfn, p_ptt,
						global_section_offsize_addr);
		/* The offsize word stores the offset in units of 4 bytes,
		 * relative to the MCP scratchpad.
		 */
		global_section_addr =
			MCP_REG_SCRATCH +
			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;

		/* Read MFW version from MCP public global section */
		mfw_ver = ecore_rd(p_hwfn, p_ptt,
				 global_section_addr +
				 offsetof(struct public_global, mfw_ver));

		/* Format version as "major_minor_rev_eng", one byte each,
		 * most significant byte first.
		 */
		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
			DP_NOTICE(p_hwfn, false,
				  "Unexpected debug error: invalid MFW version string\n");
	}

	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
}
1187 
1188 /* Reads the chip revision from the chip and writes it as a param to the
1189  * specified buffer. Returns the dumped size in dwords.
1190  */
static u32 qed_dump_chip_revision_param(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	/* "??" is kept for emulation/FPGA (non-ASIC) HW types */
	char rev_str[3] = "??";

	if (dev_data->hw_type == HW_TYPE_ASIC) {
		u32 rev = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
		u32 metal = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);

		/* Encode e.g. rev 1 / metal 0 as "b0" */
		rev_str[0] = 'a' + (u8)rev;
		rev_str[1] = '0' + (u8)metal;
	}

	return qed_dump_str_param(dump_buf, dump, "chip-revision", rev_str);
}
1210 
1211 /* Writes a section header to the specified buffer.
1212  * Returns the dumped size in dwords.
1213  */
static u32 qed_dump_section_hdr(u32 *dump_buf,
				bool dump, const char *name, u32 num_params)
{
	/* A section header is simply a numeric param whose name is the
	 * section name and whose value is the section's param count.
	 */
	return qed_dump_num_param(dump_buf, dump, name, num_params);
}
1219 
1220 /* Writes the common global params to the specified buffer.
1221  * Returns the dumped size in dwords.
1222  */
static u32 qed_dump_common_global_params(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u32 *dump_buf,
					 bool dump,
					 u8 num_specific_global_params)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	char sw_platform_str[MAX_SW_PLTAFORM_STR_SIZE];
	u32 offset = 0;
	u8 num_params;

	/* Fill platform string */
	ecore_set_platform_str(p_hwfn, sw_platform_str,
			       MAX_SW_PLTAFORM_STR_SIZE);

	/* Dump global params section header. The param count must match the
	 * params dumped below: the "path" param is added only on BB chips.
	 */
	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +
		(dev_data->chip_id == CHIP_BB ? 1 : 0);
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "global_params", num_params);

	/* Store params (order defines the dump file layout) */
	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
	offset += qed_dump_mfw_ver_param(p_hwfn,
					 p_ptt, dump_buf + offset, dump);
	offset += qed_dump_chip_revision_param(p_hwfn,
					       p_ptt, dump_buf + offset, dump);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "tools-version", TOOLS_VERSION);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump,
				     "chip",
				     s_chip_defs[dev_data->chip_id].name);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump,
				     "platform",
				     s_hw_type_defs[dev_data->hw_type].name);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "sw-platform", sw_platform_str);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "pci-func", p_hwfn->abs_pf_id);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "epoch", OSAL_GET_EPOCH(p_hwfn));
	/* BB chips have two engine paths; dump which one we're on */
	if (dev_data->chip_id == CHIP_BB)
		offset += qed_dump_num_param(dump_buf + offset,
					     dump, "path",
					     ECORE_PATH_ID(p_hwfn));

	return offset;
}
1273 
1274 /* Writes the "last" section (including CRC) to the specified buffer at the
1275  * given offset. Returns the dumped size in dwords.
1276  */
static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
{
	u32 crc_offset;

	/* Dump "last" section header (a section with zero params) */
	crc_offset = offset +
	    qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);

	/* Store the inverted CRC32 of everything dumped so far (up to, but
	 * not including, the CRC dword itself) right after the header.
	 */
	if (dump)
		dump_buf[crc_offset] = ~OSAL_CRC32(0xffffffff,
						   (u8 *)dump_buf,
						   DWORDS_TO_BYTES(crc_offset));

	/* Dumped size: header + one CRC dword */
	return crc_offset + 1 - offset;
}
1294 
1295 /* Update blocks reset state  */
static void qed_update_blocks_reset_state(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
	u8 rst_reg_id;
	u32 blk_id;

	/* Read reset registers (removed registers keep their zero value) */
	for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) {
		const struct dbg_reset_reg *rst_reg;
		bool rst_reg_removed;
		u32 rst_reg_addr;

		rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id);
		rst_reg_removed = GET_FIELD(rst_reg->data,
					    DBG_RESET_REG_IS_REMOVED);
		/* Register address field is stored in dwords */
		rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
							 DBG_RESET_REG_ADDR));

		if (!rst_reg_removed)
			reg_val[rst_reg_id] = ecore_rd(p_hwfn, p_ptt,
						     rst_reg_addr);
	}

	/* Check if blocks are in reset: a cleared bit in the block's reset
	 * register means the block is held in reset.
	 */
	for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) {
		const struct dbg_block_chip *blk;
		bool has_rst_reg;
		bool is_removed;

		blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id);
		is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED);
		has_rst_reg = GET_FIELD(blk->flags,
					DBG_BLOCK_CHIP_HAS_RESET_REG);

		/* Blocks without a reset register keep their previous state */
		if (!is_removed && has_rst_reg)
			dev_data->block_in_reset[blk_id] =
			    !(reg_val[blk->reset_reg_id] &
			      OSAL_BIT(blk->reset_reg_bit_offset));
	}
}
1338 
1339 /* is_mode_match recursive function */
static bool qed_is_mode_match_rec(struct ecore_hwfn *p_hwfn,
				  u16 *modes_buf_offset, u8 rec_depth)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const u8 *dbg_array;
	bool arg1, arg2;
	u8 tree_val;

	/* Guard against a corrupt tree causing unbounded recursion */
	if (rec_depth > MAX_RECURSION_DEPTH) {
		DP_NOTICE(p_hwfn, false,
			  "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");
		return false;
	}

	/* Get next element from modes tree buffer; the offset is advanced
	 * so that each recursive call consumes its own sub-expression.
	 */
	dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
	tree_val = dbg_array[(*modes_buf_offset)++];

	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return !qed_is_mode_match_rec(p_hwfn,
					      modes_buf_offset, rec_depth + 1);
	case INIT_MODE_OP_OR:
	case INIT_MODE_OP_AND:
		/* Both operands are evaluated unconditionally (no short
		 * circuit) so *modes_buf_offset always advances past the
		 * entire sub-tree regardless of the result.
		 */
		arg1 = qed_is_mode_match_rec(p_hwfn,
					     modes_buf_offset, rec_depth + 1);
		arg2 = qed_is_mode_match_rec(p_hwfn,
					     modes_buf_offset, rec_depth + 1);
		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
							arg2) : (arg1 && arg2);
	default:
		/* Leaf node: values >= MAX_INIT_MODE_OPS are mode indices */
		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
	}
}
1374 
1375 /* Returns true if the mode (specified using modes_buf_offset) is enabled */
static bool qed_is_mode_match(struct ecore_hwfn *p_hwfn, u16 *modes_buf_offset)
{
	/* Start the recursive tree evaluation at depth 0 */
	return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
}
1380 
1381 /* Enable / disable the Debug block */
static void qed_bus_enable_dbg_block(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt, bool enable)
{
	/* Single on/off register controls the whole Debug block */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
}
1387 
1388 /* Resets the Debug block */
static void qed_bus_reset_dbg_block(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
	const struct dbg_reset_reg *reset_reg;
	const struct dbg_block_chip *block;

	/* Locate the reset register/bit that controls the Debug block */
	block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
	reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id);
	reset_reg_addr =
	    DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR));

	old_reset_reg_val = ecore_rd(p_hwfn, p_ptt, reset_reg_addr);
	new_reset_reg_val =
	    old_reset_reg_val & ~OSAL_BIT(block->reset_reg_bit_offset);

	/* Pulse the block's reset bit: clear it (assert reset), then restore
	 * the original value (de-assert), leaving other blocks untouched.
	 */
	ecore_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val);
	ecore_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val);
}
1408 
1409 /* Enable / disable Debug Bus clients according to the specified mask
1410  * (1 = enable, 0 = disable).
1411  */
static void qed_bus_enable_clients(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 client_mask)
{
	/* Each bit in client_mask enables/disables one Debug Bus client */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
}
1417 
/* Configures one block's debug line: which line to select, which dwords of
 * it to enable, the right-shift to apply, and the force-valid/force-frame
 * masks. Register addresses in dbg_block_chip are stored in dwords.
 */
static void qed_bus_config_dbg_line(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    enum block_id block_id,
				    u8 line_id,
				    u8 enable_mask,
				    u8 right_shift,
				    u8 force_valid_mask, u8 force_frame_mask)
{
	const struct dbg_block_chip *block =
		qed_get_dbg_block_per_chip(p_hwfn, block_id);

	ecore_wr(p_hwfn, p_ptt,
		 DWORDS_TO_BYTES(block->dbg_select_reg_addr),
		 line_id);
	ecore_wr(p_hwfn, p_ptt,
		 DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr),
		 enable_mask);
	ecore_wr(p_hwfn, p_ptt,
		 DWORDS_TO_BYTES(block->dbg_shift_reg_addr),
		 right_shift);
	ecore_wr(p_hwfn, p_ptt,
		 DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr),
		 force_valid_mask);
	ecore_wr(p_hwfn, p_ptt,
		 DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr),
		 force_frame_mask);
}
1445 
1446 /* Disable debug bus in all blocks */
static void qed_bus_disable_blocks(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id;

	/* Disable all blocks */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		const struct dbg_block_chip *block_per_chip =
		    qed_get_dbg_block_per_chip(p_hwfn,
					       (enum block_id)block_id);

		/* Skip blocks that don't exist on this chip or are in reset
		 * (writing to a block in reset would be invalid).
		 */
		if (GET_FIELD(block_per_chip->flags,
			      DBG_BLOCK_CHIP_IS_REMOVED) ||
		    dev_data->block_in_reset[block_id])
			continue;

		/* Disable debug bus */
		if (GET_FIELD(block_per_chip->flags,
			      DBG_BLOCK_CHIP_HAS_DBG_BUS)) {
			u32 dbg_en_addr =
				block_per_chip->dbg_dword_enable_reg_addr;
			u16 modes_buf_offset =
			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
				      DBG_MODE_HDR_MODES_BUF_OFFSET);
			bool eval_mode =
			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;

			/* Clear the dword-enable register only if the block's
			 * mode expression matches (or there is none).
			 */
			if (!eval_mode ||
			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
				ecore_wr(p_hwfn, p_ptt,
				       DWORDS_TO_BYTES(dbg_en_addr),
				       0);
		}
	}
}
1484 
1485 /* Returns true if the specified entity (indicated by GRC param) should be
1486  * included in the dump, false otherwise.
1487  */
static bool qed_grc_is_included(struct ecore_hwfn *p_hwfn,
				enum dbg_grc_params grc_param)
{
	/* A non-zero GRC param value means "include in dump" */
	return qed_grc_get_param(p_hwfn, grc_param) > 0;
}
1493 
1494 /* Returns the storm_id that matches the specified Storm letter,
1495  * or MAX_DBG_STORMS if invalid storm letter.
1496  */
qed_get_id_from_letter(char storm_letter)1497 static enum dbg_storms qed_get_id_from_letter(char storm_letter)
1498 {
1499 	u8 storm_id;
1500 
1501 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
1502 		if (s_storm_defs[storm_id].letter == storm_letter)
1503 			return (enum dbg_storms)storm_id;
1504 
1505 	return MAX_DBG_STORMS;
1506 }
1507 
/* Returns true if the specified Storm should be included in the dump, false
1509  * otherwise.
1510  */
static bool qed_grc_is_storm_included(struct ecore_hwfn *p_hwfn,
				      enum dbg_storms storm)
{
	/* Storm IDs double as GRC param indices for the per-Storm params */
	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
}
1516 
1517 /* Returns true if the specified memory should be included in the dump, false
1518  * otherwise.
1519  */
static bool qed_grc_is_mem_included(struct ecore_hwfn *p_hwfn,
				    enum block_id block_id, u8 mem_group_id)
{
	const struct dbg_block *block;
	u8 i;

	block = get_dbg_block(p_hwfn, block_id);

	/* If the block is associated with a Storm, check Storm match */
	if (block->associated_storm_letter) {
		enum dbg_storms associated_storm_id =
		    qed_get_id_from_letter(block->associated_storm_letter);

		if (associated_storm_id == MAX_DBG_STORMS ||
		    !qed_grc_is_storm_included(p_hwfn, associated_storm_id))
			return false;
	}

	/* Big RAM memories (BRB/BTB/BMB) are controlled by their own
	 * dedicated GRC params.
	 */
	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
		struct big_ram_defs *big_ram = &s_big_ram_defs[i];

		if (mem_group_id == big_ram->mem_group_id ||
		    mem_group_id == big_ram->ram_mem_group_id)
			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
	}

	/* Map the remaining memory groups to their controlling GRC param */
	switch (mem_group_id) {
	case MEM_GROUP_PXP_ILT:
	case MEM_GROUP_PXP_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
	case MEM_GROUP_RAM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
	case MEM_GROUP_PBUF:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
	case MEM_GROUP_CAU_MEM:
	case MEM_GROUP_CAU_SB:
	case MEM_GROUP_CAU_PI:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
	case MEM_GROUP_CAU_MEM_EXT:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
	case MEM_GROUP_QM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
	case MEM_GROUP_CFC_MEM:
	case MEM_GROUP_CONN_CFC_MEM:
	case MEM_GROUP_TASK_CFC_MEM:
		/* CFC memories are dumped for either CFC or CM-context dumps */
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
	case MEM_GROUP_DORQ_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
	case MEM_GROUP_IGU_MEM:
	case MEM_GROUP_IGU_MSIX:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
	case MEM_GROUP_MULD_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
	case MEM_GROUP_PRS_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
	case MEM_GROUP_DMAE_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
	case MEM_GROUP_TM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
	case MEM_GROUP_SDM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
	case MEM_GROUP_TDIF_CTX:
	case MEM_GROUP_RDIF_CTX:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
	case MEM_GROUP_CM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
	case MEM_GROUP_IOR:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
	default:
		/* Memory groups with no controlling param are always dumped */
		return true;
	}
}
1593 
1594 /* Stalls all Storms */
/* Stalls or un-stalls all Storms that are included in the dump (per the
 * per-Storm GRC params), then waits for the stall to take effect.
 */
static void qed_grc_stall_storms(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt, bool stall)
{
	u32 reg_addr;
	u8 storm_id;

	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		if (!qed_grc_is_storm_included(p_hwfn,
					       (enum dbg_storms)storm_id))
			continue;

		/* Write 1/0 to the Storm's SEM fast-memory stall register */
		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
		    SEM_FAST_REG_STALL_0;
		ecore_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
	}

	/* Give the Storms time to (un)stall before proceeding */
	OSAL_MSLEEP(STALL_DELAY_MS);
}
1613 
1614 /* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
1615  * taken out of reset.
1616  */
static void qed_grc_unreset_blocks(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, bool rbc_only)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 chip_id = dev_data->chip_id;
	u32 i;

	/* Take RBCs out of reset. NOTE(review): the condition indexes
	 * reset_val with dev_data->chip_id and the write with chip_id -
	 * same value, just inconsistent spelling.
	 */
	for (i = 0; i < OSAL_ARRAY_SIZE(s_rbc_reset_defs); i++)
		if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id])
			ecore_wr(p_hwfn,
			       p_ptt,
			       s_rbc_reset_defs[i].reset_reg_addr +
			       RESET_REG_UNRESET_OFFSET,
			       s_rbc_reset_defs[i].reset_val[chip_id]);

	if (!rbc_only) {
		u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
		u8 reset_reg_id;
		u32 block_id;

		/* Fill reset regs values: accumulate, per reset register,
		 * the bits of all blocks that should be unreset before dump.
		 */
		for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
			bool is_removed, has_reset_reg, unreset_before_dump;
			const struct dbg_block_chip *block;

			block = qed_get_dbg_block_per_chip(p_hwfn,
							   (enum block_id)
							   block_id);
			is_removed =
			    GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED);
			has_reset_reg =
			    GET_FIELD(block->flags,
				      DBG_BLOCK_CHIP_HAS_RESET_REG);
			unreset_before_dump =
			    GET_FIELD(block->flags,
				      DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP);

			if (!is_removed && has_reset_reg && unreset_before_dump)
				reg_val[block->reset_reg_id] |=
				    OSAL_BIT(block->reset_reg_bit_offset);
		}

		/* Write reset registers (one write per register, at its
		 * UNRESET offset, so only the accumulated bits are released).
		 */
		for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
		     reset_reg_id++) {
			const struct dbg_reset_reg *reset_reg;
			u32 reset_reg_addr;

			reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);

			if (GET_FIELD
			    (reset_reg->data, DBG_RESET_REG_IS_REMOVED))
				continue;

			if (reg_val[reset_reg_id]) {
				reset_reg_addr =
				    GET_FIELD(reset_reg->data,
					      DBG_RESET_REG_ADDR);
				ecore_wr(p_hwfn,
				       p_ptt,
				       DWORDS_TO_BYTES(reset_reg_addr) +
				       RESET_REG_UNRESET_OFFSET,
				       reg_val[reset_reg_id]);
			}
		}
	}
}
1685 
1686 /* Returns the attention block data of the specified block */
static const struct dbg_attn_block_type_data *
qed_get_block_attn_data(struct ecore_hwfn *p_hwfn,
			enum block_id block_id, enum dbg_attn_type attn_type)
{
	/* The ATTN_BLOCKS debug array is indexed by block ID; each entry
	 * holds per-attention-type (interrupt/parity) data.
	 */
	const struct dbg_attn_block *base_attn_block_arr =
	    (const struct dbg_attn_block *)
	    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;

	return &base_attn_block_arr[block_id].per_type_data[attn_type];
}
1697 
1698 /* Returns the attention registers of the specified block */
static const struct dbg_attn_reg *
qed_get_block_attn_regs(struct ecore_hwfn *p_hwfn,
			enum block_id block_id, enum dbg_attn_type attn_type,
			u8 *num_attn_regs)
{
	const struct dbg_attn_block_type_data *block_type_data =
	    qed_get_block_attn_data(p_hwfn, block_id, attn_type);

	/* Output param: number of attention registers for this block/type */
	*num_attn_regs = block_type_data->num_regs;

	/* The register descriptors live in the ATTN_REGS debug array, at
	 * the offset recorded in the block's type data.
	 */
	return (const struct dbg_attn_reg *)
		p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr +
		block_type_data->regs_offset;
}
1713 
1714 /* For each block, clear the status of all parities */
static void qed_grc_clear_all_prty(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_attn_reg *attn_reg_arr;
	u8 reg_idx, num_attn_regs;
	u32 block_id;

	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
		/* Can't access registers of a block held in reset */
		if (dev_data->block_in_reset[block_id])
			continue;

		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
						       (enum block_id)block_id,
						       ATTN_TYPE_PARITY,
						       &num_attn_regs);

		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data =
				&attn_reg_arr[reg_idx];
			u16 modes_buf_offset;
			bool eval_mode;

			/* Check mode */
			eval_mode = GET_FIELD(reg_data->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			modes_buf_offset =
				GET_FIELD(reg_data->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);

			/* If Mode match: clear parity status. Reading the
			 * sts_clr register is what clears the status.
			 */
			if (!eval_mode ||
			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
				ecore_rd(p_hwfn, p_ptt,
				    DWORDS_TO_BYTES(reg_data->sts_clr_address));
		}
	}
}
1753 
1754 /* Dumps GRC registers section header. Returns the dumped size in dwords.
1755  * the following parameters are dumped:
1756  * - count: no. of dumped entries
1757  * - split_type: split type
1758  * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
1759  * - reg_type_name: register type name (dumped only if reg_type_name != NULL)
1760  */
static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
				 bool dump,
				 u32 num_reg_entries,
				 enum init_split_types split_type,
				 u8 split_id, const char *reg_type_name)
{
	/* Base params are "count" and "split"; "id" and "type" are optional */
	u8 num_params = 2 +
	    (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0);
	u32 offset = 0;

	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "grc_regs", num_params);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "count", num_reg_entries);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "split",
				     s_split_type_defs[split_type].name);
	if (split_type != SPLIT_TYPE_NONE)
		offset += qed_dump_num_param(dump_buf + offset,
					     dump, "id", split_id);
	if (reg_type_name)
		offset += qed_dump_str_param(dump_buf + offset,
					     dump, "type", reg_type_name);

	return offset;
}
1787 
1788 /* Reads the specified registers into the specified buffer.
1789  * The addr and len arguments are specified in dwords.
1790  */
void qed_read_regs(struct ecore_hwfn *p_hwfn,
		   struct ecore_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
{
	u32 dword_idx;

	/* Read len consecutive dwords starting at GRC dword address addr */
	for (dword_idx = 0; dword_idx < len; dword_idx++)
		buf[dword_idx] = ecore_rd(p_hwfn, p_ptt,
					  DWORDS_TO_BYTES(addr + dword_idx));
}
1799 
1800 /* Dumps the GRC registers in the specified address range.
1801  * Returns the dumped size in dwords.
1802  * The addr and len arguments are specified in dwords.
1803  */
static u32 qed_grc_dump_addr_range(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   u32 *dump_buf,
				   bool dump, u32 addr, u32 len, bool wide_bus,
				   enum init_split_types split_type,
				   u8 split_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 port_id = 0, pf_id = 0;
	u16 vf_id = 0, fid = 0;
	bool read_using_dmae = false;
	u32 thresh;

	/* Dry run: just report the size that would be dumped */
	if (!dump)
		return len;

	/* Decode the split ID into port/PF/VF according to the split type */
	switch (split_type) {
	case SPLIT_TYPE_PORT:
		port_id = split_id;
		break;
	case SPLIT_TYPE_PF:
		pf_id = split_id;
		break;
	case SPLIT_TYPE_PORT_PF:
		port_id = split_id / dev_data->num_pfs_per_port;
		pf_id = port_id + dev_data->num_ports *
		    (split_id % dev_data->num_pfs_per_port);
		break;
	case SPLIT_TYPE_VF:
		vf_id = split_id;
		break;
	default:
		break;
	}

	/* Try reading using DMAE: only for long reads (above the HW type's
	 * threshold) or wide-bus registers that need protected access, and
	 * never for VF splits.
	 */
	if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
	    (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
	     (PROTECT_WIDE_BUS && wide_bus))) {
		struct dmae_params dmae_params;

		/* Set DMAE params */
		memset(&dmae_params, 0, sizeof(dmae_params));
		SET_FIELD(dmae_params.flags, DMAE_PARAMS_COMPLETION_DST, 1);
		switch (split_type) {
		case SPLIT_TYPE_PORT:
			SET_FIELD(dmae_params.flags, DMAE_PARAMS_PORT_VALID,
				  1);
			dmae_params.port_id = port_id;
			break;
		case SPLIT_TYPE_PF:
			SET_FIELD(dmae_params.flags,
				  DMAE_PARAMS_SRC_PF_VALID, 1);
			dmae_params.src_pf_id = pf_id;
			break;
		case SPLIT_TYPE_PORT_PF:
			SET_FIELD(dmae_params.flags, DMAE_PARAMS_PORT_VALID,
				  1);
			SET_FIELD(dmae_params.flags,
				  DMAE_PARAMS_SRC_PF_VALID, 1);
			dmae_params.port_id = port_id;
			dmae_params.src_pf_id = pf_id;
			break;
		default:
			break;
		}

		/* Execute DMAE command; on failure, permanently fall back to
		 * GRC reads for the rest of this dump (use_dmae is cleared).
		 */
		read_using_dmae = !ecore_dmae_grc2host(p_hwfn,
						     p_ptt,
						     DWORDS_TO_BYTES(addr),
						     (u64)(uintptr_t)(dump_buf),
						     len, &dmae_params);
		if (!read_using_dmae) {
			dev_data->use_dmae = 0;
			DP_VERBOSE(p_hwfn->p_dev,
				   ECORE_MSG_DEBUG,
				   "Failed reading from chip using DMAE, using GRC instead\n");
		}
	}

	if (read_using_dmae)
		goto print_log;

	/* If not read using DMAE, read using GRC */

	/* Set pretend (skipped if the requested pretend context is already
	 * active, to avoid redundant pretend register writes).
	 */
	if (split_type != dev_data->pretend.split_type ||
	    split_id != dev_data->pretend.split_id) {
		switch (split_type) {
		case SPLIT_TYPE_PORT:
			ecore_port_pretend(p_hwfn, p_ptt, port_id);
			break;
		case SPLIT_TYPE_PF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					  pf_id);
			ecore_fid_pretend(p_hwfn, p_ptt, fid);
			break;
		case SPLIT_TYPE_PORT_PF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					  pf_id);
			ecore_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
			break;
		case SPLIT_TYPE_VF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
			      | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
					  vf_id);
			ecore_fid_pretend(p_hwfn, p_ptt, fid);
			break;
		default:
			break;
		}

		/* Remember the active pretend context */
		dev_data->pretend.split_type = (u8)split_type;
		dev_data->pretend.split_id = split_id;
	}

	/* Read registers using GRC */
	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);

print_log:
	/* Print a progress log line each time another thresh registers
	 * have been read since the last log.
	 */
	dev_data->num_regs_read += len;
	thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
	if ((dev_data->num_regs_read / thresh) >
	    ((dev_data->num_regs_read - len) / thresh))
		DP_VERBOSE(p_hwfn->p_dev,
			   ECORE_MSG_DEBUG,
			   "Dumped %d registers...\n", dev_data->num_regs_read);

	return len;
}
1936 
1937 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
1938  * The addr and len arguments are specified in dwords.
1939  */
static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
				      bool dump, u32 addr, u32 len)
{
	/* The sequence header is a single dword: the GRC address in the low
	 * bits with the dword length packed above it.
	 */
	if (dump) {
		u32 hdr_dword = addr | (len << REG_DUMP_LEN_SHIFT);

		*dump_buf = hdr_dword;
	}

	return 1;
}
1948 
1949 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
1950  * The addr and len arguments are specified in dwords.
1951  */
static u32 qed_grc_dump_reg_entry(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  u32 *dump_buf,
				  bool dump, u32 addr, u32 len, bool wide_bus,
				  enum init_split_types split_type, u8 split_id)
{
	u32 hdr_size, data_size;

	/* One-dword sequence header, followed by the register data itself */
	hdr_size = qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
	data_size = qed_grc_dump_addr_range(p_hwfn,
					    p_ptt,
					    dump_buf + hdr_size,
					    dump, addr, len, wide_bus,
					    split_type, split_id);

	return hdr_size + data_size;
}
1969 
1970 /* Dumps GRC registers sequence with skip cycle.
1971  * Returns the dumped size in dwords.
1972  * - addr:	start GRC address in dwords
1973  * - total_len:	total no. of dwords to dump
1974  * - read_len:	no. consecutive dwords to read
1975  * - skip_len:	no. of dwords to skip (and fill with zeros)
1976  */
static u32 qed_grc_dump_reg_entry_skip(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       u32 *dump_buf,
				       bool dump,
				       u32 addr,
				       u32 total_len,
				       u32 read_len, u32 skip_len)
{
	u32 offset = 0, reg_offset = 0;

	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);

	if (!dump)
		return offset + total_len;

	while (reg_offset < total_len) {
		/* Read the next consecutive chunk of up to read_len dwords */
		u32 curr_len = OSAL_MIN_T(u32, read_len,
					  total_len - reg_offset);

		offset += qed_grc_dump_addr_range(p_hwfn,
						  p_ptt,
						  dump_buf + offset,
						  dump,  addr, curr_len, false,
						  SPLIT_TYPE_NONE, 0);
		reg_offset += curr_len;
		addr += curr_len;

		/* Zero-fill the skipped cycle. The skip length must be
		 * clamped against the remaining dwords (total_len -
		 * reg_offset); the previous clamp against total_len -
		 * skip_len could overshoot the declared total length when
		 * the remaining tail was shorter than skip_len.
		 */
		if (reg_offset < total_len) {
			curr_len = OSAL_MIN_T(u32, skip_len,
					      total_len - reg_offset);
			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
			offset += curr_len;
			reg_offset += curr_len;
			addr += curr_len;
		}
	}

	return offset;
}
2016 
2017 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_regs_entries(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct virt_mem_desc input_regs_arr,
				     u32 *dump_buf,
				     bool dump,
				     enum init_split_types split_type,
				     u8 split_id,
				     bool block_enable[MAX_BLOCK_ID],
				     u32 *num_dumped_reg_entries)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	*num_dumped_reg_entries = 0;

	/* Walk the input array: each group is a one-dword condition header
	 * followed by cond_hdr->data_size dwords of register entries.
	 */
	while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
		const struct dbg_dump_cond_hdr *cond_hdr =
		    (const struct dbg_dump_cond_hdr *)
		    input_regs_arr.ptr + input_offset++;
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode/block */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		/* Skip the whole group if its mode doesn't match or its
		 * block is excluded from this dump.
		 */
		if (!mode_match || !block_enable[cond_hdr->block_id]) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
			const struct dbg_dump_reg *reg =
			    (const struct dbg_dump_reg *)
			    input_regs_arr.ptr + input_offset;
			u32 addr, len;
			bool wide_bus;

			/* Each entry encodes address, length and the
			 * wide-bus flag in a single dword.
			 */
			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 len,
							 wide_bus,
							 split_type, split_id);
			(*num_dumped_reg_entries)++;
		}
	}

	return offset;
}
2080 
/* Dumps GRC registers for a single split type/ID. Returns the dumped size
 * in dwords (0 if no register entries were dumped).
 */
static u32 qed_grc_dump_split_data(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct virt_mem_desc input_regs_arr,
				   u32 *dump_buf,
				   bool dump,
				   bool block_enable[MAX_BLOCK_ID],
				   enum init_split_types split_type,
				   u8 split_id, const char *reg_type_name)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	enum init_split_types hdr_split_type = split_type;
	u32 reg_entries, data_offset;
	u8 hdr_split_id = split_id;

	/* PORT_PF entries are reported under a per-port split header */
	if (split_type == SPLIT_TYPE_PORT_PF) {
		hdr_split_type = SPLIT_TYPE_PORT;
		hdr_split_id = split_id / dev_data->num_pfs_per_port;
	}

	/* Reserve room for the registers header; it is rewritten below once
	 * the number of dumped entries is known.
	 */
	data_offset = qed_grc_dump_regs_hdr(dump_buf,
					    false,
					    0,
					    hdr_split_type,
					    hdr_split_id, reg_type_name);

	/* Dump the register entries themselves */
	data_offset += qed_grc_dump_regs_entries(p_hwfn,
						 p_ptt,
						 input_regs_arr,
						 dump_buf + data_offset,
						 dump,
						 split_type,
						 split_id,
						 block_enable, &reg_entries);

	/* Overwrite the reserved header with the final entry count */
	if (dump && reg_entries > 0)
		qed_grc_dump_regs_hdr(dump_buf,
				      dump,
				      reg_entries,
				      hdr_split_type,
				      hdr_split_id, reg_type_name);

	return reg_entries > 0 ? data_offset : 0;
}
2130 
2131 /* Dumps registers according to the input registers array. Returns the dumped
2132  * size in dwords.
2133  */
static u32 qed_grc_dump_registers(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  u32 *dump_buf,
				  bool dump,
				  bool block_enable[MAX_BLOCK_ID],
				  const char *reg_type_name)
{
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0, input_offset = 0;

	/* Walk the debug array: each section is a one-dword split header
	 * followed by split_data_size dwords of register groups.
	 */
	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct virt_mem_desc curr_input_regs_arr;
		enum init_split_types split_type;
		u16 split_count = 0;
		u32 split_data_size;
		u8 split_id;

		split_hdr =
		    (const struct dbg_dump_split_hdr *)
		    dbg_buf->ptr + input_offset++;
		split_type =
		    GET_FIELD(split_hdr->hdr,
			      DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr,
					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_regs_arr.ptr =
		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
		    input_offset;
		curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);

		/* Number of instances to dump for this split type */
		switch (split_type) {
		case SPLIT_TYPE_NONE:
			split_count = 1;
			break;
		case SPLIT_TYPE_PORT:
			split_count = dev_data->num_ports;
			break;
		case SPLIT_TYPE_PF:
		case SPLIT_TYPE_PORT_PF:
			split_count = dev_data->num_ports *
			    dev_data->num_pfs_per_port;
			break;
		case SPLIT_TYPE_VF:
			split_count = dev_data->num_vfs;
			break;
		default:
			/* Unknown split type - abort the whole dump */
			return 0;
		}

		/* Dump the section once per split instance */
		for (split_id = 0; split_id < split_count; split_id++)
			offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
							  curr_input_regs_arr,
							  dump_buf + offset,
							  dump, block_enable,
							  split_type,
							  split_id,
							  reg_type_name);

		input_offset += split_data_size;
	}

	/* Cancel pretends (pretend to original PF) and reset the cached
	 * pretend state so later GRC reads start from a known state.
	 */
	if (dump) {
		ecore_fid_pretend(p_hwfn, p_ptt,
				FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					    p_hwfn->rel_pf_id));
		dev_data->pretend.split_type = SPLIT_TYPE_NONE;
		dev_data->pretend.split_id = 0;
	}

	return offset;
}
2209 
2210 /* Dump reset registers. Returns the dumped size in dwords. */
static u32 qed_grc_dump_reset_regs(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   u32 *dump_buf, bool dump)
{
	u32 offset = 0, num_regs = 0;
	u8 reset_reg_id;

	/* Reserve the section header; it is rewritten below once the final
	 * register count is known.
	 */
	offset += qed_grc_dump_regs_hdr(dump_buf,
					false,
					0, SPLIT_TYPE_NONE, 0, "RESET_REGS");

	/* Dump each reset register that exists on this chip */
	for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
	     reset_reg_id++) {
		const struct dbg_reset_reg *reset_reg =
		    qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
		u32 reset_reg_addr;

		if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))
			continue;

		reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
		offset += qed_grc_dump_reg_entry(p_hwfn,
						 p_ptt,
						 dump_buf + offset,
						 dump,
						 reset_reg_addr,
						 1, false, SPLIT_TYPE_NONE, 0);
		num_regs++;
	}

	/* Overwrite the reserved header with the final register count */
	if (dump)
		qed_grc_dump_regs_hdr(dump_buf,
				      true, num_regs, SPLIT_TYPE_NONE,
				      0, "RESET_REGS");

	return offset;
}
2252 
2253 /* Dump registers that are modified during GRC Dump and therefore must be
2254  * dumped first. Returns the dumped size in dwords.
2255  */
static u32 qed_grc_dump_modified_regs(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, offset = 0, stall_regs_offset;
	const struct dbg_attn_reg *attn_reg_arr;
	u8 storm_id, reg_idx, num_attn_regs;
	u32 num_reg_entries = 0;

	/* Write empty header for attention registers; it is overwritten
	 * below once the final entry count is known.
	 */
	offset += qed_grc_dump_regs_hdr(dump_buf,
					false,
					0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");

	/* Write parity registers */
	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
		/* Blocks in reset can't be read during a real dump */
		if (dev_data->block_in_reset[block_id] && dump)
			continue;

		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
						       (enum block_id)block_id,
						       ATTN_TYPE_PARITY,
						       &num_attn_regs);

		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data =
				&attn_reg_arr[reg_idx];
			u16 modes_buf_offset;
			bool eval_mode;
			u32 addr;

			/* Check mode */
			eval_mode = GET_FIELD(reg_data->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			modes_buf_offset =
				GET_FIELD(reg_data->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			if (eval_mode &&
			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
				continue;

			/* Mode match: read & dump registers - one entry for
			 * the mask register and one for the status register.
			 */
			addr = reg_data->mask_address;
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 1, false,
							 SPLIT_TYPE_NONE, 0);
			addr = GET_FIELD(reg_data->data,
					 DBG_ATTN_REG_STS_ADDRESS);
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 1, false,
							 SPLIT_TYPE_NONE, 0);
			num_reg_entries += 2;
		}
	}

	/* Overwrite header for attention registers with the final count */
	if (dump)
		qed_grc_dump_regs_hdr(dump_buf,
				      true,
				      num_reg_entries,
				      SPLIT_TYPE_NONE, 0, "ATTN_REGS");

	/* Write empty header for stall registers; overwritten below */
	stall_regs_offset = offset;
	offset += qed_grc_dump_regs_hdr(dump_buf,
					false, 0, SPLIT_TYPE_NONE, 0, "REGS");

	/* Write Storm stall status registers */
	for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
	     storm_id++) {
		struct storm_defs *storm = &s_storm_defs[storm_id];
		u32 addr;

		/* Skip Storms whose SEM block is in reset during a real dump */
		if (dev_data->block_in_reset[storm->sem_block_id] && dump)
			continue;

		addr =
		    BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
				    SEM_FAST_REG_STALLED);
		offset += qed_grc_dump_reg_entry(p_hwfn,
						 p_ptt,
						 dump_buf + offset,
						 dump,
						 addr,
						 1,
						 false, SPLIT_TYPE_NONE, 0);
		num_reg_entries++;
	}

	/* Overwrite header for stall registers with the final count */
	if (dump)
		qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
				      true,
				      num_reg_entries,
				      SPLIT_TYPE_NONE, 0, "REGS");

	return offset;
}
2363 
2364 /* Dumps registers that can't be represented in the debug arrays */
static u32 qed_grc_dump_special_regs(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     u32 *dump_buf, bool dump)
{
	u32 offset = 0;

	/* Fixed section of exactly two register entries */
	offset += qed_grc_dump_regs_hdr(dump_buf,
					dump, 2, SPLIT_TYPE_NONE, 0, "REGS");

	/* R/TDIF_REG_DEBUG_ERROR_INFO_SIZE: read 7 dwords, then skip one
	 * (every 8'th register must not be read).
	 */
	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
					      p_ptt,
					      dump_buf + offset,
					      dump,
					      BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO),
					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
					      7,
					      1);
	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
					      p_ptt,
					      dump_buf + offset,
					      dump,
					      BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO),
					      TDIF_REG_DEBUG_ERROR_INFO_SIZE,
					      7,
					      1);

	return offset;
}
2399 
2400 /* Dumps a GRC memory header (section and params). Returns the dumped size in
2401  * dwords. The following parameters are dumped:
2402  * - name:	   dumped only if it's not NULL.
2403  * - addr:	   in dwords, dumped only if name is NULL.
2404  * - len:	   in dwords, always dumped.
2405  * - width:	   dumped if it's not zero.
2406  * - packed:	   dumped only if it's not false.
2407  * - mem_group:	   always dumped.
2408  * - is_storm:	   true only if the memory is related to a Storm.
2409  * - storm_letter: valid only if is_storm is true.
2410  *
2411  */
static u32 qed_grc_dump_mem_hdr(struct ecore_hwfn *p_hwfn,
				u32 *dump_buf,
				bool dump,
				const char *name,
				u32 addr,
				u32 len,
				u32 bit_width,
				bool packed,
				const char *mem_group, char storm_letter)
{
	/* Base params: name-or-addr, len, type */
	u8 num_params = 3;
	u32 offset = 0;
	/* NOTE(review): name/mem_group are copied into this 64-byte stack
	 * buffer with unbounded strcpy - assumes callers only pass short
	 * strings; confirm before adding callers with longer names.
	 */
	char buf[64];

	if (!len)
		DP_NOTICE(p_hwfn, false,
			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");

	/* Optional params are counted before writing the section header */
	if (bit_width)
		num_params++;
	if (packed)
		num_params++;

	/* Dump section header */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "grc_mem", num_params);

	if (name) {
		/* Dump name, prefixed with "<letter>STORM_" for Storm
		 * memories (the '?' placeholder is overwritten below).
		 */
		if (storm_letter) {
			strcpy(buf, "?STORM_");
			buf[0] = storm_letter;
			strcpy(buf + strlen(buf), name);
		} else {
			strcpy(buf, name);
		}

		offset += qed_dump_str_param(dump_buf + offset,
					     dump, "name", buf);
	} else {
		/* Dump address (in bytes) when no name was given */
		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);

		offset += qed_dump_num_param(dump_buf + offset,
					     dump, "addr", addr_in_bytes);
	}

	/* Dump len */
	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);

	/* Dump bit width */
	if (bit_width)
		offset += qed_dump_num_param(dump_buf + offset,
					     dump, "width", bit_width);

	/* Dump packed */
	if (packed)
		offset += qed_dump_num_param(dump_buf + offset,
					     dump, "packed", 1);

	/* Dump reg type, with the same Storm-letter prefix convention */
	if (storm_letter) {
		strcpy(buf, "?STORM_");
		buf[0] = storm_letter;
		strcpy(buf + strlen(buf), mem_group);
	} else {
		strcpy(buf, mem_group);
	}

	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);

	return offset;
}
2485 
2486 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
2487  * Returns the dumped size in dwords.
2488  * The addr and len arguments are specified in dwords.
2489  */
static u32 qed_grc_dump_mem(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u32 *dump_buf,
			    bool dump,
			    const char *name,
			    u32 addr,
			    u32 len,
			    bool wide_bus,
			    u32 bit_width,
			    bool packed,
			    const char *mem_group, char storm_letter)
{
	u32 offset;

	/* Memory header (name/addr, len, width, packed flag, type) */
	offset = qed_grc_dump_mem_hdr(p_hwfn,
				      dump_buf,
				      dump,
				      name,
				      addr,
				      len,
				      bit_width,
				      packed, mem_group, storm_letter);

	/* Memory data */
	offset += qed_grc_dump_addr_range(p_hwfn,
					  p_ptt,
					  dump_buf + offset,
					  dump, addr, len, wide_bus,
					  SPLIT_TYPE_NONE, 0);

	return offset;
}
2520 
2521 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_mem_entries(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct virt_mem_desc input_mems_arr,
				    u32 *dump_buf, bool dump)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	/* Walk the input array: each group is a one-dword condition header
	 * followed by fixed-size memory entries.
	 */
	while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) {
		const struct dbg_dump_cond_hdr *cond_hdr;
		u16 modes_buf_offset;
		u32 num_entries;
		bool eval_mode;

		cond_hdr =
		    (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr +
		    input_offset++;
		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;

		/* Check required mode */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		/* Skip the whole group on mode mismatch */
		if (!mode_match) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		for (i = 0; i < num_entries;
		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
			const struct dbg_dump_mem *mem =
			    (const struct dbg_dump_mem *)((u32 *)
							  input_mems_arr.ptr
							  + input_offset);
			const struct dbg_block *block;
			char storm_letter = 0;
			u32 mem_addr, mem_len;
			bool mem_wide_bus;
			u8 mem_group_id;

			/* A group id outside the known range aborts the
			 * whole dump - the input array is corrupt.
			 */
			mem_group_id = GET_FIELD(mem->dword0,
						 DBG_DUMP_MEM_MEM_GROUP_ID);
			if (mem_group_id >= MEM_GROUPS_NUM) {
				DP_NOTICE(p_hwfn, false, "Invalid mem_group_id\n");
				return 0;
			}

			/* Skip memories excluded by the dump parameters */
			if (!qed_grc_is_mem_included(p_hwfn,
						     (enum block_id)
						     cond_hdr->block_id,
						     mem_group_id))
				continue;

			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
			mem_wide_bus = GET_FIELD(mem->dword1,
						 DBG_DUMP_MEM_WIDE_BUS);

			block = get_dbg_block(p_hwfn,
					      cond_hdr->block_id);

			/* If memory is associated with Storm,
			 * update storm details
			 */
			if (block->associated_storm_letter)
				storm_letter = block->associated_storm_letter;

			/* Dump memory */
			offset += qed_grc_dump_mem(p_hwfn,
						p_ptt,
						dump_buf + offset,
						dump,
						NULL,
						mem_addr,
						mem_len,
						mem_wide_bus,
						0,
						false,
						s_mem_group_names[mem_group_id],
						storm_letter);
		}
	}

	return offset;
}
2614 
2615 /* Dumps GRC memories according to the input array dump_mem.
2616  * Returns the dumped size in dwords.
2617  */
static u32 qed_grc_dump_memories(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u32 *dump_buf, bool dump)
{
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM];
	u32 offset = 0, input_offset = 0;

	/* Walk the debug array: a one-dword split header followed by
	 * split_data_size dwords of memory entries, repeated.
	 */
	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_dump_split_hdr *split_hdr =
		    (const struct dbg_dump_split_hdr *)dbg_buf->ptr +
		    input_offset++;
		enum init_split_types split_type;
		struct virt_mem_desc mems_arr;
		u32 split_data_size;

		split_type = GET_FIELD(split_hdr->hdr,
				       DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr,
					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset;
		mems_arr.size = DWORDS_TO_BYTES(split_data_size);

		/* Only non-split memory sections are supported */
		if (split_type == SPLIT_TYPE_NONE)
			offset += qed_grc_dump_mem_entries(p_hwfn,
							   p_ptt,
							   mems_arr,
							   dump_buf + offset,
							   dump);
		else
			DP_NOTICE(p_hwfn, false,
				  "Dumping split memories is currently not supported\n");

		input_offset += split_data_size;
	}

	return offset;
}
2657 
2658 /* Dumps GRC context data for the specified Storm.
2659  * Returns the dumped size in dwords.
2660  * The lid_size argument is specified in quad-regs.
2661  */
static u32 qed_grc_dump_ctx_data(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u32 *dump_buf,
				 bool dump,
				 const char *name,
				 u32 num_lids,
				 enum cm_ctx_types ctx_type, u8 storm_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 i, lid, lid_size, total_size;
	u32 rd_reg_addr, offset = 0;

	/* Convert quad-regs to dwords */
	lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4;

	/* A zero lid size means this context type doesn't exist on this
	 * chip/Storm - nothing to dump.
	 */
	if (!lid_size)
		return 0;

	total_size = num_lids * lid_size;

	/* Bit width param: lid_size dwords = lid_size * 32 bits */
	offset += qed_grc_dump_mem_hdr(p_hwfn,
				       dump_buf + offset,
				       dump,
				       name,
				       0,
				       total_size,
				       lid_size * 32,
				       false, name, storm->letter);

	/* When only sizing, account for the data without reading HW */
	if (!dump)
		return offset + total_size;

	rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]);

	/* Dump context data: for each lid, select each dword via the CM
	 * context write-address register, then read it back one dword at a
	 * time through the read-address register.
	 */
	for (lid = 0; lid < num_lids; lid++) {
		for (i = 0; i < lid_size; i++) {
			ecore_wr(p_hwfn,
			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  rd_reg_addr,
							  1,
							  false,
							  SPLIT_TYPE_NONE, 0);
		}
	}

	return offset;
}
2715 
2716 /* Dumps GRC contexts. Returns the dumped size in dwords. */
static u32 qed_grc_dump_ctx(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	/* The four context types dumped for every included Storm */
	static const struct {
		const char *name;
		u32 num_lids;
		enum cm_ctx_types ctx_type;
	} ctx_dumps[] = {
		{ "CONN_AG_CTX", NUM_OF_LCIDS, CM_CTX_CONN_AG },
		{ "CONN_ST_CTX", NUM_OF_LCIDS, CM_CTX_CONN_ST },
		{ "TASK_AG_CTX", NUM_OF_LTIDS, CM_CTX_TASK_AG },
		{ "TASK_ST_CTX", NUM_OF_LTIDS, CM_CTX_TASK_ST },
	};
	u32 offset = 0, i;
	u8 storm_id;

	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		if (!qed_grc_is_storm_included(p_hwfn,
					       (enum dbg_storms)storm_id))
			continue;

		/* Dump Conn AG/ST and Task AG/ST context data */
		for (i = 0; i < RTE_DIM(ctx_dumps); i++)
			offset += qed_grc_dump_ctx_data(p_hwfn,
							p_ptt,
							dump_buf + offset,
							dump,
							ctx_dumps[i].name,
							ctx_dumps[i].num_lids,
							ctx_dumps[i].ctx_type,
							storm_id);
	}

	return offset;
}
2767 
2768 #define VFC_STATUS_RESP_READY_BIT	0
2769 #define VFC_STATUS_BUSY_BIT		1
2770 #define VFC_STATUS_SENDING_CMD_BIT	2
2771 
2772 #define VFC_POLLING_DELAY_MS	1
2773 #define VFC_POLLING_COUNT		20
2774 
2775 /* Reads data from VFC. Returns the number of dwords read (0 on error).
2776  * Sizes are specified in dwords.
2777  */
static u32 qed_grc_dump_read_from_vfc(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct storm_defs *storm,
				      u32 *cmd_data,
				      u32 cmd_size,
				      u32 *addr_data,
				      u32 addr_size,
				      u32 resp_size, u32 *dump_buf)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 vfc_status, polling_ms, polling_count = 0, i;
	u32 reg_addr, sem_base;
	bool is_ready = false;

	sem_base = storm->sem_fast_mem_addr;
	/* Scale the poll interval by the HW type's delay factor (e.g. for
	 * emulation platforms).
	 */
	polling_ms = VFC_POLLING_DELAY_MS *
	    s_hw_type_defs[dev_data->hw_type].delay_factor;

	/* Write VFC command */
	ARR_REG_WR(p_hwfn,
		   p_ptt,
		   sem_base + SEM_FAST_REG_VFC_DATA_WR,
		   cmd_data, cmd_size);

	/* Write VFC address */
	ARR_REG_WR(p_hwfn,
		   p_ptt,
		   sem_base + SEM_FAST_REG_VFC_ADDR,
		   addr_data, addr_size);

	/* Read response, one dword at a time */
	for (i = 0; i < resp_size; i++) {
		/* Poll until ready; give up (return 0) after
		 * VFC_POLLING_COUNT attempts.
		 */
		do {
			reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS;
			qed_grc_dump_addr_range(p_hwfn,
						p_ptt,
						&vfc_status,
						true,
						BYTES_TO_DWORDS(reg_addr),
						1,
						false, SPLIT_TYPE_NONE, 0);
			is_ready = vfc_status &
				   OSAL_BIT(VFC_STATUS_RESP_READY_BIT);

			if (!is_ready) {
				if (polling_count++ == VFC_POLLING_COUNT)
					return 0;

				OSAL_MSLEEP(polling_ms);
			}
		} while (!is_ready);

		/* Read the next response dword */
		reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD;
		qed_grc_dump_addr_range(p_hwfn,
					p_ptt,
					dump_buf + i,
					true,
					BYTES_TO_DWORDS(reg_addr),
					1, false, SPLIT_TYPE_NONE, 0);
	}

	return resp_size;
}
2842 
2843 /* Dump VFC CAM. Returns the dumped size in dwords. */
qed_grc_dump_vfc_cam(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,u32 * dump_buf,bool dump,u8 storm_id)2844 static u32 qed_grc_dump_vfc_cam(struct ecore_hwfn *p_hwfn,
2845 				struct ecore_ptt *p_ptt,
2846 				u32 *dump_buf, bool dump, u8 storm_id)
2847 {
2848 	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
2849 	struct storm_defs *storm = &s_storm_defs[storm_id];
2850 	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
2851 	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
2852 	u32 row, offset = 0;
2853 
2854 	offset += qed_grc_dump_mem_hdr(p_hwfn,
2855 				       dump_buf + offset,
2856 				       dump,
2857 				       "vfc_cam",
2858 				       0,
2859 				       total_size,
2860 				       256,
2861 				       false, "vfc_cam", storm->letter);
2862 
2863 	if (!dump)
2864 		return offset + total_size;
2865 
2866 	/* Prepare CAM address */
2867 	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
2868 
2869 	/* Read VFC CAM data */
2870 	for (row = 0; row < VFC_CAM_NUM_ROWS; row++) {
2871 		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
2872 		offset += qed_grc_dump_read_from_vfc(p_hwfn,
2873 						     p_ptt,
2874 						     storm,
2875 						     cam_cmd,
2876 						     VFC_CAM_CMD_DWORDS,
2877 						     cam_addr,
2878 						     VFC_CAM_ADDR_DWORDS,
2879 						     VFC_CAM_RESP_DWORDS,
2880 						     dump_buf + offset);
2881 	}
2882 
2883 	return offset;
2884 }
2885 
2886 /* Dump VFC RAM. Returns the dumped size in dwords. */
qed_grc_dump_vfc_ram(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,u32 * dump_buf,bool dump,u8 storm_id,struct vfc_ram_defs * ram_defs)2887 static u32 qed_grc_dump_vfc_ram(struct ecore_hwfn *p_hwfn,
2888 				struct ecore_ptt *p_ptt,
2889 				u32 *dump_buf,
2890 				bool dump,
2891 				u8 storm_id, struct vfc_ram_defs *ram_defs)
2892 {
2893 	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
2894 	struct storm_defs *storm = &s_storm_defs[storm_id];
2895 	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
2896 	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
2897 	u32 row, offset = 0;
2898 
2899 	offset += qed_grc_dump_mem_hdr(p_hwfn,
2900 				       dump_buf + offset,
2901 				       dump,
2902 				       ram_defs->mem_name,
2903 				       0,
2904 				       total_size,
2905 				       256,
2906 				       false,
2907 				       ram_defs->type_name,
2908 				       storm->letter);
2909 
2910 	if (!dump)
2911 		return offset + total_size;
2912 
2913 	/* Prepare RAM address */
2914 	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
2915 
2916 	/* Read VFC RAM data */
2917 	for (row = ram_defs->base_row;
2918 	     row < ram_defs->base_row + ram_defs->num_rows; row++) {
2919 		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
2920 		offset += qed_grc_dump_read_from_vfc(p_hwfn,
2921 						     p_ptt,
2922 						     storm,
2923 						     ram_cmd,
2924 						     VFC_RAM_CMD_DWORDS,
2925 						     ram_addr,
2926 						     VFC_RAM_ADDR_DWORDS,
2927 						     VFC_RAM_RESP_DWORDS,
2928 						     dump_buf + offset);
2929 	}
2930 
2931 	return offset;
2932 }
2933 
2934 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
qed_grc_dump_vfc(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,u32 * dump_buf,bool dump)2935 static u32 qed_grc_dump_vfc(struct ecore_hwfn *p_hwfn,
2936 			    struct ecore_ptt *p_ptt, u32 *dump_buf, bool dump)
2937 {
2938 	u8 storm_id, i;
2939 	u32 offset = 0;
2940 
2941 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2942 		if (!qed_grc_is_storm_included(p_hwfn,
2943 					       (enum dbg_storms)storm_id) ||
2944 		    !s_storm_defs[storm_id].has_vfc)
2945 			continue;
2946 
2947 		/* Read CAM */
2948 		offset += qed_grc_dump_vfc_cam(p_hwfn,
2949 					       p_ptt,
2950 					       dump_buf + offset,
2951 					       dump, storm_id);
2952 
2953 		/* Read RAM */
2954 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
2955 			offset += qed_grc_dump_vfc_ram(p_hwfn,
2956 						       p_ptt,
2957 						       dump_buf + offset,
2958 						       dump,
2959 						       storm_id,
2960 						       &s_vfc_ram_defs[i]);
2961 	}
2962 
2963 	return offset;
2964 }
2965 
/* Dumps GRC RSS data. Returns the dumped size in dwords. */
static u32 qed_grc_dump_rss(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0;
	u8 rss_mem_id;

	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
		u32 rss_addr, num_entries, total_dwords;
		struct rss_mem_defs *rss_defs;
		u32 addr, num_dwords_to_read;
		bool packed;

		rss_defs = &s_rss_mem_defs[rss_mem_id];
		rss_addr = rss_defs->addr;
		num_entries = rss_defs->num_entries[dev_data->chip_id];
		/* entry_width is in bits; 32 bits per dword */
		total_dwords = (num_entries * rss_defs->entry_width) / 32;
		/* 16-bit entries are packed two per dword in the dump */
		packed = (rss_defs->entry_width == 16);

		offset += qed_grc_dump_mem_hdr(p_hwfn,
					       dump_buf + offset,
					       dump,
					       rss_defs->mem_name,
					       0,
					       total_dwords,
					       rss_defs->entry_width,
					       packed,
					       rss_defs->type_name, 0);

		/* Dump RSS data */
		if (!dump) {
			/* Size query only - account for the data */
			offset += total_dwords;
			continue;
		}

		/* The RSS RAM is read indirectly: write the row address to
		 * RSS_REG_RSS_RAM_ADDR, then read up to
		 * RSS_REG_RSS_RAM_DATA_SIZE dwords from the data window.
		 */
		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
		while (total_dwords) {
			num_dwords_to_read = OSAL_MIN_T(u32,
						      RSS_REG_RSS_RAM_DATA_SIZE,
						      total_dwords);
			ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  addr,
							  num_dwords_to_read,
							  false,
							  SPLIT_TYPE_NONE, 0);
			total_dwords -= num_dwords_to_read;
			rss_addr++;
		}
	}

	return offset;
}
3023 
/* Dumps GRC Big RAM. Returns the dumped size in dwords. */
static u32 qed_grc_dump_big_ram(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				u32 *dump_buf, bool dump, u8 big_ram_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_size, ram_size, offset = 0, reg_val, i;
	char mem_name[12] = "???_BIG_RAM";
	char type_name[8] = "???_RAM";
	struct big_ram_defs *big_ram;

	big_ram = &s_big_ram_defs[big_ram_id];
	ram_size = big_ram->ram_size[dev_data->chip_id];

	/* Block size is 256B or 128B, selected by a per-chip HW status bit */
	reg_val = ecore_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
	block_size = reg_val &
		     OSAL_BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ?
								     256 : 128;

	/* Patch the "???" placeholder with the instance name. Only
	 * BIG_RAM_NAME_LEN bytes are copied, so the NUL terminators already
	 * present in the initializers remain intact (intentional strncpy use,
	 * not a truncation bug).
	 */
	strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
	strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);

	/* Dump memory header */
	offset += qed_grc_dump_mem_hdr(p_hwfn,
				       dump_buf + offset,
				       dump,
				       mem_name,
				       0,
				       ram_size,
				       block_size * 8,
				       false, type_name, 0);

	/* Read and dump Big RAM data */
	if (!dump)
		return offset + ram_size;

	/* Dump Big RAM: indirect read - write the window index to the address
	 * register, then read BRB_REG_BIG_RAM_DATA_SIZE dwords of data.
	 */
	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
	     i++) {
		u32 addr, len;

		ecore_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
		len = BRB_REG_BIG_RAM_DATA_SIZE;
		offset += qed_grc_dump_addr_range(p_hwfn,
						  p_ptt,
						  dump_buf + offset,
						  dump,
						  addr,
						  len,
						  false, SPLIT_TYPE_NONE, 0);
	}

	return offset;
}
3079 
/* Dumps MCP scratchpad. Returns the dumped size in dwords.
 *
 * The MCP is halted for the duration of the dump (when allowed) so its
 * memories are read in a stable state, and resumed afterwards.
 */
static u32 qed_grc_dump_mcp(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	bool block_enable[MAX_BLOCK_ID] = { 0 };
	u32 offset = 0, addr;
	bool halted = false;

	/* Halt MCP (skipped when MCP access is disabled via the NO_MCP GRC
	 * parameter). A failed halt is reported but the dump proceeds.
	 */
	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
		halted = !ecore_mcp_halt(p_hwfn, p_ptt);
		if (!halted)
			DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
	}

	/* Dump MCP scratchpad */
	offset += qed_grc_dump_mem(p_hwfn,
				   p_ptt,
				   dump_buf + offset,
				   dump,
				   NULL,
				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
				   MCP_REG_SCRATCH_SIZE,
				   false, 0, false, "MCP", 0);

	/* Dump MCP cpu_reg_file */
	offset += qed_grc_dump_mem(p_hwfn,
				   p_ptt,
				   dump_buf + offset,
				   dump,
				   NULL,
				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
				   MCP_REG_CPU_REG_FILE_SIZE,
				   false, 0, false, "MCP", 0);

	/* Dump MCP registers */
	block_enable[BLOCK_MCP] = true;
	offset += qed_grc_dump_registers(p_hwfn,
					 p_ptt,
					 dump_buf + offset,
					 dump, block_enable, "MCP");

	/* Dump required non-MCP registers */
	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
					dump, 1, SPLIT_TYPE_NONE, 0,
					"MCP");
	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
	offset += qed_grc_dump_reg_entry(p_hwfn,
					 p_ptt,
					 dump_buf + offset,
					 dump,
					 addr,
					 1,
					 false, SPLIT_TYPE_NONE, 0);

	/* Release MCP (only if we actually halted it above) */
	if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");

	return offset;
}
3141 
/* Dumps the tbus indirect memory for all PHYs.
 * Returns the dumped size in dwords.
 */
static u32 qed_grc_dump_phy(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
	char mem_name[32];
	u8 phy_id;

	for (phy_id = 0; phy_id < OSAL_ARRAY_SIZE(s_phy_defs); phy_id++) {
		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
		struct phy_defs *phy_defs;
		u8 *bytes_buf;

		/* Compute the four tbus access registers for this PHY */
		phy_defs = &s_phy_defs[phy_id];
		addr_lo_addr = phy_defs->base_addr +
			       phy_defs->tbus_addr_lo_addr;
		addr_hi_addr = phy_defs->base_addr +
			       phy_defs->tbus_addr_hi_addr;
		data_lo_addr = phy_defs->base_addr +
			       phy_defs->tbus_data_lo_addr;
		data_hi_addr = phy_defs->base_addr +
			       phy_defs->tbus_data_hi_addr;

		/* Build the section name ("tbus_<phy>"); a negative return
		 * only signals an encoding error, truncation is tolerated.
		 */
		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
			     phy_defs->phy_name) < 0)
			DP_NOTICE(p_hwfn, false,
				  "Unexpected debug error: invalid PHY memory name\n");

		offset += qed_grc_dump_mem_hdr(p_hwfn,
					       dump_buf + offset,
					       dump,
					       mem_name,
					       0,
					       PHY_DUMP_SIZE_DWORDS,
					       16, true, mem_name, 0);

		if (!dump) {
			/* Size query only - account for the data */
			offset += PHY_DUMP_SIZE_DWORDS;
			continue;
		}

		/* Walk the tbus address space: the high byte is written to
		 * the hi-address register and all 256 low bytes are swept for
		 * each, reading one lo data byte and one hi data byte per
		 * address - so the dwords are filled byte by byte.
		 */
		bytes_buf = (u8 *)(dump_buf + offset);
		for (tbus_hi_offset = 0;
		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
		     tbus_hi_offset++) {
			ecore_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
			     tbus_lo_offset++) {
				ecore_wr(p_hwfn,
				       p_ptt, addr_lo_addr, tbus_lo_offset);
				*(bytes_buf++) = (u8)ecore_rd(p_hwfn,
							    p_ptt,
							    data_lo_addr);
				*(bytes_buf++) = (u8)ecore_rd(p_hwfn,
							    p_ptt,
							    data_hi_addr);
			}
		}

		offset += PHY_DUMP_SIZE_DWORDS;
	}

	return offset;
}
3208 
/* NVRAM access helpers, defined later in this file. Forward-declared here
 * because qed_grc_dump_mcp_hw_dump() below needs them.
 */
static enum dbg_status qed_find_nvram_image(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u32 image_type,
					    u32 *nvram_offset_bytes,
					    u32 *nvram_size_bytes);

static enum dbg_status qed_nvram_read(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      u32 nvram_offset_bytes,
				      u32 nvram_size_bytes, u32 *ret_buf);
3219 
3220 /* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
qed_grc_dump_mcp_hw_dump(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,u32 * dump_buf,bool dump)3221 static u32 qed_grc_dump_mcp_hw_dump(struct ecore_hwfn *p_hwfn,
3222 				    struct ecore_ptt *p_ptt,
3223 				    u32 *dump_buf, bool dump)
3224 {
3225 	u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0;
3226 	u32 hw_dump_size_dwords = 0, offset = 0;
3227 	enum dbg_status status;
3228 
3229 	/* Read HW dump image from NVRAM */
3230 	status = qed_find_nvram_image(p_hwfn,
3231 				      p_ptt,
3232 				      NVM_TYPE_HW_DUMP_OUT,
3233 				      &hw_dump_offset_bytes,
3234 				      &hw_dump_size_bytes);
3235 	if (status != DBG_STATUS_OK)
3236 		return 0;
3237 
3238 	hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);
3239 
3240 	/* Dump HW dump image section */
3241 	offset += qed_dump_section_hdr(dump_buf + offset,
3242 				       dump, "mcp_hw_dump", 1);
3243 	offset += qed_dump_num_param(dump_buf + offset,
3244 				     dump, "size", hw_dump_size_dwords);
3245 
3246 	/* Read MCP HW dump image into dump buffer */
3247 	if (dump && hw_dump_size_dwords) {
3248 		status = qed_nvram_read(p_hwfn,
3249 					p_ptt,
3250 					hw_dump_offset_bytes,
3251 					hw_dump_size_bytes, dump_buf + offset);
3252 		if (status != DBG_STATUS_OK) {
3253 			DP_NOTICE(p_hwfn, false,
3254 				  "Failed to read MCP HW Dump image from NVRAM\n");
3255 			return 0;
3256 		}
3257 	}
3258 	offset += hw_dump_size_dwords;
3259 
3260 	return offset;
3261 }
3262 
/* Dumps Static Debug data. Returns the dumped size in dwords.
 *
 * Programs the debug bus into a static (internal buffer) mode and, for each
 * block with a debug bus, reads every static debug line through the
 * calendar-out data window.
 */
static u32 qed_grc_dump_static_debug(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, line_id, offset = 0, addr, len;

	/* Don't dump static debug if a debug bus recording is in progress */
	if (dump && ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
		return 0;

	if (dump) {
		/* Disable debug bus in all blocks */
		qed_bus_disable_blocks(p_hwfn, p_ptt);

		/* Reset and configure the DBG block for static reads */
		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
		ecore_wr(p_hwfn,
		       p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW);
		ecore_wr(p_hwfn,
		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
	}

	/* Dump all static debug lines for each relevant block */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		const struct dbg_block_chip *block_per_chip;
		const struct dbg_block *block;
		bool is_removed, has_dbg_bus;
		u16 modes_buf_offset;
		u32 block_dwords;

		block_per_chip =
		    qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id);
		is_removed = GET_FIELD(block_per_chip->flags,
				       DBG_BLOCK_CHIP_IS_REMOVED);
		has_dbg_bus = GET_FIELD(block_per_chip->flags,
					DBG_BLOCK_CHIP_HAS_DBG_BUS);

		/* read+clear for NWS parity is not working, skip NWS block */
		if (block_id == BLOCK_NWS)
			continue;

		/* A block whose debug bus is mode-dependent is only dumped
		 * when the current chip mode matches.
		 */
		if (!is_removed && has_dbg_bus &&
		    GET_FIELD(block_per_chip->dbg_bus_mode.data,
			      DBG_MODE_HDR_EVAL_MODE) > 0) {
			modes_buf_offset =
			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
				      DBG_MODE_HDR_MODES_BUF_OFFSET);
			if (!qed_is_mode_match(p_hwfn, &modes_buf_offset))
				has_dbg_bus = false;
		}

		if (is_removed || !has_dbg_bus)
			continue;

		block_dwords = NUM_DBG_LINES(block_per_chip) *
			       STATIC_DEBUG_LINE_DWORDS;

		/* Dump static section params */
		block = get_dbg_block(p_hwfn, (enum block_id)block_id);
		offset += qed_grc_dump_mem_hdr(p_hwfn,
					       dump_buf + offset,
					       dump,
					       (const char *)block->name,
					       0,
					       block_dwords,
					       32, false, "STATIC", 0);

		if (!dump) {
			/* Size query only - account for the data */
			offset += block_dwords;
			continue;
		}

		/* If all lines are invalid - dump zeros */
		if (dev_data->block_in_reset[block_id]) {
			memset(dump_buf + offset, 0,
			       DWORDS_TO_BYTES(block_dwords));
			offset += block_dwords;
			continue;
		}

		/* Enable block's client */
		qed_bus_enable_clients(p_hwfn,
				       p_ptt,
				       OSAL_BIT(block_per_chip->dbg_client_id));

		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
		len = STATIC_DEBUG_LINE_DWORDS;
		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip);
		     line_id++) {
			/* Configure debug line ID */
			qed_bus_config_dbg_line(p_hwfn,
						p_ptt,
						(enum block_id)block_id,
						(u8)line_id, 0xf, 0, 0, 0);

			/* Read debug line info */
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  addr,
							  len,
							  true, SPLIT_TYPE_NONE,
							  0);
		}

		/* Disable block's client and debug output */
		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
		qed_bus_config_dbg_line(p_hwfn, p_ptt,
					(enum block_id)block_id, 0, 0, 0, 0, 0);
	}

	if (dump) {
		/* Restore: disable the DBG block and all clients */
		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
	}

	return offset;
}
3385 
/* Performs GRC Dump to the specified buffer.
 * Returns the dumped size in dwords.
 *
 * Orchestrates the full GRC dump: global params, reset registers, register
 * blocks, memories, MCP, context, RSS, Big RAMs, VFC, PHY tbus, MCP HW dump
 * and static debug data. The step order matters: reset registers are dumped
 * before blocks are taken out of reset, modified registers before they are
 * modified, and storms are stalled before IOR/VFC reads.
 *
 * When @dump is false, only the required buffer size is computed.
 */
static enum dbg_status qed_grc_dump(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u32 *dump_buf,
				    bool dump, u32 *num_dumped_dwords)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 dwords_read, offset = 0;
	bool parities_masked = false;
	u8 i;

	*num_dumped_dwords = 0;
	dev_data->num_regs_read = 0;

	/* Update reset state */
	if (dump)
		qed_update_blocks_reset_state(p_hwfn, p_ptt);

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 4);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "grc-dump");
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "num-lcids",
				     NUM_OF_LCIDS);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "num-ltids",
				     NUM_OF_LTIDS);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "num-ports", dev_data->num_ports);

	/* Dump reset registers (dumped before taking blocks out of reset ) */
	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
		offset += qed_grc_dump_reset_regs(p_hwfn,
						  p_ptt,
						  dump_buf + offset, dump);

	/* Take all blocks out of reset (using reset registers) */
	if (dump) {
		qed_grc_unreset_blocks(p_hwfn, p_ptt, false);
		qed_update_blocks_reset_state(p_hwfn, p_ptt);
	}

	/* Disable all parities using MFW command. A failure is fatal only
	 * when the PARITY_SAFE GRC parameter is set.
	 */
	if (dump &&
	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
		parities_masked = !ecore_mcp_mask_parities(p_hwfn, p_ptt, 1);
		if (!parities_masked) {
			DP_NOTICE(p_hwfn, false,
				  "Failed to mask parities using MFW\n");
			if (qed_grc_get_param
			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
		}
	}

	/* Dump modified registers (dumped before modifying them) */
	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
		offset += qed_grc_dump_modified_regs(p_hwfn,
						     p_ptt,
						     dump_buf + offset, dump);

	/* Stall storms */
	if (dump &&
	    (qed_grc_is_included(p_hwfn,
				 DBG_GRC_PARAM_DUMP_IOR) ||
	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
		qed_grc_stall_storms(p_hwfn, p_ptt, true);

	/* Dump all regs  */
	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
		bool block_enable[MAX_BLOCK_ID];

		/* Dump all blocks except MCP (MCP is dumped separately below,
		 * under its halt/resume sequence).
		 */
		for (i = 0; i < MAX_BLOCK_ID; i++)
			block_enable[i] = true;
		block_enable[BLOCK_MCP] = false;
		offset += qed_grc_dump_registers(p_hwfn,
						 p_ptt,
						 dump_buf +
						 offset,
						 dump,
						 block_enable, NULL);

		/* Dump special registers */
		offset += qed_grc_dump_special_regs(p_hwfn,
						    p_ptt,
						    dump_buf + offset, dump);
	}

	/* Dump memories */
	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);

	/* Dump MCP */
	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
		offset += qed_grc_dump_mcp(p_hwfn,
					   p_ptt, dump_buf + offset, dump);

	/* Dump context */
	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
		offset += qed_grc_dump_ctx(p_hwfn,
					   p_ptt, dump_buf + offset, dump);

	/* Dump RSS memories */
	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
		offset += qed_grc_dump_rss(p_hwfn,
					   p_ptt, dump_buf + offset, dump);

	/* Dump Big RAM */
	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
			offset += qed_grc_dump_big_ram(p_hwfn,
						       p_ptt,
						       dump_buf + offset,
						       dump, i);

	/* Dump VFC. A zero return indicates a VFC read (polling) failure. */
	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) {
		dwords_read = qed_grc_dump_vfc(p_hwfn,
					       p_ptt, dump_buf + offset, dump);
		offset += dwords_read;
		if (!dwords_read)
			return DBG_STATUS_VFC_READ_ERROR;
	}

	/* Dump PHY tbus (K2 ASIC only) */
	if (qed_grc_is_included(p_hwfn,
				DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
	    CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC)
		offset += qed_grc_dump_phy(p_hwfn,
					   p_ptt, dump_buf + offset, dump);

	/* Dump MCP HW Dump */
	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP))
		offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
						   p_ptt,
						   dump_buf + offset, dump);

	/* Dump static debug data (only if not during debug bus recording) */
	if (qed_grc_is_included(p_hwfn,
				DBG_GRC_PARAM_DUMP_STATIC) &&
	    (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
		offset += qed_grc_dump_static_debug(p_hwfn,
						    p_ptt,
						    dump_buf + offset, dump);

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	if (dump) {
		/* Unstall storms */
		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
			qed_grc_stall_storms(p_hwfn, p_ptt, false);

		/* Clear parity status */
		qed_grc_clear_all_prty(p_hwfn, p_ptt);

		/* Enable all parities using MFW command */
		if (parities_masked)
			ecore_mcp_mask_parities(p_hwfn, p_ptt, 0);
	}

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
}
3559 
/* Writes the specified failing Idle Check rule to the specified buffer.
 * Returns the dumped size in dwords.
 *
 * Layout written: a result header, then one (reg header + values) record per
 * condition register, then one per relevant info register. When @dump is
 * false, only the size of the condition-register part is computed (info
 * registers are sized too, but nothing is read from HW).
 *
 * @cond_reg_values - the already-read condition register values for the
 *		      failing entry (may be NULL when @dump is false).
 */
static u32 qed_idle_chk_dump_failure(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     u32 *
				     dump_buf,
				     bool dump,
				     u16 rule_id,
				     const struct dbg_idle_chk_rule *rule,
				     u16 fail_entry_id, u32 *cond_reg_values)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_idle_chk_cond_reg *cond_regs;
	const struct dbg_idle_chk_info_reg *info_regs;
	u32 i, next_reg_offset = 0, offset = 0;
	struct dbg_idle_chk_result_hdr *hdr;
	const union dbg_idle_chk_reg *regs;
	u8 reg_id;

	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;

	/* The rule's registers are laid out in the binary debug array:
	 * condition registers first, then info registers.
	 */
	regs = (const union dbg_idle_chk_reg *)
		p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
		rule->reg_offset;
	cond_regs = &regs[0].cond_reg;
	info_regs = &regs[rule->num_cond_regs].info_reg;

	/* Dump rule data */
	if (dump) {
		memset(hdr, 0, sizeof(*hdr));
		hdr->rule_id = rule_id;
		hdr->mem_entry_id = fail_entry_id;
		hdr->severity = rule->severity;
		hdr->num_dumped_cond_regs = rule->num_cond_regs;
	}

	offset += IDLE_CHK_RESULT_HDR_DWORDS;

	/* Dump condition register values */
	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
		struct dbg_idle_chk_result_reg_hdr *reg_hdr;

		reg_hdr =
		    (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);

		/* Write register header */
		if (!dump) {
			/* Size query only */
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
			    reg->entry_size;
			continue;
		}

		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
		memset(reg_hdr, 0, sizeof(*reg_hdr));
		reg_hdr->start_entry = reg->start_entry;
		reg_hdr->size = reg->entry_size;
		/* Registers with multiple entries (or a non-zero start) are
		 * marked as memories in the result header.
		 */
		SET_FIELD(reg_hdr->data,
			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
		SET_FIELD(reg_hdr->data,
			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);

		/* Write register values (consumed sequentially from the
		 * caller-provided cond_reg_values buffer).
		 */
		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
			dump_buf[offset] = cond_reg_values[next_reg_offset];
	}

	/* Dump info register values (read from HW here, unlike the condition
	 * registers which were read by the caller).
	 */
	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
		u32 block_id;

		/* Check if register's block is in reset */
		if (!dump) {
			/* Size query only */
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
			continue;
		}

		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
		if (block_id >= MAX_BLOCK_ID) {
			DP_NOTICE(p_hwfn, false, "Invalid block_id\n");
			return 0;
		}

		if (!dev_data->block_in_reset[block_id]) {
			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
			bool wide_bus, eval_mode, mode_match = true;
			u16 modes_buf_offset;
			u32 addr;

			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
				  (dump_buf + offset);

			/* Check mode - mode-dependent info registers are only
			 * dumped when the current chip mode matches.
			 */
			eval_mode = GET_FIELD(reg->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			if (eval_mode) {
				modes_buf_offset =
				    GET_FIELD(reg->mode.data,
					      DBG_MODE_HDR_MODES_BUF_OFFSET);
				mode_match =
					qed_is_mode_match(p_hwfn,
							  &modes_buf_offset);
			}

			if (!mode_match)
				continue;

			addr = GET_FIELD(reg->data,
					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
			wide_bus = GET_FIELD(reg->data,
					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);

			/* Write register header. Info registers are numbered
			 * after the condition registers in the result.
			 */
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
			hdr->num_dumped_info_regs++;
			memset(reg_hdr, 0, sizeof(*reg_hdr));
			reg_hdr->size = reg->size;
			SET_FIELD(reg_hdr->data,
				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
				  rule->num_cond_regs + reg_id);

			/* Write register values */
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  addr,
							  reg->size, wide_bus,
							  SPLIT_TYPE_NONE, 0);
		}
	}

	return offset;
}
3696 
3697 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
3698 static u32
qed_idle_chk_dump_rule_entries(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,u32 * dump_buf,bool dump,const struct dbg_idle_chk_rule * input_rules,u32 num_input_rules,u32 * num_failing_rules)3699 qed_idle_chk_dump_rule_entries(struct ecore_hwfn *p_hwfn,
3700 			       struct ecore_ptt *p_ptt,
3701 			       u32 *dump_buf, bool dump,
3702 			       const struct dbg_idle_chk_rule *input_rules,
3703 			       u32 num_input_rules, u32 *num_failing_rules)
3704 {
3705 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3706 	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
3707 	u32 i, offset = 0;
3708 	u16 entry_id;
3709 	u8 reg_id;
3710 
3711 	*num_failing_rules = 0;
3712 
3713 	for (i = 0; i < num_input_rules; i++) {
3714 		const struct dbg_idle_chk_cond_reg *cond_regs;
3715 		const struct dbg_idle_chk_rule *rule;
3716 		const union dbg_idle_chk_reg *regs;
3717 		u16 num_reg_entries = 1;
3718 		bool check_rule = true;
3719 		const u32 *imm_values;
3720 
3721 		rule = &input_rules[i];
3722 		regs = (const union dbg_idle_chk_reg *)
3723 			p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
3724 			rule->reg_offset;
3725 		cond_regs = &regs[0].cond_reg;
3726 		imm_values =
3727 		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr +
3728 		    rule->imm_offset;
3729 
3730 		/* Check if all condition register blocks are out of reset, and
3731 		 * find maximal number of entries (all condition registers that
3732 		 * are memories must have the same size, which is > 1).
3733 		 */
3734 		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
3735 		     reg_id++) {
3736 			u32 block_id =
3737 				GET_FIELD(cond_regs[reg_id].data,
3738 					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);
3739 
3740 			if (block_id >= MAX_BLOCK_ID) {
3741 				DP_NOTICE(p_hwfn, false, "Invalid block_id\n");
3742 				return 0;
3743 			}
3744 
3745 			check_rule = !dev_data->block_in_reset[block_id];
3746 			if (cond_regs[reg_id].num_entries > num_reg_entries)
3747 				num_reg_entries = cond_regs[reg_id].num_entries;
3748 		}
3749 
3750 		if (!check_rule && dump)
3751 			continue;
3752 
3753 		if (!dump) {
3754 			u32 entry_dump_size =
3755 				qed_idle_chk_dump_failure(p_hwfn,
3756 							  p_ptt,
3757 							  dump_buf + offset,
3758 							  false,
3759 							  rule->rule_id,
3760 							  rule,
3761 							  0,
3762 							  NULL);
3763 
3764 			offset += num_reg_entries * entry_dump_size;
3765 			(*num_failing_rules) += num_reg_entries;
3766 			continue;
3767 		}
3768 
3769 		/* Go over all register entries (number of entries is the same
3770 		 * for all condition registers).
3771 		 */
3772 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
3773 			u32 next_reg_offset = 0;
3774 
3775 			/* Read current entry of all condition registers */
3776 			for (reg_id = 0; reg_id < rule->num_cond_regs;
3777 			     reg_id++) {
3778 				const struct dbg_idle_chk_cond_reg *reg =
3779 					&cond_regs[reg_id];
3780 				u32 padded_entry_size, addr;
3781 				bool wide_bus;
3782 
3783 				/* Find GRC address (if it's a memory, the
3784 				 * address of the specific entry is calculated).
3785 				 */
3786 				addr = GET_FIELD(reg->data,
3787 						 DBG_IDLE_CHK_COND_REG_ADDRESS);
3788 				wide_bus =
3789 				    GET_FIELD(reg->data,
3790 					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
3791 				if (reg->num_entries > 1 ||
3792 				    reg->start_entry > 0) {
3793 					padded_entry_size =
3794 					   reg->entry_size > 1 ?
3795 					   OSAL_ROUNDUP_POW_OF_TWO(reg->entry_size) :
3796 					   1;
3797 					addr += (reg->start_entry + entry_id) *
3798 						padded_entry_size;
3799 				}
3800 
3801 				/* Read registers */
3802 				if (next_reg_offset + reg->entry_size >=
3803 				    IDLE_CHK_MAX_ENTRIES_SIZE) {
3804 					DP_NOTICE(p_hwfn, false,
3805 						  "idle check registers entry is too large\n");
3806 					return 0;
3807 				}
3808 
3809 				next_reg_offset +=
3810 				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
3811 							    cond_reg_values +
3812 							    next_reg_offset,
3813 							    dump, addr,
3814 							    reg->entry_size,
3815 							    wide_bus,
3816 							    SPLIT_TYPE_NONE, 0);
3817 			}
3818 
3819 			/* Call rule condition function.
3820 			 * If returns true, it's a failure.
3821 			 */
3822 			if ((*cond_arr[rule->cond_id]) (cond_reg_values,
3823 							imm_values)) {
3824 				offset += qed_idle_chk_dump_failure(p_hwfn,
3825 							p_ptt,
3826 							dump_buf + offset,
3827 							dump,
3828 							rule->rule_id,
3829 							rule,
3830 							entry_id,
3831 							cond_reg_values);
3832 				(*num_failing_rules)++;
3833 			}
3834 		}
3835 	}
3836 
3837 	return offset;
3838 }
3839 
3840 /* Performs Idle Check Dump to the specified buffer.
3841  * Returns the dumped size in dwords.
3842  */
static u32 qed_idle_chk_dump(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES];
	u32 num_failing_rules_offset, offset = 0,
	    input_offset = 0, num_failing_rules = 0;

	/* Dump global params  - 1 must match below amount of params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "idle-chk");

	/* Dump idle check section header with a single parameter.
	 * The "num_rules" value written here is a placeholder (0) that is
	 * patched at num_failing_rules_offset once all rules were evaluated.
	 */
	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
	num_failing_rules_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);

	/* Walk the idle-check rules binary buffer. Each iteration consumes
	 * one condition header dword followed by cond_hdr->data_size dwords
	 * of rule entries.
	 */
	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_idle_chk_cond_hdr *cond_hdr =
		    (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr +
		    input_offset++;
		bool eval_mode, mode_match = true;
		u32 curr_failing_rules;
		u16 modes_buf_offset;

		/* Check mode: rules under a mode header are evaluated only
		 * when the chip/configuration mode matches.
		 */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		if (mode_match) {
			const struct dbg_idle_chk_rule *rule =
			    (const struct dbg_idle_chk_rule *)((u32 *)
							       dbg_buf->ptr
							       + input_offset);
			u32 num_input_rules =
				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS;
			offset +=
			    qed_idle_chk_dump_rule_entries(p_hwfn,
							   p_ptt,
							   dump_buf +
							   offset,
							   dump,
							   rule,
							   num_input_rules,
							   &curr_failing_rules);
			num_failing_rules += curr_failing_rules;
		}

		/* Advance past this header's rule entries (consumed above) */
		input_offset += cond_hdr->data_size;
	}

	/* Overwrite num_rules parameter with the real failure count */
	if (dump)
		qed_dump_num_param(dump_buf + num_failing_rules_offset,
				   dump, "num_rules", num_failing_rules);

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	return offset;
}
3914 
3915 /* Finds the meta data image in NVRAM */
static enum dbg_status qed_find_nvram_image(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u32 image_type,
					    u32 *nvram_offset_bytes,
					    u32 *nvram_size_bytes)
{
	u32 mcp_resp, mcp_param, txn_size;
	struct mcp_file_att file_att;
	int rc;

	/* Ask the MFW for the attributes of the requested file type */
	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				  DRV_MSG_CODE_NVM_GET_FILE_ATT,
				  image_type,
				  &mcp_resp, &mcp_param, &txn_size,
				  (u32 *)&file_att);

	/* Fail if the mailbox call failed or the MFW did not acknowledge */
	if (rc || (mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;

	/* Report the image location and length to the caller */
	*nvram_offset_bytes = file_att.nvm_start_addr;
	*nvram_size_bytes = file_att.len;

	DP_VERBOSE(p_hwfn->p_dev,
		   ECORE_MSG_DEBUG,
		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
		   image_type, *nvram_offset_bytes, *nvram_size_bytes);

	/* The image size must be dword-aligned */
	if (*nvram_size_bytes & 0x3)
		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;

	return DBG_STATUS_OK;
}
3955 
3956 /* Reads data from NVRAM */
static enum dbg_status qed_nvram_read(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      u32 nvram_offset_bytes,
				      u32 nvram_size_bytes, u32 *ret_buf)
{
	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
	s32 bytes_left = nvram_size_bytes;
	u32 read_offset = 0, param = 0;

	/* Progress message only - log at debug verbosity (consistent with
	 * qed_find_nvram_image()) instead of notice level, so routine debug
	 * dumps don't spam the log.
	 */
	DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_DEBUG,
		   "nvram_read: reading image of size %d bytes from NVRAM\n",
		   nvram_size_bytes);

	/* Read the image in chunks; each MCP mailbox transaction moves at
	 * most MCP_DRV_NVM_BUF_LEN bytes.
	 */
	do {
		bytes_to_copy =
		    (bytes_left >
		     MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;

		/* Call NVRAM read command */
		SET_MFW_FIELD(param,
			      DRV_MB_PARAM_NVM_OFFSET,
			      nvram_offset_bytes + read_offset);
		SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
		if (ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					 DRV_MSG_CODE_NVM_READ_NVRAM, param,
					 &ret_mcp_resp,
					 &ret_mcp_param, &ret_read_size,
					 (u32 *)((u8 *)ret_buf +
						 read_offset))) {
			DP_NOTICE(p_hwfn->p_dev, false, "rc = DBG_STATUS_NVRAM_READ_FAILED\n");
			return DBG_STATUS_NVRAM_READ_FAILED;
		}

		/* Check response */
		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) {
			DP_NOTICE(p_hwfn->p_dev, false, "rc = DBG_STATUS_NVRAM_READ_FAILED\n");
			return DBG_STATUS_NVRAM_READ_FAILED;
		}

		/* Update read offset.
		 * NOTE(review): assumes the MFW never returns a successful
		 * response with ret_read_size == 0, which would loop forever.
		 */
		read_offset += ret_read_size;
		bytes_left -= ret_read_size;
	} while (bytes_left > 0);

	return DBG_STATUS_OK;
}
4003 
4004 /* Get info on the MCP Trace data in the scratchpad:
4005  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4006  * - trace_data_size (OUT): trace data size in bytes (without the header)
4007  */
static enum dbg_status qed_mcp_trace_get_data_info(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u32 *trace_data_grc_addr,
						   u32 *trace_data_size)
{
	u32 offsize, sig;

	/* The MCP scratchpad holds an offsize word describing the location
	 * of the trace section within the scratchpad.
	 */
	offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
	*trace_data_grc_addr = MCP_REG_SCRATCH + SECTION_OFFSET(offsize);

	/* Validate the trace section signature before trusting its size */
	sig = ecore_rd(p_hwfn, p_ptt,
		       *trace_data_grc_addr +
		       offsetof(struct mcp_trace, signature));
	if (sig != MFW_TRACE_SIGNATURE)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;

	/* Read the trace data size (excluding the header) */
	*trace_data_size = ecore_rd(p_hwfn, p_ptt,
				    *trace_data_grc_addr +
				    offsetof(struct mcp_trace, size));

	return DBG_STATUS_OK;
}
4039 
4040 /* Reads MCP trace meta data image from NVRAM
4041  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4042  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4043  *			      loaded from file).
4044  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4045  */
static enum dbg_status qed_mcp_trace_get_meta_info(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u32 trace_data_size_bytes,
						   u32 *running_bundle_id,
						   u32 *trace_meta_offset,
						   u32 *trace_meta_size)
{
	u32 offsize, image_type, bundle_id_addr;

	/* The running bundle ID is stored right after the trace data in the
	 * MCP scratchpad trace section.
	 */
	offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
	bundle_id_addr = MCP_REG_SCRATCH + SECTION_OFFSET(offsize) +
			 SECTION_SIZE(offsize) + trace_data_size_bytes;
	*running_bundle_id = ecore_rd(p_hwfn, p_ptt, bundle_id_addr);

	/* Only bundle IDs 0 and 1 are valid */
	if (*running_bundle_id > 1)
		return DBG_STATUS_INVALID_NVRAM_BUNDLE;

	/* Select the NVRAM meta image matching the running bundle */
	image_type = (*running_bundle_id == DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 :
		     NVM_TYPE_MFW_TRACE2;

	return qed_find_nvram_image(p_hwfn, p_ptt, image_type,
				    trace_meta_offset, trace_meta_size);
}
4076 
4077 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
static enum dbg_status qed_mcp_trace_read_meta(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt,
					       u32 nvram_offset_in_bytes,
					       u32 size_in_bytes, u32 *buf)
{
	u8 *byte_buf = (u8 *)buf;
	u8 num_modules, module_len, mod_idx;
	enum dbg_status status;
	u32 signature;

	/* Fetch the raw meta image from NVRAM into the caller's buffer */
	status = qed_nvram_read(p_hwfn, p_ptt,
				nvram_offset_in_bytes, size_in_bytes, buf);
	if (status != DBG_STATUS_OK)
		return status;

	/* The image must begin with the NVM magic value */
	signature = qed_read_unaligned_dword(byte_buf);
	byte_buf += sizeof(signature);
	if (signature != NVM_MAGIC_VALUE)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;

	/* Skip the length-prefixed per-module records.
	 * NOTE(review): module lengths are not validated against
	 * size_in_bytes; assumes a well-formed image - confirm upstream.
	 */
	num_modules = *byte_buf++;
	for (mod_idx = 0; mod_idx < num_modules; mod_idx++) {
		module_len = *byte_buf++;
		byte_buf += module_len;
	}

	/* A second magic value must follow the module list */
	signature = qed_read_unaligned_dword(byte_buf);
	byte_buf += sizeof(signature);
	if (signature != NVM_MAGIC_VALUE)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;

	return DBG_STATUS_OK;
}
4117 
4118 /* Dump MCP Trace */
static enum dbg_status qed_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *dump_buf,
					  bool dump, u32 *num_dumped_dwords)
{
	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
	enum dbg_status status;
	int halted = 0;
	bool use_mfw;

	*num_dumped_dwords = 0;

	/* MFW (MCP) interaction is skipped when the NO_MCP debug param is set */
	use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);

	/* Get trace data info */
	status = qed_mcp_trace_get_data_info(p_hwfn,
					     p_ptt,
					     &trace_data_grc_addr,
					     &trace_data_size_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "mcp-trace");

	/* Halt MCP while reading from scratchpad so the read data will be
	 * consistent. if halt fails, MCP trace is taken anyway, with a small
	 * risk that it may be corrupt.
	 */
	if (dump && use_mfw) {
		halted = !ecore_mcp_halt(p_hwfn, p_ptt);
		if (!halted)
			DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
	}

	/* Find trace data size (data plus the mcp_trace header, in dwords) */
	trace_data_size_dwords =
	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
			 BYTES_IN_DWORD);

	/* Dump trace data section header and param */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "mcp_trace_data", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", trace_data_size_dwords);

	/* Read trace data from scratchpad into dump buffer */
	offset += qed_grc_dump_addr_range(p_hwfn,
					  p_ptt,
					  dump_buf + offset,
					  dump,
					  BYTES_TO_DWORDS(trace_data_grc_addr),
					  trace_data_size_dwords, false,
					  SPLIT_TYPE_NONE, 0);

	/* Resume MCP (only if halt succeeded) */
	if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");

	/* Dump trace meta section header */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "mcp_trace_meta", 1);

	/* If MCP Trace meta size parameter was set, use it.
	 * Otherwise, read trace meta.
	 * trace_meta_size_bytes is dword-aligned.
	 * Note: status is DBG_STATUS_OK here (checked after get_data_info),
	 * so skipping the call below leaves it OK on purpose.
	 */
	trace_meta_size_bytes =
		qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
	if ((!trace_meta_size_bytes || dump) && use_mfw)
		status = qed_mcp_trace_get_meta_info(p_hwfn,
						     p_ptt,
						     trace_data_size_bytes,
						     &running_bundle_id,
						     &trace_meta_offset_bytes,
						     &trace_meta_size_bytes);
	if (status == DBG_STATUS_OK)
		trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);

	/* Dump trace meta size param */
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", trace_meta_size_dwords);

	/* Read trace meta image into dump buffer */
	if (dump && trace_meta_size_dwords)
		status = qed_mcp_trace_read_meta(p_hwfn,
						 p_ptt,
						 trace_meta_offset_bytes,
						 trace_meta_size_bytes,
						 dump_buf + offset);
	if (status == DBG_STATUS_OK)
		offset += trace_meta_size_dwords;

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	/* If no mcp access, indicate that the dump doesn't contain the meta
	 * data from NVRAM.
	 */
	return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}
4228 
4229 /* Dump GRC FIFO */
static enum dbg_status qed_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u32 *dump_buf,
					 bool dump, u32 *num_dumped_dwords)
{
	u32 size_param_offset, offset = 0, dwords_read = 0, addr, len;
	bool has_data;

	*num_dumped_dwords = 0;

	/* Global params and dump-type string */
	offset += qed_dump_common_global_params(p_hwfn, p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "reg-fifo");

	/* Section header; the "size" param starts at 0 and is patched after
	 * the FIFO has been drained.
	 */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "reg_fifo_data", 1);
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);

	if (!dump) {
		/* Size estimate only: FIFO occupancy cannot be queried
		 * without reading it, so reserve the full depth.
		 */
		offset += REG_FIFO_DEPTH_DWORDS;
		goto out;
	}

	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
	len = REG_FIFO_ELEMENT_DWORDS;
	has_data = ecore_rd(p_hwfn, p_ptt,
			    GRC_REG_TRACE_FIFO_VALID_DATA) > 0;

	/* Drain one element at a time using DMAE, since this is wide-bus
	 * memory that must be accessed atomically. Cap at the nominal FIFO
	 * depth because new entries may keep arriving while we empty it.
	 */
	while (has_data && dwords_read < REG_FIFO_DEPTH_DWORDS) {
		offset += qed_grc_dump_addr_range(p_hwfn, p_ptt,
						  dump_buf + offset,
						  true, addr, len,
						  true, SPLIT_TYPE_NONE, 0);
		dwords_read += REG_FIFO_ELEMENT_DWORDS;
		has_data = ecore_rd(p_hwfn, p_ptt,
				    GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
	}

	/* Patch the real size into the section parameter */
	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
			   dwords_read);
out:
	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
}
4298 
4299 /* Dump IGU FIFO */
static enum dbg_status qed_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u32 *dump_buf,
					 bool dump, u32 *num_dumped_dwords)
{
	u32 size_param_offset, offset = 0, dwords_read = 0, addr, len;
	bool has_data;

	*num_dumped_dwords = 0;

	/* Global params and dump-type string */
	offset += qed_dump_common_global_params(p_hwfn, p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "igu-fifo");

	/* Section header; the "size" param starts at 0 and is patched after
	 * the FIFO has been drained.
	 */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "igu_fifo_data", 1);
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);

	if (!dump) {
		/* Size estimate only: FIFO occupancy cannot be queried
		 * without reading it, so reserve the full depth.
		 */
		offset += IGU_FIFO_DEPTH_DWORDS;
		goto out;
	}

	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
	len = IGU_FIFO_ELEMENT_DWORDS;
	has_data = ecore_rd(p_hwfn, p_ptt,
			    IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;

	/* Drain one element at a time using DMAE, since this is wide-bus
	 * memory that must be accessed atomically. Cap at the nominal FIFO
	 * depth because new entries may keep arriving while we empty it.
	 */
	while (has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS) {
		offset += qed_grc_dump_addr_range(p_hwfn, p_ptt,
						  dump_buf + offset,
						  true, addr, len,
						  true, SPLIT_TYPE_NONE, 0);
		dwords_read += IGU_FIFO_ELEMENT_DWORDS;
		has_data = ecore_rd(p_hwfn, p_ptt,
				    IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
	}

	/* Patch the real size into the section parameter */
	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
			   dwords_read);
out:
	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
}
4368 
4369 /* Protection Override dump */
static enum dbg_status qed_protection_override_dump(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt,
						    u32 *dump_buf,
						    bool dump,
						    u32 *num_dumped_dwords)
{
	u32 offset = 0, size_param_offset, wnd_dwords, addr;

	*num_dumped_dwords = 0;

	/* Global params and dump-type string */
	offset += qed_dump_common_global_params(p_hwfn, p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "protection-override");

	/* Section header; the "size" param starts at 0 and is patched once
	 * the actual window size is known.
	 */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "protection_override_data", 1);
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);

	if (!dump) {
		/* Size estimate only: reserve room for the maximal window */
		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
		goto out;
	}

	/* Read the valid part of the override window, if any, using DMAE
	 * (wide-bus memory must be accessed atomically).
	 */
	wnd_dwords =
		ecore_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
	if (wnd_dwords) {
		addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
		offset += qed_grc_dump_addr_range(p_hwfn, p_ptt,
						  dump_buf + offset,
						  true, addr, wnd_dwords,
						  true, SPLIT_TYPE_NONE, 0);
		qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
				   wnd_dwords);
	}
out:
	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
}
4424 
4425 /* Performs FW Asserts Dump to the specified buffer.
4426  * Returns the dumped size in dwords.
4427  */
static u32 qed_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u32 *dump_buf,
			       bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct fw_asserts_ram_section *asserts;
	char storm_letter_str[2] = "?";
	struct fw_info fw_info;
	u32 offset = 0;
	u8 storm_id;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "fw-asserts");

	/* Find Storm dump size. One "fw_asserts" section is emitted per
	 * Storm whose SEM block is out of reset.
	 */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
		struct storm_defs *storm = &s_storm_defs[storm_id];
		u32 last_list_idx, addr;

		/* Skip Storms whose SEM block is in reset - not readable */
		if (dev_data->block_in_reset[storm->sem_block_id])
			continue;

		/* Read FW info for the current Storm.
		 * NOTE(review): the return value of qed_read_storm_fw_info()
		 * is not checked here; assumes fw_info is filled on success -
		 * confirm against its contract.
		 */
		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);

		asserts = &fw_info.fw_asserts_section;

		/* Dump FW Asserts section header and params */
		storm_letter_str[0] = storm->letter;
		offset += qed_dump_section_hdr(dump_buf + offset,
					       dump, "fw_asserts", 2);
		offset += qed_dump_str_param(dump_buf + offset,
					     dump, "storm", storm_letter_str);
		offset += qed_dump_num_param(dump_buf + offset,
					     dump,
					     "size",
					     asserts->list_element_dword_size);

		/* Read and dump FW Asserts data. In size-estimate mode, just
		 * account for one list element per Storm.
		 */
		if (!dump) {
			offset += asserts->list_element_dword_size;
			continue;
		}

		/* Compute the RAM address of the most recent assert entry:
		 * the element preceding next_list_idx (wrapping to the list
		 * end when next_list_idx is 0).
		 */
		fw_asserts_section_addr = storm->sem_fast_mem_addr +
			SEM_FAST_REG_INT_RAM +
			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
		next_list_idx_addr = fw_asserts_section_addr +
			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
		next_list_idx = ecore_rd(p_hwfn, p_ptt, next_list_idx_addr);
		last_list_idx = (next_list_idx > 0 ?
				 next_list_idx :
				 asserts->list_num_elements) - 1;
		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
		       asserts->list_dword_offset +
		       last_list_idx * asserts->list_element_dword_size;
		offset +=
		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
					    dump_buf + offset,
					    dump, addr,
					    asserts->list_element_dword_size,
						  false, SPLIT_TYPE_NONE, 0);
	}

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	return offset;
}
4502 
4503 /* Dumps the specified ILT pages to the specified buffer.
4504  * Returns the dumped size in dwords.
4505  */
static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
				    bool dump,
				    u32 start_page_id,
				    u32 num_pages,
				    struct phys_mem_desc *ilt_pages,
				    bool dump_page_ids)
{
	u32 offset = 0, page_id, last_page_id;

	/* Empty ranges dump nothing */
	if (!num_pages)
		return 0;

	last_page_id = start_page_id + num_pages - 1;

	for (page_id = start_page_id; page_id <= last_page_id; page_id++) {
		struct phys_mem_desc *desc = &ilt_pages[page_id];

		/* Skip shadow entries with no backing memory */
		if (!desc->virt_addr)
			continue;

		if (dump_page_ids) {
			/* Record only the page ID (one dword) */
			if (dump)
				dump_buf[offset] = page_id;
			offset++;
		} else {
			/* Record the full page contents */
			if (dump)
				memcpy(dump_buf + offset,
				       desc->virt_addr, desc->size);
			offset += BYTES_TO_DWORDS(desc->size);
		}
	}

	return offset;
}
4548 
4549 /* Dumps a section containing the dumped ILT pages.
4550  * Returns the dumped size in dwords.
4551  */
static u32 qed_ilt_dump_pages_section(struct ecore_hwfn *p_hwfn,
				      u32 *dump_buf,
				      bool dump,
				      u32 valid_conn_pf_pages,
				      u32 valid_conn_vf_pages,
				      struct phys_mem_desc *ilt_pages,
				      bool dump_page_ids)
{
	struct ecore_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
	u32 pf_start_line, start_page_id, offset = 0;
	u32 cdut_pf_init_pages, cdut_vf_init_pages;
	u32 cdut_pf_work_pages, cdut_vf_work_pages;
	u32 base_data_offset, size_param_offset;
	u32 cdut_pf_pages, cdut_vf_pages;
	const char *section_name;
	u8 i;

	/* The same routine emits either a page-ID section or a page-memory
	 * section, selected by dump_page_ids.
	 */
	section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
	cdut_pf_init_pages = ecore_get_cdut_num_pf_init_pages(p_hwfn);
	cdut_vf_init_pages = ecore_get_cdut_num_vf_init_pages(p_hwfn);
	cdut_pf_work_pages = ecore_get_cdut_num_pf_work_pages(p_hwfn);
	cdut_vf_work_pages = ecore_get_cdut_num_vf_work_pages(p_hwfn);
	cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
	cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
	/* ILT shadow indices below are relative to the PF's first line */
	pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;

	offset +=
	    qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1);

	/* Dump size parameter (0 for now, overwritten with real size later) */
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
	base_data_offset = offset;

	/* CDUC pages are ordered as follows:
	 * - PF pages - valid section (included in PF connection type mapping)
	 * - PF pages - invalid section (not dumped)
	 * - For each VF in the PF:
	 *   - VF pages - valid section (included in VF connection type mapping)
	 *   - VF pages - invalid section (not dumped)
	 */
	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
		/* Dump connection PF pages */
		start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
		offset += qed_ilt_dump_pages_range(dump_buf + offset,
						   dump,
						   start_page_id,
						   valid_conn_pf_pages,
						   ilt_pages, dump_page_ids);

		/* Dump connection VF pages - one valid range per VF, each
		 * starting vf_total_lines after the previous one.
		 */
		start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
		     i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
			offset += qed_ilt_dump_pages_range(dump_buf + offset,
							   dump,
							   start_page_id,
							   valid_conn_vf_pages,
							   ilt_pages,
							   dump_page_ids);
	}

	/* CDUT pages are ordered as follows:
	 * - PF init pages (not dumped)
	 * - PF work pages
	 * - For each VF in the PF:
	 *   - VF init pages (not dumped)
	 *   - VF work pages
	 */
	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) {
		/* Dump task PF pages (work pages only; init pages skipped) */
		start_page_id = clients[ILT_CLI_CDUT].first.val +
		    cdut_pf_init_pages - pf_start_line;
		offset += qed_ilt_dump_pages_range(dump_buf + offset,
						   dump,
						   start_page_id,
						   cdut_pf_work_pages,
						   ilt_pages, dump_page_ids);

		/* Dump task VF pages - skip each VF's init pages, dump its
		 * work pages, then step a full cdut_vf_pages stride.
		 */
		start_page_id = clients[ILT_CLI_CDUT].first.val +
		    cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
		     i++, start_page_id += cdut_vf_pages)
			offset += qed_ilt_dump_pages_range(dump_buf + offset,
							   dump,
							   start_page_id,
							   cdut_vf_work_pages,
							   ilt_pages,
							   dump_page_ids);
	}

	/* Overwrite size param with the actual dumped data size */
	if (dump)
		qed_dump_num_param(dump_buf + size_param_offset,
				   dump, "size", offset - base_data_offset);

	return offset;
}
4651 
4652 /* Performs ILT Dump to the specified buffer.
4653  * Returns the dumped size in dwords.
4654  */
static u32 qed_ilt_dump(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct ecore_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
	u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0;
	u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages;
	u32 num_cids_per_page, conn_ctx_size;
	u32 cduc_page_size, cdut_page_size;
	struct phys_mem_desc *ilt_pages;
	u8 conn_type;

	/* ILT page size is 2^(p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN) */
	cduc_page_size = 1 <<
	    (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
	cdut_page_size = 1 <<
	    (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
	conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
	/* Number of connection contexts that fit in a single CDUC page */
	num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
	ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;

	/* Dump global params - 22 must match number of params below */
	offset += qed_dump_common_global_params(p_hwfn, p_ptt,
						dump_buf + offset, dump, 22);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "ilt-dump");
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-page-size", cduc_page_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-first-page-id",
				     clients[ILT_CLI_CDUC].first.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-last-page-id",
				     clients[ILT_CLI_CDUC].last.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-num-pf-pages",
				     clients
				     [ILT_CLI_CDUC].pf_total_lines);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-num-vf-pages",
				     clients
				     [ILT_CLI_CDUC].vf_total_lines);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "max-conn-ctx-size",
				     conn_ctx_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-page-size", cdut_page_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-first-page-id",
				     clients[ILT_CLI_CDUT].first.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-last-page-id",
				     clients[ILT_CLI_CDUT].last.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-pf-init-pages",
				     ecore_get_cdut_num_pf_init_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-vf-init-pages",
				     ecore_get_cdut_num_vf_init_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-pf-work-pages",
				     ecore_get_cdut_num_pf_work_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-vf-work-pages",
				     ecore_get_cdut_num_vf_work_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "max-task-ctx-size",
				     p_hwfn->p_cxt_mngr->task_ctx_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "task-type-id",
				     p_hwfn->p_cxt_mngr->task_type_id);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "first-vf-id-in-pf",
				     p_hwfn->p_cxt_mngr->first_vf_in_pf);
	offset += /* 18 */ qed_dump_num_param(dump_buf + offset,
					      dump,
					      "num-vfs-in-pf",
					      p_hwfn->p_cxt_mngr->vf_count);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "ptr-size-bytes", sizeof(void *));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "pf-start-line",
				     p_hwfn->p_cxt_mngr->pf_start_line);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "page-mem-desc-size-dwords",
				     PAGE_MEM_DESC_SIZE_DWORDS);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "ilt-shadow-size",
				     p_hwfn->p_cxt_mngr->ilt_shadow_size);
	/* Additional/Less parameters require matching of number in call to
	 * dump_common_global_params()
	 */

	/* Dump section containing number of PF CIDs per connection type.
	 * valid_conn_pf_cids accumulates the total while writing the counts.
	 */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "num_pf_cids_per_conn_type", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", NUM_OF_CONNECTION_TYPES);
	for (conn_type = 0, valid_conn_pf_cids = 0;
	     conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
		u32 num_pf_cids =
		    p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;

		if (dump)
			*(dump_buf + offset) = num_pf_cids;
		valid_conn_pf_cids += num_pf_cids;
	}

	/* Dump section containing number of VF CIDs per connection type */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "num_vf_cids_per_conn_type", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", NUM_OF_CONNECTION_TYPES);
	for (conn_type = 0, valid_conn_vf_cids = 0;
	     conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
		u32 num_vf_cids =
		    p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;

		if (dump)
			*(dump_buf + offset) = num_vf_cids;
		valid_conn_vf_cids += num_vf_cids;
	}

	/* Dump section containing physical memory descs for each ILT page */
	num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "ilt_page_desc", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "size",
				     num_pages * PAGE_MEM_DESC_SIZE_DWORDS);

	/* Copy memory descriptors to dump buffer */
	if (dump) {
		u32 page_id;

		for (page_id = 0; page_id < num_pages;
		     page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS)
			memcpy(dump_buf + offset,
			       &ilt_pages[page_id],
			       DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS));
	} else {
		/* Not dumping - only account for the descriptors' size */
		offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
	}

	/* Number of ILT pages needed to hold the valid PF/VF connections */
	valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids,
					   num_cids_per_page);
	valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids,
					   num_cids_per_page);

	/* Dump ILT pages IDs */
	offset += qed_ilt_dump_pages_section(p_hwfn,
					     dump_buf + offset,
					     dump,
					     valid_conn_pf_pages,
					     valid_conn_vf_pages,
					     ilt_pages, true);

	/* Dump ILT pages memory */
	offset += qed_ilt_dump_pages_section(p_hwfn,
					     dump_buf + offset,
					     dump,
					     valid_conn_pf_pages,
					     valid_conn_vf_pages,
					     ilt_pages, false);

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	return offset;
}
4844 
4845 /***************************** Public Functions *******************************/
4846 
qed_dbg_set_bin_ptr(struct ecore_hwfn * p_hwfn,const u8 * const bin_ptr)4847 enum dbg_status qed_dbg_set_bin_ptr(struct ecore_hwfn *p_hwfn,
4848 				    const u8 * const bin_ptr)
4849 {
4850 	struct bin_buffer_hdr *buf_hdrs =
4851 			(struct bin_buffer_hdr *)(osal_uintptr_t)bin_ptr;
4852 	u8 buf_id;
4853 
4854 	/* Convert binary data to debug arrays */
4855 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
4856 		qed_set_dbg_bin_buf(p_hwfn,
4857 				    buf_id,
4858 				    (const u32 *)(bin_ptr +
4859 						  buf_hdrs[buf_id].offset),
4860 						  buf_hdrs[buf_id].length);
4861 
4862 	return DBG_STATUS_OK;
4863 }
4864 
/* Records the debug application version. Versions older than TOOLS_VERSION
 * are rejected with DBG_STATUS_UNSUPPORTED_APP_VERSION.
 */
enum dbg_status qed_dbg_set_app_ver(u32 ver)
{
	if (ver >= TOOLS_VERSION) {
		s_app_ver = ver;
		return DBG_STATUS_OK;
	}

	return DBG_STATUS_UNSUPPORTED_APP_VERSION;
}
4874 
qed_read_fw_info(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,struct fw_info * fw_info)4875 bool qed_read_fw_info(struct ecore_hwfn *p_hwfn,
4876 		      struct ecore_ptt *p_ptt, struct fw_info *fw_info)
4877 {
4878 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4879 	u8 storm_id;
4880 
4881 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4882 		struct storm_defs *storm = &s_storm_defs[storm_id];
4883 
4884 		/* Skip Storm if it's in reset */
4885 		if (dev_data->block_in_reset[storm->sem_block_id])
4886 			continue;
4887 
4888 		/* Read FW info for the current Storm */
4889 		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
4890 
4891 		return true;
4892 	}
4893 
4894 	return false;
4895 }
4896 
/* Sets the value of a GRC debug parameter. If the parameter is a preset,
 * all non-persistent parameters are updated with the preset's values;
 * otherwise only the requested parameter is set.
 * Returns DBG_STATUS_INVALID_ARGS if the parameter ID or value is out of
 * range, or on an attempt to disable a preset (val == 0).
 */
enum dbg_status qed_dbg_grc_config(struct ecore_hwfn *p_hwfn,
				   enum dbg_grc_params grc_param, u32 val)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	enum dbg_status status;
	int i;

	DP_VERBOSE(p_hwfn->p_dev,
		   ECORE_MSG_DEBUG,
		   "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);

	status = qed_dbg_dev_init(p_hwfn);
	if (status != DBG_STATUS_OK)
		return status;

	/* Initializes the GRC parameters (if not initialized). Needed in order
	 * to set the default parameter values for the first time.
	 */
	qed_dbg_grc_init_params(p_hwfn);

	/* Validate parameter ID and value against the definitions table */
	if (grc_param >= MAX_DBG_GRC_PARAMS)
		return DBG_STATUS_INVALID_ARGS;
	if (val < s_grc_param_defs[grc_param].min ||
	    val > s_grc_param_defs[grc_param].max)
		return DBG_STATUS_INVALID_ARGS;

	if (s_grc_param_defs[grc_param].is_preset) {
		/* Preset param */

		/* Disabling a preset is not allowed. Call
		 * dbg_grc_set_params_default instead.
		 */
		if (!val)
			return DBG_STATUS_INVALID_ARGS;

		/* Update all params with the preset values */
		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
			struct grc_param_defs *defs = &s_grc_param_defs[i];
			u32 preset_val;
			/* Skip persistent params */
			if (defs->is_persistent)
				continue;

			/* Find preset value */
			if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
				preset_val =
				    defs->exclude_all_preset_val;
			else if (grc_param == DBG_GRC_PARAM_CRASH)
				preset_val =
				    defs->crash_preset_val[dev_data->chip_id];
			else
				return DBG_STATUS_INVALID_ARGS;

			qed_grc_set_param(p_hwfn, i, preset_val);
		}
	} else {
		/* Regular param - set its value */
		qed_grc_set_param(p_hwfn, grc_param, val);
	}

	return DBG_STATUS_OK;
}
4959 
4960 /* Assign default GRC param values */
qed_dbg_grc_set_params_default(struct ecore_hwfn * p_hwfn)4961 void qed_dbg_grc_set_params_default(struct ecore_hwfn *p_hwfn)
4962 {
4963 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4964 	u32 i;
4965 
4966 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
4967 		if (!s_grc_param_defs[i].is_persistent)
4968 			dev_data->grc.param_val[i] =
4969 			    s_grc_param_defs[i].default_val[dev_data->chip_id];
4970 }
4971 
/* Computes the buffer size (in dwords) required for a GRC dump */
enum dbg_status qed_dbg_grc_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u32 *buf_size)
{
	enum dbg_status rc;

	*buf_size = 0;

	rc = qed_dbg_dev_init(p_hwfn);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* All debug arrays used by the GRC dump must be registered */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	/* Non-dumping pass computes the required size */
	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
4992 
/* Performs a GRC dump into the caller's buffer; fails with
 * DBG_STATUS_DUMP_BUF_TOO_SMALL if the buffer cannot hold it.
 */
enum dbg_status qed_dbg_grc_dump(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u32 *dump_buf,
				 u32 buf_size_in_dwords,
				 u32 *num_dumped_dwords)
{
	u32 required_dwords;
	enum dbg_status rc;

	*num_dumped_dwords = 0;

	rc = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &required_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	if (buf_size_in_dwords < required_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* GRC Dump */
	rc = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return rc;
}
5021 
/* Computes the buffer size (in dwords) required for an idle-check dump.
 * The size is computed once and cached in the idle_chk data.
 */
enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u32 *buf_size)
{
	struct idle_chk_data *idle_chk = &p_hwfn->dbg_info.idle_chk;
	enum dbg_status rc;

	*buf_size = 0;

	rc = qed_dbg_dev_init(p_hwfn);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* All debug arrays used by the idle check must be registered */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	/* Compute and cache the size on first use */
	if (!idle_chk->buf_size_set) {
		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn, p_ptt,
						       NULL, false);
		idle_chk->buf_size_set = true;
	}

	*buf_size = idle_chk->buf_size;

	return DBG_STATUS_OK;
}
5052 
/* Performs an idle-check dump into the caller's buffer */
enum dbg_status qed_dbg_idle_chk_dump(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      u32 *dump_buf,
				      u32 buf_size_in_dwords,
				      u32 *num_dumped_dwords)
{
	u32 required_dwords;
	enum dbg_status rc;

	*num_dumped_dwords = 0;

	rc = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt,
						&required_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	if (buf_size_in_dwords < required_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Take blocks out of reset and refresh the cached reset state */
	qed_grc_unreset_blocks(p_hwfn, p_ptt, true);
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	/* Idle Check Dump */
	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return DBG_STATUS_OK;
}
5085 
/* Computes the buffer size (in dwords) required for an MCP trace dump */
enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt,
						    u32 *buf_size)
{
	enum dbg_status rc;

	*buf_size = 0;

	rc = qed_dbg_dev_init(p_hwfn);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Non-dumping pass computes the required size */
	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
5099 
/* Performs an MCP trace dump into the caller's buffer.
 * An NVRAM-image read failure during the size query is tolerated, since a
 * partial trace dump is still possible in that case.
 */
enum dbg_status qed_dbg_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       u32 *dump_buf,
				       u32 buf_size_in_dwords,
				       u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	/* Zero the output count up front so every early-return path leaves
	 * the caller with a defined value (consistent with the other
	 * qed_dbg_*_dump entry points, which all start this way).
	 */
	*num_dumped_dwords = 0;

	status =
		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
						    p_ptt,
						    &needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK && status !=
	    DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return status;
	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	/* Perform dump */
	status = qed_mcp_trace_dump(p_hwfn,
				    p_ptt, dump_buf, true, num_dumped_dwords);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return status;
}
5131 
/* Computes the buffer size (in dwords) required for a REG FIFO dump */
enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u32 *buf_size)
{
	enum dbg_status rc;

	*buf_size = 0;

	rc = qed_dbg_dev_init(p_hwfn);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Non-dumping pass computes the required size */
	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
5145 
/* Performs a REG FIFO dump into the caller's buffer */
enum dbg_status qed_dbg_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      u32 *dump_buf,
				      u32 buf_size_in_dwords,
				      u32 *num_dumped_dwords)
{
	u32 required_dwords;
	enum dbg_status rc;

	*num_dumped_dwords = 0;

	rc = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
						&required_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	if (buf_size_in_dwords < required_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	rc = qed_reg_fifo_dump(p_hwfn, p_ptt, dump_buf, true,
			       num_dumped_dwords);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return rc;
}
5177 
/* Computes the buffer size (in dwords) required for an IGU FIFO dump */
enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u32 *buf_size)
{
	enum dbg_status rc;

	*buf_size = 0;

	rc = qed_dbg_dev_init(p_hwfn);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Non-dumping pass computes the required size */
	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
5191 
/* Performs an IGU FIFO dump into the caller's buffer */
enum dbg_status qed_dbg_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      u32 *dump_buf,
				      u32 buf_size_in_dwords,
				      u32 *num_dumped_dwords)
{
	u32 required_dwords;
	enum dbg_status rc;

	*num_dumped_dwords = 0;

	rc = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt,
						&required_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	if (buf_size_in_dwords < required_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	rc = qed_igu_fifo_dump(p_hwfn, p_ptt, dump_buf, true,
			       num_dumped_dwords);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return rc;
}
5222 
5223 enum dbg_status
qed_dbg_protection_override_get_dump_buf_size(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,u32 * buf_size)5224 qed_dbg_protection_override_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
5225 					      struct ecore_ptt *p_ptt,
5226 					      u32 *buf_size)
5227 {
5228 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5229 
5230 	*buf_size = 0;
5231 
5232 	if (status != DBG_STATUS_OK)
5233 		return status;
5234 
5235 	return qed_protection_override_dump(p_hwfn,
5236 					    p_ptt, NULL, false, buf_size);
5237 }
5238 
/* Performs a protection override dump into the caller's buffer */
enum dbg_status qed_dbg_protection_override_dump(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u32 *dump_buf,
						 u32 buf_size_in_dwords,
						 u32 *num_dumped_dwords)
{
	u32 required_dwords;
	enum dbg_status rc;

	*num_dumped_dwords = 0;

	rc = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt,
							   &required_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	if (buf_size_in_dwords < required_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	rc = qed_protection_override_dump(p_hwfn, p_ptt, dump_buf, true,
					  num_dumped_dwords);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return rc;
}
5273 
/* Computes the buffer size (in dwords) required for a FW asserts dump */
enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
						     struct ecore_ptt *p_ptt,
						     u32 *buf_size)
{
	enum dbg_status rc;

	*buf_size = 0;

	rc = qed_dbg_dev_init(p_hwfn);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	/* Non-dumping pass computes the required size */
	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);

	return DBG_STATUS_OK;
}
5292 
/* Performs a FW asserts dump into the caller's buffer */
enum dbg_status qed_dbg_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					u32 *dump_buf,
					u32 buf_size_in_dwords,
					u32 *num_dumped_dwords)
{
	u32 required_dwords;
	enum dbg_status rc;

	*num_dumped_dwords = 0;

	rc = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt,
						  &required_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	if (buf_size_in_dwords < required_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf,
						 true);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return DBG_STATUS_OK;
}
5321 
/* Computes the buffer size (in dwords) required for an ILT dump */
static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
						     struct ecore_ptt *p_ptt,
						     u32 *buf_size)
{
	enum dbg_status rc;

	*buf_size = 0;

	rc = qed_dbg_dev_init(p_hwfn);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Non-dumping pass computes the required size */
	*buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false);

	return DBG_STATUS_OK;
}
5337 
/* Performs an ILT dump into the caller's buffer */
static enum dbg_status qed_dbg_ilt_dump(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					u32 *dump_buf,
					u32 buf_size_in_dwords,
					u32 *num_dumped_dwords)
{
	u32 required_dwords;
	enum dbg_status rc;

	*num_dumped_dwords = 0;

	rc = qed_dbg_ilt_get_dump_buf_size(p_hwfn, p_ptt, &required_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	if (buf_size_in_dwords < required_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	*num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return DBG_STATUS_OK;
}
5365 
/* Reads the attention registers of the specified block and attention type,
 * recording in 'results' only registers whose mode matches and whose status
 * value is non-zero. When clear_status is set, the status-clear register
 * address is read instead of the plain status register (NOTE(review):
 * presumably reading that address also clears the attention - confirm
 * against the HW documentation).
 */
enum dbg_status qed_dbg_read_attn(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  enum block_id block_id,
				  enum dbg_attn_type attn_type,
				  bool clear_status,
				  struct dbg_attn_block_result *results)
{
	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
	u8 reg_idx, num_attn_regs, num_result_regs = 0;
	const struct dbg_attn_reg *attn_reg_arr;

	if (status != DBG_STATUS_OK)
		return status;

	/* All debug arrays used below must be registered */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
					       block_id,
					       attn_type, &num_attn_regs);

	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
		struct dbg_attn_reg_result *reg_result;
		u32 sts_addr, sts_val;
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode */
		eval_mode = GET_FIELD(reg_data->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		modes_buf_offset = GET_FIELD(reg_data->mode.data,
					     DBG_MODE_HDR_MODES_BUF_OFFSET);
		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
			continue;

		/* Mode match - read attention status register */
		sts_addr = DWORDS_TO_BYTES(clear_status ?
					   reg_data->sts_clr_address :
					   GET_FIELD(reg_data->data,
						     DBG_ATTN_REG_STS_ADDRESS));
		sts_val = ecore_rd(p_hwfn, p_ptt, sts_addr);
		if (!sts_val)
			continue;

		/* Non-zero attention status - add to results */
		reg_result = &results->reg_results[num_result_regs];
		SET_FIELD(reg_result->data,
			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
		SET_FIELD(reg_result->data,
			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
		reg_result->block_attn_offset = reg_data->block_attn_offset;
		reg_result->sts_val = sts_val;
		/* Also capture the current value of the mask register */
		reg_result->mask_val = ecore_rd(p_hwfn,
					      p_ptt,
					      DWORDS_TO_BYTES
					      (reg_data->mask_address));
		num_result_regs++;
	}

	/* Fill the result header: block ID, names offset, type, reg count */
	results->block_id = (u8)block_id;
	results->names_offset =
	    qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset;
	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
	SET_FIELD(results->data,
		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);

	return DBG_STATUS_OK;
}
5438 
5439 /******************************* Data Types **********************************/
5440 
5441 /* REG fifo element */
struct reg_fifo_element {
	/* Raw 64-bit element; field layout below follows from the
	 * SHIFT/MASK pairs.
	 */
	u64 data;
/* ADDRESS: bits 0-22 */
#define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
#define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
/* ACCESS: bit 23 */
#define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
#define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
/* PF: bits 24-27 */
#define REG_FIFO_ELEMENT_PF_SHIFT		24
#define REG_FIFO_ELEMENT_PF_MASK		0xf
/* VF: bits 28-35 */
#define REG_FIFO_ELEMENT_VF_SHIFT		28
#define REG_FIFO_ELEMENT_VF_MASK		0xff
/* PORT: bits 36-37 */
#define REG_FIFO_ELEMENT_PORT_SHIFT		36
#define REG_FIFO_ELEMENT_PORT_MASK		0x3
/* PRIVILEGE: bits 38-39 */
#define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
#define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
/* PROTECTION: bits 40-42 */
#define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
#define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
/* MASTER: bits 43-46 */
#define REG_FIFO_ELEMENT_MASTER_SHIFT		43
#define REG_FIFO_ELEMENT_MASTER_MASK		0xf
/* ERROR: bits 47-51 */
#define REG_FIFO_ELEMENT_ERROR_SHIFT		47
#define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
};
5463 
5464 /* REG fifo error element */
struct reg_fifo_err {
	/* Error code extracted from a REG fifo element */
	u32 err_code;
	/* Human-readable description for the error code */
	const char *err_msg;
};
5469 
5470 /* IGU fifo element */
struct igu_fifo_element {
	/* First dword; field layout below follows from the SHIFT/MASK pairs */
	u32 dword0;
/* FID: bits 0-7 */
#define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
#define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
/* IS_PF: bit 8 */
#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
/* SOURCE: bits 9-12 */
#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
/* ERR_TYPE: bits 13-16 */
#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
/* CMD_ADDR: bits 17-31 */
#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
	u32 dword1;
	u32 dword2;
/* DWORD12 fields: the 0xffffffff WR_DATA mask at shift 1 spans past a single
 * dword, so these presumably apply to dword1/dword2 taken as a 64-bit pair -
 * TODO confirm
 */
/* IS_WR_CMD: bit 0 */
#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
/* WR_DATA: bits 1-32 */
#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
	u32 reserved;
};
5491 
/* Layout of the write-data dword of an IGU fifo write command */
struct igu_fifo_wr_data {
	u32 data;
/* PROD_CONS: bits 0-23 */
#define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
#define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
/* UPDATE_FLAG: bit 24 */
#define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
#define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
/* EN_DIS_INT_FOR_SB: bits 25-26 */
#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
/* SEGMENT: bit 27 */
#define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
#define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
/* TIMER_MASK: bit 28 */
#define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
#define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
/* CMD_TYPE: bit 31 */
#define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
#define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
};
5507 
/* Layout of the write-data dword of an IGU fifo cleanup command */
struct igu_fifo_cleanup_wr_data {
	u32 data;
/* RESERVED: bits 0-26 */
#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
/* CLEANUP_VAL: bit 27 */
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
/* CLEANUP_TYPE: bits 28-30 */
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
/* CMD_TYPE: bit 31 */
#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
};
5519 
5520 /* Protection override element */
5521 struct protection_override_element {
5522 	u64 data;
5523 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
5524 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
5525 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
5526 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
5527 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
5528 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
5529 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
5530 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
5531 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
5532 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
5533 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
5534 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
5535 };
5536 
/* Possible originators of an IGU FIFO command. Values correspond to the
 * SOURCE field of an IGU FIFO element (parallels s_igu_fifo_source_strs).
 */
enum igu_fifo_sources {
	IGU_SRC_PXP0,
	IGU_SRC_PXP1,
	IGU_SRC_PXP2,
	IGU_SRC_PXP3,
	IGU_SRC_PXP4,
	IGU_SRC_PXP5,
	IGU_SRC_PXP6,
	IGU_SRC_PXP7,
	IGU_SRC_CAU,
	IGU_SRC_ATTN,
	IGU_SRC_GRC
};

/* Classification of an IGU command address (see s_igu_fifo_addr_data) */
enum igu_fifo_addr_types {
	IGU_ADDR_TYPE_MSIX_MEM,
	IGU_ADDR_TYPE_WRITE_PBA,
	IGU_ADDR_TYPE_WRITE_INT_ACK,
	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
	IGU_ADDR_TYPE_READ_INT,
	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
	IGU_ADDR_TYPE_RESERVED
};

/* One entry of the IGU address-decoding table */
struct igu_fifo_addr_data {
	u16 start_addr;		/* first address of the range (inclusive) */
	u16 end_addr;		/* last address of the range (inclusive) */
	const char *desc;	/* description of the range */
	const char *vf_desc;	/* alternate description for VFs, or NULL */
	enum igu_fifo_addr_types type;
};
5568 
/******************************** Constants **********************************/

/* Size of s_temp_buf, the upper bound on a single parsed output line */
#define MAX_MSG_LEN				1024

/* Module name strings longer than this are truncated when loaded */
#define MCP_TRACE_MAX_MODULE_LEN		8
#define MCP_TRACE_FORMAT_MAX_PARAMS		3
#define MCP_TRACE_FORMAT_PARAM_WIDTH \
	(MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET)

/* REG FIFO addresses are in dwords; multiply by 4 to get byte addresses */
#define REG_FIFO_ELEMENT_ADDR_FACTOR		4
#define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127

#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5582 
/***************************** Constant Arrays *******************************/

/* Status string array.
 * Indexed by enum dbg_status: the entry order below must stay in sync with
 * that enum (each DBG_STATUS_* comment marks the index of the string).
 */
static const char * const s_status_str[] = {
	/* DBG_STATUS_OK */
	"Operation completed successfully",

	/* DBG_STATUS_APP_VERSION_NOT_SET */
	"Debug application version wasn't set",

	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
	"Unsupported debug application version",

	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
	"The debug block wasn't reset since the last recording",

	/* DBG_STATUS_INVALID_ARGS */
	"Invalid arguments",

	/* DBG_STATUS_OUTPUT_ALREADY_SET */
	"The debug output was already set",

	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
	"Invalid PCI buffer size",

	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
	"PCI buffer allocation failed",

	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
	"A PCI buffer wasn't allocated",

	/* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
	"The filter/trigger constraint dword offsets are not enabled for recording",


	/* DBG_STATUS_VFC_READ_ERROR */
	"Error reading from VFC",

	/* DBG_STATUS_STORM_ALREADY_ENABLED */
	"The Storm was already enabled",

	/* DBG_STATUS_STORM_NOT_ENABLED */
	"The specified Storm wasn't enabled",

	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
	"The block was already enabled",

	/* DBG_STATUS_BLOCK_NOT_ENABLED */
	"The specified block wasn't enabled",

	/* DBG_STATUS_NO_INPUT_ENABLED */
	"No input was enabled for recording",

	/* DBG_STATUS_NO_FILTER_TRIGGER_256B */
	"Filters and triggers are not allowed in E4 256-bit mode",

	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
	"The filter was already enabled",

	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
	"The trigger was already enabled",

	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
	"The trigger wasn't enabled",

	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
	"A constraint can be added only after a filter was enabled or a trigger state was added",

	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
	"Cannot add more than 3 trigger states",

	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
	"Cannot add more than 4 constraints per filter or trigger state",

	/* DBG_STATUS_RECORDING_NOT_STARTED */
	"The recording wasn't started",

	/* DBG_STATUS_DATA_DID_NOT_TRIGGER */
	"A trigger was configured, but it didn't trigger",

	/* DBG_STATUS_NO_DATA_RECORDED */
	"No data was recorded",

	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
	"Dump buffer is too small",

	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
	"Dumped data is not aligned to chunks",

	/* DBG_STATUS_UNKNOWN_CHIP */
	"Unknown chip",

	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
	"Failed allocating virtual memory",

	/* DBG_STATUS_BLOCK_IN_RESET */
	"The input block is in reset",

	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
	"Invalid MCP trace signature found in NVRAM",

	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
	"Invalid bundle ID found in NVRAM",

	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
	"Failed getting NVRAM image",

	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
	"NVRAM image is not dword-aligned",

	/* DBG_STATUS_NVRAM_READ_FAILED */
	"Failed reading from NVRAM",

	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
	"Idle check parsing failed",

	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
	"MCP Trace data is corrupt",

	/* DBG_STATUS_MCP_TRACE_NO_META */
	"Dump doesn't contain meta data - it must be provided in image file",

	/* DBG_STATUS_MCP_COULD_NOT_HALT */
	"Failed to halt MCP",

	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
	"Failed to resume MCP after halt",

	/* DBG_STATUS_RESERVED0 */
	"",

	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
	"Failed to empty SEMI sync FIFO",

	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
	"IGU FIFO data is corrupt",

	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
	"MCP failed to mask parities",

	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
	"FW Asserts parsing failed",

	/* DBG_STATUS_REG_FIFO_BAD_DATA */
	"GRC FIFO data is corrupt",

	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
	"Protection Override data is corrupt",

	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",

	/* DBG_STATUS_RESERVED1 */
	"",

	/* DBG_STATUS_NON_MATCHING_LINES */
	"Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)",

	/* DBG_STATUS_INSUFFICIENT_HW_IDS */
	"Insufficient HW IDs. Try to record less Storms/blocks",

	/* DBG_STATUS_DBG_BUS_IN_USE */
	"The debug bus is in use",

	/* DBG_STATUS_INVALID_STORM_DBG_MODE */
	"The storm debug mode is not supported in the current chip",

	/* DBG_STATUS_OTHER_ENGINE_BB_ONLY */
	"Other engine is supported only in BB",

	/* DBG_STATUS_FILTER_SINGLE_HW_ID */
	"The configured filter mode requires a single Storm/block input",

	/* DBG_STATUS_TRIGGER_SINGLE_HW_ID */
	"The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input",

	/* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
	"When triggering on Storm data, the Storm to trigger on must be specified"
};
5762 
/* Idle check severity names array.
 * Indexed by the idle check rule header's severity field
 * (see qed_parse_idle_chk_dump_rules).
 */
static const char * const s_idle_chk_severity_str[] = {
	"Error",
	"Error if no traffic",
	"Warning"
};

/* MCP Trace level names array */
static const char * const s_mcp_trace_level_str[] = {
	"ERROR",
	"TRACE",
	"DEBUG"
};

/* Access type names array (read/write flag used as the index) */
static const char * const s_access_strs[] = {
	"read",
	"write"
};

/* Privilege type names array */
static const char * const s_privilege_strs[] = {
	"VF",
	"PDA",
	"HV",
	"UA"
};

/* Protection type names array */
static const char * const s_protection_strs[] = {
	"(default)",
	"(default)",
	"(default)",
	"(default)",
	"override VF",
	"override PDA",
	"override HV",
	"override UA"
};

/* Master type names array; "???" marks unused/unknown master IDs */
static const char * const s_master_strs[] = {
	"???",
	"pxp",
	"mcp",
	"msdm",
	"psdm",
	"ysdm",
	"usdm",
	"tsdm",
	"xsdm",
	"dbu",
	"dmae",
	"jdap",
	"???",
	"???",
	"???",
	"???"
};
5822 
/* REG FIFO error messages array: looked up by err_code (the codes are not
 * contiguous, so this is a search table rather than a direct index).
 */
static struct reg_fifo_err s_reg_fifo_errors[] = {
	{1, "grc timeout"},
	{2, "address doesn't belong to any block"},
	{4, "reserved address in block or write to read-only address"},
	{8, "privilege/protection mismatch"},
	{16, "path isolation error"},
	{17, "RSL error"}
};

/* IGU FIFO sources array: parallels enum igu_fifo_sources (IGU_SRC_PXP0..7
 * map to the per-storm/PCIE/NIG names below).
 */
static const char * const s_igu_fifo_source_strs[] = {
	"TSTORM",
	"MSTORM",
	"USTORM",
	"XSTORM",
	"YSTORM",
	"PSTORM",
	"PCIE",
	"NIG_QM_PBF",
	"CAU",
	"ATTN",
	"GRC",
};

/* IGU FIFO error messages, indexed by the element's ERR_TYPE field */
static const char * const s_igu_fifo_error_strs[] = {
	"no error",
	"length error",
	"function disabled",
	"VF sent command to attention address",
	"host sent prod update command",
	"read of during interrupt register while in MIMD mode",
	"access to PXP BAR reserved address",
	"producer update command to attention index",
	"unknown error",
	"SB index not valid",
	"SB relative index and FID not found",
	"FID not match",
	"command with error flag asserted (PCI error or CAU discard)",
	"VF sent cleanup and RF cleanup is disabled",
	"cleanup command on type bigger than 4"
};
5866 
/* IGU FIFO address data: decoding table for IGU command addresses.
 * Ranges are inclusive, sorted, and cover 0x0-0x7ff; vf_desc (when not NULL)
 * is the alternate description for VF-originated commands.
 */
static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
	{0x0, 0x101, "MSI-X Memory", NULL,
	 IGU_ADDR_TYPE_MSIX_MEM},
	{0x102, 0x1ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x200, 0x200, "Write PBA[0:63]", NULL,
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x201, 0x201, "Write PBA[64:127]", "reserved",
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x202, 0x202, "Write PBA[128]", "reserved",
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x203, 0x3ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
	 IGU_ADDR_TYPE_WRITE_INT_ACK},
	{0x5f0, 0x5f0, "Attention bits update", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f1, 0x5f1, "Attention bits set", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f2, 0x5f2, "Attention bits clear", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f7, 0x5ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x600, 0x7ff, "Producer update", NULL,
	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
};

/******************************** Variables **********************************/

/* Temporary buffer, used for print size calculations.
 * qed_get_buf_ptr() redirects writes here when no results buffer was given,
 * so sprintf return values can be accumulated without storing the output.
 */
static char s_temp_buf[MAX_MSG_LEN];
5907 
5908 /**************************** Private Functions ******************************/
5909 
/* Advances position 'a' by 'b' steps within a cyclic buffer of the given
 * size, wrapping around at the buffer end.
 */
static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
{
	u32 sum = a + b;

	return sum % size;
}
5914 
/* Moves position 'a' back by 'b' steps within a cyclic buffer of the given
 * size; 'size' is added first so the unsigned subtraction wraps correctly.
 */
static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
{
	u32 diff = (size + a) - b;

	return diff % size;
}
5919 
5920 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
5921  * bytes) and returns them as a dword value. the specified buffer offset is
5922  * updated.
5923  */
qed_read_from_cyclic_buf(void * buf,u32 * offset,u32 buf_size,u8 num_bytes_to_read)5924 static u32 qed_read_from_cyclic_buf(void *buf,
5925 				    u32 *offset,
5926 				    u32 buf_size, u8 num_bytes_to_read)
5927 {
5928 	u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
5929 	u32 val = 0;
5930 
5931 	val_ptr = (u8 *)&val;
5932 
5933 	/* Assume running on a LITTLE ENDIAN and the buffer is network order
5934 	 * (BIG ENDIAN), as high order bytes are placed in lower memory address.
5935 	 */
5936 	for (i = 0; i < num_bytes_to_read; i++) {
5937 		val_ptr[i] = bytes_buf[*offset];
5938 		*offset = qed_cyclic_add(*offset, 1, buf_size);
5939 	}
5940 
5941 	return val;
5942 }
5943 
5944 /* Reads and returns the next byte from the specified buffer.
5945  * The specified buffer offset is updated.
5946  */
qed_read_byte_from_buf(void * buf,u32 * offset)5947 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
5948 {
5949 	return ((u8 *)buf)[(*offset)++];
5950 }
5951 
5952 /* Reads and returns the next dword from the specified buffer.
5953  * The specified buffer offset is updated.
5954  */
qed_read_dword_from_buf(void * buf,u32 * offset)5955 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
5956 {
5957 	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
5958 
5959 	*offset += 4;
5960 
5961 	return dword_val;
5962 }
5963 
5964 /* Reads the next string from the specified buffer, and copies it to the
5965  * specified pointer. The specified buffer offset is updated.
5966  */
qed_read_str_from_buf(void * buf,u32 * offset,u32 size,char * dest)5967 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
5968 {
5969 	const char *source_str = &((const char *)buf)[*offset];
5970 
5971 	OSAL_STRNCPY(dest, source_str, size);
5972 	dest[size - 1] = '\0';
5973 	*offset += size;
5974 }
5975 
5976 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
5977  * If the specified buffer in NULL, a temporary buffer pointer is returned.
5978  */
qed_get_buf_ptr(void * buf,u32 offset)5979 static char *qed_get_buf_ptr(void *buf, u32 offset)
5980 {
5981 	return buf ? (char *)buf + offset : s_temp_buf;
5982 }
5983 
5984 /* Reads a param from the specified buffer. Returns the number of dwords read.
5985  * If the returned str_param is NULL, the param is numeric and its value is
5986  * returned in num_param.
5987  * Otherwise, the param is a string and its pointer is returned in str_param.
5988  */
qed_read_param(u32 * dump_buf,const char ** param_name,const char ** param_str_val,u32 * param_num_val)5989 static u32 qed_read_param(u32 *dump_buf,
5990 			  const char **param_name,
5991 			  const char **param_str_val, u32 *param_num_val)
5992 {
5993 	char *char_buf = (char *)dump_buf;
5994 	size_t offset = 0;
5995 
5996 	/* Extract param name */
5997 	*param_name = char_buf;
5998 	offset += strlen(*param_name) + 1;
5999 
6000 	/* Check param type */
6001 	if (*(char_buf + offset++)) {
6002 		/* String param */
6003 		*param_str_val = char_buf + offset;
6004 		*param_num_val = 0;
6005 		offset += strlen(*param_str_val) + 1;
6006 		if (offset & 0x3)
6007 			offset += (4 - (offset & 0x3));
6008 	} else {
6009 		/* Numeric param */
6010 		*param_str_val = NULL;
6011 		if (offset & 0x3)
6012 			offset += (4 - (offset & 0x3));
6013 		*param_num_val = *(u32 *)(char_buf + offset);
6014 		offset += 4;
6015 	}
6016 
6017 	return (u32)offset / 4;
6018 }
6019 
6020 /* Reads a section header from the specified buffer.
6021  * Returns the number of dwords read.
6022  */
qed_read_section_hdr(u32 * dump_buf,const char ** section_name,u32 * num_section_params)6023 static u32 qed_read_section_hdr(u32 *dump_buf,
6024 				const char **section_name,
6025 				u32 *num_section_params)
6026 {
6027 	const char *param_str_val;
6028 
6029 	return qed_read_param(dump_buf,
6030 			      section_name, &param_str_val, num_section_params);
6031 }
6032 
6033 /* Reads section params from the specified buffer and prints them to the results
6034  * buffer. Returns the number of dwords read.
6035  */
qed_print_section_params(u32 * dump_buf,u32 num_section_params,char * results_buf,u32 * num_chars_printed)6036 static u32 qed_print_section_params(u32 *dump_buf,
6037 				    u32 num_section_params,
6038 				    char *results_buf, u32 *num_chars_printed)
6039 {
6040 	u32 i, dump_offset = 0, results_offset = 0;
6041 
6042 	for (i = 0; i < num_section_params; i++) {
6043 		const char *param_name, *param_str_val;
6044 		u32 param_num_val = 0;
6045 
6046 		dump_offset += qed_read_param(dump_buf + dump_offset,
6047 					      &param_name,
6048 					      &param_str_val, &param_num_val);
6049 
6050 		if (param_str_val) {
6051 			results_offset +=
6052 				sprintf(qed_get_buf_ptr(results_buf,
6053 							results_offset),
6054 					"%s: %s\n", param_name, param_str_val);
6055 		} else if (strcmp(param_name, "fw-timestamp")) {
6056 			results_offset +=
6057 				sprintf(qed_get_buf_ptr(results_buf,
6058 							results_offset),
6059 					"%s: %d\n", param_name, param_num_val);
6060 		}
6061 	}
6062 
6063 	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6064 				  "\n");
6065 
6066 	*num_chars_printed = results_offset;
6067 
6068 	return dump_offset;
6069 }
6070 
/* Returns the block name that matches the specified block ID,
 * or NULL if not found.
 * NOTE(review): this path itself never returns NULL - it indexes the
 * user-data binary array directly by block_id and returns the entry's name
 * field unconditionally; presumably block_id is always valid here - confirm
 * against callers.
 */
static const char *qed_dbg_get_block_name(struct ecore_hwfn *p_hwfn,
					  enum block_id block_id)
{
	/* The BLOCKS_USER_DATA binary array is indexed directly by block ID */
	const struct dbg_block_user *block =
	    (const struct dbg_block_user *)
	    p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id;

	return (const char *)block->name;
}
6083 
qed_dbg_get_user_data(struct ecore_hwfn * p_hwfn)6084 static struct dbg_tools_user_data *qed_dbg_get_user_data(struct ecore_hwfn
6085 							 *p_hwfn)
6086 {
6087 	return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
6088 }
6089 
/* Parses the idle check rules and returns the number of characters printed.
 * In case of parsing error, returns 0.
 *
 * dump_buf/dump_buf_end bound the dumped rule records; each record is a
 * dbg_idle_chk_result_hdr followed by per-register headers and values.
 * When results_buf is NULL, output is redirected to the shared temporary
 * buffer (via qed_get_buf_ptr) so only the printed size is computed.
 */
static u32 qed_parse_idle_chk_dump_rules(struct ecore_hwfn *p_hwfn,
					 u32 *dump_buf,
					 u32 *dump_buf_end,
					 u32 num_rules,
					 bool print_fw_idle_chk,
					 char *results_buf,
					 u32 *num_errors, u32 *num_warnings)
{
	/* Offset in results_buf in bytes */
	u32 results_offset = 0;

	u32 rule_idx;
	u16 i, j;

	*num_errors = 0;
	*num_warnings = 0;

	/* Go over dumped results */
	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
	     rule_idx++) {
		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
		struct dbg_idle_chk_result_hdr *hdr;
		const char *parsing_str, *lsi_msg;
		u32 parsing_str_offset;
		bool has_fw_msg;
		u8 curr_reg_id;

		/* Locate the rule's parsing data by its rule ID, and from it
		 * the rule's block of NUL-separated strings: optionally a FW
		 * message, then the LSI message, then the register names.
		 */
		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
		rule_parsing_data =
		    (const struct dbg_idle_chk_rule_parsing_data *)
		    p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr +
		    hdr->rule_id;
		parsing_str_offset =
		    GET_FIELD(rule_parsing_data->data,
			      DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
		has_fw_msg =
		    GET_FIELD(rule_parsing_data->data,
			      DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
		parsing_str = (const char *)
		    p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr +
		    parsing_str_offset;
		lsi_msg = parsing_str;
		curr_reg_id = 0;

		/* An out-of-range severity indicates corrupt dump data */
		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
			return 0;

		/* Skip rule header */
		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));

		/* Update errors/warnings count */
		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
			(*num_errors)++;
		else
			(*num_warnings)++;

		/* Print rule severity */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), "%s: ",
			    s_idle_chk_severity_str[hdr->severity]);

		/* Print rule message: the FW message (when present and
		 * requested) or the LSI message. In either case advance
		 * parsing_str past the message(s) to the register names.
		 */
		if (has_fw_msg)
			parsing_str += strlen(parsing_str) + 1;
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), "%s.",
			    has_fw_msg &&
			    print_fw_idle_chk ? parsing_str : lsi_msg);
		parsing_str += strlen(parsing_str) + 1;

		/* Print register values */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), " Registers:");
		for (i = 0;
		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
		     i++) {
			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
			bool is_mem;
			u8 reg_id;

			reg_hdr =
				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
			is_mem = GET_FIELD(reg_hdr->data,
					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
			reg_id = GET_FIELD(reg_hdr->data,
					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);

			/* Skip reg header */
			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));

			/* Skip register names until the required reg_id is
			 * reached.
			 */
			while (reg_id > curr_reg_id) {
				curr_reg_id++;
				parsing_str += strlen(parsing_str) + 1;
			}

			/* Print "name[entry]=v1,v2,..." - the entry index is
			 * printed only for memory entries of condition regs.
			 */
			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset), " %s",
				    parsing_str);
			if (i < hdr->num_dumped_cond_regs && is_mem)
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "[%d]", hdr->mem_entry_id +
					    reg_hdr->start_entry);
			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset), "=");
			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "0x%x", *dump_buf);
				if (j < reg_hdr->size - 1)
					results_offset +=
					    sprintf(qed_get_buf_ptr
						    (results_buf,
						     results_offset), ",");
			}
		}

		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
	}

	/* Check if end of dump buffer was exceeded */
	if (dump_buf > dump_buf_end)
		return 0;

	return results_offset;
}
6231 
/* Parses an idle check dump buffer.
 * If result_buf is not NULL, the idle check results are printed to it.
 * In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 *
 * Expected dump layout: a "global_params" section, then an "idle_chk"
 * section with a single "num_rules" param, then the dumped rule records.
 */
static enum dbg_status qed_parse_idle_chk_dump(struct ecore_hwfn *p_hwfn,
					       u32 *dump_buf,
					       u32 num_dumped_dwords,
					       char *results_buf,
					       u32 *parsed_results_bytes,
					       u32 *num_errors,
					       u32 *num_warnings)
{
	const char *section_name, *param_name, *param_str_val;
	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
	u32 num_section_params = 0, num_rules;

	/* Offset in results_buf in bytes */
	u32 results_offset = 0;

	*parsed_results_bytes = 0;
	*num_errors = 0;
	*num_warnings = 0;

	/* Parsing requires the parsing-strings and idle-check parsing-data
	 * binary arrays to have been registered beforehand.
	 */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read idle_chk section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "idle_chk") || num_section_params != 1)
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &num_rules);
	if (strcmp(param_name, "num_rules"))
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

	if (num_rules) {
		u32 rules_print_size;

		/* The same dumped rules are parsed twice: once printing the
		 * FW messages, once printing the LSI messages. A zero print
		 * size from the rule parser signals a parsing error.
		 */

		/* Print FW output */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "FW_IDLE_CHECK:\n");
		rules_print_size =
			qed_parse_idle_chk_dump_rules(p_hwfn,
						      dump_buf,
						      dump_buf_end,
						      num_rules,
						      true,
						      results_buf ?
						      results_buf +
						      results_offset :
						      NULL,
						      num_errors,
						      num_warnings);
		results_offset += rules_print_size;
		if (!rules_print_size)
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

		/* Print LSI output */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nLSI_IDLE_CHECK:\n");
		rules_print_size =
			qed_parse_idle_chk_dump_rules(p_hwfn,
						      dump_buf,
						      dump_buf_end,
						      num_rules,
						      false,
						      results_buf ?
						      results_buf +
						      results_offset :
						      NULL,
						      num_errors,
						      num_warnings);
		results_offset += rules_print_size;
		if (!rules_print_size)
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	}

	/* Print errors/warnings count */
	if (*num_errors)
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
			    *num_errors, *num_warnings);
	else if (*num_warnings)
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check completed successfully (with %d warnings)\n",
			    *num_warnings);
	else
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check completed successfully\n");

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;

	return DBG_STATUS_OK;
}
6352 
6353 /* Allocates and fills MCP Trace meta data based on the specified meta data
6354  * dump buffer.
6355  * Returns debug status code.
6356  */
6357 static enum dbg_status
qed_mcp_trace_alloc_meta_data(struct ecore_hwfn * p_hwfn,const u32 * meta_buf)6358 qed_mcp_trace_alloc_meta_data(struct ecore_hwfn *p_hwfn,
6359 			      const u32 *meta_buf)
6360 {
6361 	struct dbg_tools_user_data *dev_user_data;
6362 	u32 offset = 0, signature, i;
6363 	struct mcp_trace_meta *meta;
6364 	u8 *meta_buf_bytes = (u8 *)(osal_uintptr_t)meta_buf;
6365 
6366 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6367 	meta = &dev_user_data->mcp_trace_meta;
6368 
6369 	/* Free the previous meta before loading a new one. */
6370 	if (meta->is_allocated)
6371 		qed_mcp_trace_free_meta_data(p_hwfn);
6372 
6373 	OSAL_MEMSET(meta, 0, sizeof(*meta));
6374 
6375 	/* Read first signature */
6376 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6377 	if (signature != NVM_MAGIC_VALUE)
6378 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6379 
6380 	/* Read no. of modules and allocate memory for their pointers */
6381 	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6382 	meta->modules = (char **)OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
6383 				    meta->modules_num * sizeof(char *));
6384 	if (!meta->modules)
6385 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6386 
6387 	/* Allocate and read all module strings */
6388 	for (i = 0; i < meta->modules_num; i++) {
6389 		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6390 
6391 		*(meta->modules + i) = (char *)OSAL_ZALLOC(p_hwfn->p_dev,
6392 							   GFP_KERNEL,
6393 							   module_len);
6394 		if (!(*(meta->modules + i))) {
6395 			/* Update number of modules to be released */
6396 			meta->modules_num = i ? i - 1 : 0;
6397 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6398 		}
6399 
6400 		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6401 				      *(meta->modules + i));
6402 		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6403 			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6404 	}
6405 
6406 	/* Read second signature */
6407 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6408 	if (signature != NVM_MAGIC_VALUE)
6409 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6410 
6411 	/* Read number of formats and allocate memory for all formats */
6412 	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6413 	meta->formats =
6414 		(struct mcp_trace_format *)OSAL_ZALLOC(p_hwfn->p_dev,
6415 						       GFP_KERNEL,
6416 						       meta->formats_num *
6417 					       sizeof(struct mcp_trace_format));
6418 	if (!meta->formats)
6419 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6420 
6421 	/* Allocate and read all strings */
6422 	for (i = 0; i < meta->formats_num; i++) {
6423 		struct mcp_trace_format *format_ptr = &meta->formats[i];
6424 		u8 format_len;
6425 
6426 		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6427 							   &offset);
6428 		format_len = GET_MFW_FIELD(format_ptr->data,
6429 					   MCP_TRACE_FORMAT_LEN);
6430 		format_ptr->format_str = (char *)OSAL_ZALLOC(p_hwfn->p_dev,
6431 							     GFP_KERNEL,
6432 							     format_len);
6433 		if (!format_ptr->format_str) {
6434 			/* Update number of modules to be released */
6435 			meta->formats_num = i ? i - 1 : 0;
6436 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6437 		}
6438 
6439 		qed_read_str_from_buf(meta_buf_bytes,
6440 				      &offset,
6441 				      format_len, format_ptr->format_str);
6442 	}
6443 
6444 	meta->is_allocated = true;
6445 	return DBG_STATUS_OK;
6446 }
6447 
6448 /* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results
6449  * are printed to it. The parsing status is returned.
6450  * Arguments:
6451  * trace_buf - MCP trace cyclic buffer
6452  * trace_buf_size - MCP trace cyclic buffer size in bytes
6453  * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6454  *		 buffer.
6455  * data_size - size in bytes of data to parse.
6456  * parsed_buf - destination buffer for parsed data.
6457  * parsed_results_bytes - size of parsed data in bytes.
6458  */
static enum dbg_status qed_parse_mcp_trace_buf(struct ecore_hwfn *p_hwfn,
					       u8 *trace_buf,
					       u32 trace_buf_size,
					       u32 data_offset,
					       u32 data_size,
					       char *parsed_buf,
					       u32 *parsed_results_bytes)
{
	struct dbg_tools_user_data *dev_user_data;
	struct mcp_trace_meta *meta;
	u32 param_mask, param_shift;
	enum dbg_status status;

	dev_user_data = qed_dbg_get_user_data(p_hwfn);
	meta = &dev_user_data->mcp_trace_meta;
	*parsed_results_bytes = 0;

	/* Meta data (modules + format strings) must have been allocated
	 * before the cyclic buffer can be decoded.
	 */
	if (!meta->is_allocated)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	status = DBG_STATUS_OK;

	while (data_size) {
		struct mcp_trace_format *format_ptr;
		u8 format_level, format_module;
		u32 params[3] = { 0, 0, 0 };
		u32 header, format_idx, i;

		if (data_size < MFW_TRACE_ENTRY_SIZE)
			return DBG_STATUS_MCP_TRACE_BAD_DATA;

		header = qed_read_from_cyclic_buf(trace_buf,
						  &data_offset,
						  trace_buf_size,
						  MFW_TRACE_ENTRY_SIZE);
		data_size -= MFW_TRACE_ENTRY_SIZE;
		format_idx = header & MFW_TRACE_EVENTID_MASK;

		/* Skip message if its index doesn't exist in the meta data */
		if (format_idx >= meta->formats_num) {
			u8 format_size = (u8)GET_MFW_FIELD(header,
							   MFW_TRACE_PRM_SIZE);

			if (data_size < format_size)
				return DBG_STATUS_MCP_TRACE_BAD_DATA;

			data_offset = qed_cyclic_add(data_offset,
						     format_size,
						     trace_buf_size);
			data_size -= format_size;
			continue;
		}

		format_ptr =
			(struct mcp_trace_format *)&meta->formats[format_idx];

		/* Extract up to 3 parameters; each size field is 2 bits wide
		 * and encodes 0 (no param), 1, 2 or 4 (encoded as 3) bytes.
		 */
		for (i = 0,
		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
		     MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
		     i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
			/* Extract param size (0..3) */
			u8 param_size = (u8)((format_ptr->data & param_mask) >>
					     param_shift);

			/* If the param size is zero, there are no other
			 * parameters.
			 */
			if (!param_size)
				break;

			/* Size is encoded using 2 bits, where 3 is used to
			 * encode 4.
			 */
			if (param_size == 3)
				param_size = 4;

			if (data_size < param_size)
				return DBG_STATUS_MCP_TRACE_BAD_DATA;

			params[i] = qed_read_from_cyclic_buf(trace_buf,
							     &data_offset,
							     trace_buf_size,
							     param_size);
			data_size -= param_size;
		}

		format_level = (u8)GET_MFW_FIELD(format_ptr->data,
						 MCP_TRACE_FORMAT_LEVEL);
		format_module = (u8)GET_MFW_FIELD(format_ptr->data,
						  MCP_TRACE_FORMAT_MODULE);
		if (format_level >= OSAL_ARRAY_SIZE(s_mcp_trace_level_str))
			return DBG_STATUS_MCP_TRACE_BAD_DATA;

		/* Validate the module index as well - without this check a
		 * corrupt meta-data entry would cause an out-of-bounds read
		 * of meta->modules below.
		 */
		if (format_module >= meta->modules_num)
			return DBG_STATUS_MCP_TRACE_BAD_DATA;

		/* Print current message to results buffer */
		*parsed_results_bytes +=
			OSAL_SPRINTF(qed_get_buf_ptr(parsed_buf,
						*parsed_results_bytes),
				"%s %-8s: ",
				s_mcp_trace_level_str[format_level],
				meta->modules[format_module]);
		*parsed_results_bytes +=
		    sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
			    format_ptr->format_str,
			    params[0], params[1], params[2]);
	}

	/* Add string NULL terminator */
	(*parsed_results_bytes)++;

	return status;
}
6572 
6573 /* Parses an MCP Trace dump buffer.
6574  * If result_buf is not NULL, the MCP Trace results are printed to it.
6575  * In any case, the required results buffer size is assigned to
6576  * parsed_results_bytes.
6577  * The parsing status is returned.
6578  */
static enum dbg_status qed_parse_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
						u32 *dump_buf,
						char *results_buf,
						u32 *parsed_results_bytes,
						bool free_meta_data)
{
	const char *section_name, *param_name, *param_str_val;
	u32 data_size, trace_data_dwords, trace_meta_dwords;
	u32 offset, results_offset, results_buf_bytes;
	u32 param_num_val, num_section_params;
	struct mcp_trace *trace;
	enum dbg_status status;
	const u32 *meta_buf;
	u8 *trace_buf;

	*parsed_results_bytes = 0;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	/* Print global params. NOTE(review): results_offset is presumably
	 * initialized by this call - confirm against the helper.
	 */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read trace_data section; it must carry exactly one param ("size",
	 * the trace data length in dwords).
	 */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	trace_data_dwords = param_num_val;

	/* Prepare trace info. The section data starts with a struct mcp_trace
	 * header, which must carry the MFW signature and a non-zero size.
	 */
	trace = (struct mcp_trace *)dump_buf;
	if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	/* The cyclic trace data follows the header; parse from the oldest
	 * entry up to the producer index.
	 */
	trace_buf = (u8 *)dump_buf + sizeof(*trace);
	offset = trace->trace_oldest;
	data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
	dump_buf += trace_data_dwords;

	/* Read meta_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "mcp_trace_meta"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	trace_meta_dwords = param_num_val;

	/* Choose meta data buffer: prefer the meta data embedded in the dump;
	 * fall back to the user-registered buffer when the dump has none.
	 */
	if (!trace_meta_dwords) {
		/* Dump doesn't include meta data */
		struct dbg_tools_user_data *dev_user_data =
			qed_dbg_get_user_data(p_hwfn);

		if (!dev_user_data->mcp_trace_user_meta_buf)
			return DBG_STATUS_MCP_TRACE_NO_META;

		meta_buf = dev_user_data->mcp_trace_user_meta_buf;
	} else {
		/* Dump includes meta data */
		meta_buf = dump_buf;
	}

	/* Allocate meta data memory */
	status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
	if (status != DBG_STATUS_OK)
		return status;

	/* Decode the cyclic buffer; pass NULL when the caller only wants the
	 * required results-buffer size.
	 */
	status = qed_parse_mcp_trace_buf(p_hwfn,
					 trace_buf,
					 trace->size,
					 offset,
					 data_size,
					 results_buf ?
					 results_buf + results_offset :
					 NULL,
					 &results_buf_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	/* free_meta_data=false keeps the meta data allocated so subsequent
	 * "continuation" parses can reuse it.
	 */
	if (free_meta_data)
		qed_mcp_trace_free_meta_data(p_hwfn);

	*parsed_results_bytes = results_offset + results_buf_bytes;

	return DBG_STATUS_OK;
}
6678 
6679 /* Parses a Reg FIFO dump buffer.
6680  * If result_buf is not NULL, the Reg FIFO results are printed to it.
6681  * In any case, the required results buffer size is assigned to
6682  * parsed_results_bytes.
6683  * The parsing status is returned.
6684  */
static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
					       char *results_buf,
					       u32 *parsed_results_bytes)
{
	const char *section_name, *param_name, *param_str_val;
	u32 param_num_val, num_section_params, num_elements;
	struct reg_fifo_element *elements;
	/* The element index must be as wide as num_elements (u32): a u8
	 * counter would wrap at 256 and loop forever on a large dump.
	 */
	u32 i, results_offset = 0;
	u8 j, err_code, vf_val;
	char vf_str[4];

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_REG_FIFO_BAD_DATA;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read reg_fifo_data section; its single "size" param must be a
	 * whole number of FIFO elements.
	 */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "reg_fifo_data"))
		return DBG_STATUS_REG_FIFO_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_REG_FIFO_BAD_DATA;
	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
		return DBG_STATUS_REG_FIFO_BAD_DATA;
	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
	elements = (struct reg_fifo_element *)dump_buf;

	/* Decode elements */
	for (i = 0; i < num_elements; i++) {
		const char *err_msg = NULL;

		/* Discover if element belongs to a VF or a PF */
		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
			sprintf(vf_str, "%s", "N/A");
		else
			sprintf(vf_str, "%d", vf_val);

		/* Find error message */
		err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR);
		for (j = 0; j < OSAL_ARRAY_SIZE(s_reg_fifo_errors) && !err_msg;
		     j++)
			if (err_code == s_reg_fifo_errors[j].err_code)
				err_msg = s_reg_fifo_errors[j].err_msg;

		/* Add parsed element to parsed buffer */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "raw: 0x%016"PRIx64", address: 0x%07x, access: %-5s, pf: %2d, vf: %s, "
			    "port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n",
			    elements[i].data,
			    (u32)GET_FIELD(elements[i].data,
					   REG_FIFO_ELEMENT_ADDRESS) *
			    REG_FIFO_ELEMENT_ADDR_FACTOR,
			    s_access_strs[GET_FIELD(elements[i].data,
						    REG_FIFO_ELEMENT_ACCESS)],
			    (u32)GET_FIELD(elements[i].data,
					   REG_FIFO_ELEMENT_PF),
			    vf_str,
			    (u32)GET_FIELD(elements[i].data,
					   REG_FIFO_ELEMENT_PORT),
			    s_privilege_strs[GET_FIELD(elements[i].data,
						REG_FIFO_ELEMENT_PRIVILEGE)],
			    s_protection_strs[GET_FIELD(elements[i].data,
						REG_FIFO_ELEMENT_PROTECTION)],
			    s_master_strs[GET_FIELD(elements[i].data,
						    REG_FIFO_ELEMENT_MASTER)],
			    err_msg ? err_msg : "unknown error code");
	}

	results_offset += sprintf(qed_get_buf_ptr(results_buf,
						  results_offset),
				  "fifo contained %d elements", num_elements);

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;

	return DBG_STATUS_OK;
}
6774 
/* Decodes a single IGU FIFO element into a human-readable line appended to
 * results_buf at *results_offset (which is advanced accordingly).
 */
static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
						  *element, char
						  *results_buf,
						  u32 *results_offset)
{
	const struct igu_fifo_addr_data *found_addr = NULL;
	u8 source, err_type, i, is_cleanup;
	char parsed_addr_data[32];
	char parsed_wr_data[256];
	u32 wr_data, prod_cons;
	bool is_wr_cmd, is_pf;
	u16 cmd_addr;
	u64 dword12;

	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
	 * FIFO element.
	 */
	dword12 = ((u64)element->dword2 << 32) | element->dword1;
	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);

	/* source and err_type are later used as lookup-table indices, so
	 * they must be within the string tables' bounds.
	 */
	if (source >= OSAL_ARRAY_SIZE(s_igu_fifo_source_strs))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;
	if (err_type >= OSAL_ARRAY_SIZE(s_igu_fifo_error_strs))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;

	/* Find address data: the entry whose [start_addr, end_addr] range
	 * contains cmd_addr. An unknown address is treated as bad data.
	 */
	for (i = 0; i < OSAL_ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr;
	     i++) {
		const struct igu_fifo_addr_data *curr_addr =
			&s_igu_fifo_addr_data[i];

		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
		    curr_addr->end_addr)
			found_addr = curr_addr;
	}

	if (!found_addr)
		return DBG_STATUS_IGU_FIFO_BAD_DATA;

	/* Prepare parsed address data (address-type specific suffix) */
	switch (found_addr->type) {
	case IGU_ADDR_TYPE_MSIX_MEM:
		/* MSI-X memory is addressed in 2-address units per vector */
		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
		break;
	case IGU_ADDR_TYPE_WRITE_INT_ACK:
	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
		/* SB index is the offset from the range's start address */
		sprintf(parsed_addr_data,
			" SB = 0x%x", cmd_addr - found_addr->start_addr);
		break;
	default:
		parsed_addr_data[0] = '\0';
	}

	/* Read commands carry no write data to decode */
	if (!is_wr_cmd) {
		parsed_wr_data[0] = '\0';
		goto out;
	}

	/* Prepare parsed write data */
	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);

	if (source == IGU_SRC_ATTN) {
		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
	} else {
		if (is_cleanup) {
			u8 cleanup_val, cleanup_type;

			cleanup_val =
				GET_FIELD(wr_data,
					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
			cleanup_type =
			    GET_FIELD(wr_data,
				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);

			sprintf(parsed_wr_data,
				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
				cleanup_val ? "set" : "clear",
				cleanup_type);
		} else {
			u8 update_flag, en_dis_int_for_sb, segment;
			u8 timer_mask;

			update_flag = GET_FIELD(wr_data,
						IGU_FIFO_WR_DATA_UPDATE_FLAG);
			en_dis_int_for_sb =
				GET_FIELD(wr_data,
					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
			segment = GET_FIELD(wr_data,
					    IGU_FIFO_WR_DATA_SEGMENT);
			timer_mask = GET_FIELD(wr_data,
					       IGU_FIFO_WR_DATA_TIMER_MASK);

			sprintf(parsed_wr_data,
				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
				prod_cons,
				update_flag ? "update" : "nop",
				en_dis_int_for_sb ?
				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
				"enable",
				segment ? "attn" : "regular",
				timer_mask);
		}
	}
out:
	/* Add parsed element to parsed buffer */
	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
						   *results_offset),
				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
				   element->dword2, element->dword1,
				   element->dword0,
				   is_pf ? "pf" : "vf",
				   GET_FIELD(element->dword0,
					     IGU_FIFO_ELEMENT_DWORD0_FID),
				   s_igu_fifo_source_strs[source],
				   is_wr_cmd ? "wr" : "rd",
				   cmd_addr,
				   (!is_pf && found_addr->vf_desc)
				   ? found_addr->vf_desc
				   : found_addr->desc,
				   parsed_addr_data,
				   parsed_wr_data,
				   s_igu_fifo_error_strs[err_type]);

	return DBG_STATUS_OK;
}
6906 
6907 /* Parses an IGU FIFO dump buffer.
6908  * If result_buf is not NULL, the IGU FIFO results are printed to it.
6909  * In any case, the required results buffer size is assigned to
6910  * parsed_results_bytes.
6911  * The parsing status is returned.
6912  */
static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
					       char *results_buf,
					       u32 *parsed_results_bytes)
{
	const char *section_name, *param_name, *param_str_val;
	u32 param_num_val, num_section_params, num_elements;
	struct igu_fifo_element *elements;
	enum dbg_status status;
	u32 results_offset = 0;
	/* The element index must be as wide as num_elements (u32): a u8
	 * counter would wrap at 256 and loop forever on a large dump.
	 */
	u32 i;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read igu_fifo_data section; its single "size" param must be a
	 * whole number of FIFO elements.
	 */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "igu_fifo_data"))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;
	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
		return DBG_STATUS_IGU_FIFO_BAD_DATA;
	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
	elements = (struct igu_fifo_element *)dump_buf;

	/* Decode elements */
	for (i = 0; i < num_elements; i++) {
		status = qed_parse_igu_fifo_element(&elements[i],
						    results_buf,
						    &results_offset);
		if (status != DBG_STATUS_OK)
			return status;
	}

	results_offset += sprintf(qed_get_buf_ptr(results_buf,
						  results_offset),
				  "fifo contained %d elements", num_elements);

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;

	return DBG_STATUS_OK;
}
6967 
6968 static enum dbg_status
qed_parse_protection_override_dump(u32 * dump_buf,char * results_buf,u32 * parsed_results_bytes)6969 qed_parse_protection_override_dump(u32 *dump_buf,
6970 				   char *results_buf,
6971 				   u32 *parsed_results_bytes)
6972 {
6973 	const char *section_name, *param_name, *param_str_val;
6974 	u32 param_num_val, num_section_params, num_elements;
6975 	struct protection_override_element *elements;
6976 	u32 results_offset = 0;
6977 	u8 i;
6978 
6979 	/* Read global_params section */
6980 	dump_buf += qed_read_section_hdr(dump_buf,
6981 					 &section_name, &num_section_params);
6982 	if (strcmp(section_name, "global_params"))
6983 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6984 
6985 	/* Print global params */
6986 	dump_buf += qed_print_section_params(dump_buf,
6987 					     num_section_params,
6988 					     results_buf, &results_offset);
6989 
6990 	/* Read protection_override_data section */
6991 	dump_buf += qed_read_section_hdr(dump_buf,
6992 					 &section_name, &num_section_params);
6993 	if (strcmp(section_name, "protection_override_data"))
6994 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6995 	dump_buf += qed_read_param(dump_buf,
6996 				   &param_name, &param_str_val, &param_num_val);
6997 	if (strcmp(param_name, "size"))
6998 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6999 	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7000 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7001 	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7002 	elements = (struct protection_override_element *)dump_buf;
7003 
7004 	/* Decode elements */
7005 	for (i = 0; i < num_elements; i++) {
7006 		u32 address = GET_FIELD(elements[i].data,
7007 					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7008 			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7009 
7010 		results_offset +=
7011 		    sprintf(qed_get_buf_ptr(results_buf,
7012 					    results_offset),
7013 			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7014 			    i, address,
7015 			    (u32)GET_FIELD(elements[i].data,
7016 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7017 			    (u32)GET_FIELD(elements[i].data,
7018 				      PROTECTION_OVERRIDE_ELEMENT_READ),
7019 			    (u32)GET_FIELD(elements[i].data,
7020 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
7021 			    s_protection_strs[GET_FIELD(elements[i].data,
7022 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7023 			    s_protection_strs[GET_FIELD(elements[i].data,
7024 				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7025 	}
7026 
7027 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7028 						  results_offset),
7029 				  "protection override contained %d elements",
7030 				  num_elements);
7031 
7032 	/* Add 1 for string NULL termination */
7033 	*parsed_results_bytes = results_offset + 1;
7034 
7035 	return DBG_STATUS_OK;
7036 }
7037 
7038 /* Parses a FW Asserts dump buffer.
7039  * If result_buf is not NULL, the FW Asserts results are printed to it.
7040  * In any case, the required results buffer size is assigned to
7041  * parsed_results_bytes.
7042  * The parsing status is returned.
7043  */
static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
						 char *results_buf,
						 u32 *parsed_results_bytes)
{
	u32 num_section_params, param_num_val, i, results_offset = 0;
	const char *param_name, *param_str_val, *section_name;
	bool last_section_found = false;

	*parsed_results_bytes = 0;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Consume per-storm "fw_asserts" sections until the "last" marker
	 * section; any other section name is a parse error.
	 */
	while (!last_section_found) {
		dump_buf += qed_read_section_hdr(dump_buf,
						 &section_name,
						 &num_section_params);
		if (!strcmp(section_name, "fw_asserts")) {
			/* Extract params: "storm" (letter) and "size"
			 * (dump length in dwords) are the only ones allowed.
			 */
			const char *storm_letter = NULL;
			u32 storm_dump_size = 0;

			for (i = 0; i < num_section_params; i++) {
				dump_buf += qed_read_param(dump_buf,
							   &param_name,
							   &param_str_val,
							   &param_num_val);
				if (!strcmp(param_name, "storm"))
					storm_letter = param_str_val;
				else if (!strcmp(param_name, "size"))
					storm_dump_size = param_num_val;
				else
					return
					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
			}

			/* Both params are mandatory */
			if (!storm_letter || !storm_dump_size)
				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;

			/* Print data: a storm header line followed by one
			 * hex dword per line.
			 */
			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset),
				    "\n%sSTORM_ASSERT: size=%d\n",
				    storm_letter, storm_dump_size);
			for (i = 0; i < storm_dump_size; i++, dump_buf++)
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "%08x\n", *dump_buf);
		} else if (!strcmp(section_name, "last")) {
			last_section_found = true;
		} else {
			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
		}
	}

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;

	return DBG_STATUS_OK;
}
7114 
7115 /***************************** Public Functions *******************************/
7116 
qed_dbg_user_set_bin_ptr(struct ecore_hwfn * p_hwfn,const u8 * const bin_ptr)7117 enum dbg_status qed_dbg_user_set_bin_ptr(struct ecore_hwfn *p_hwfn,
7118 					 const u8 * const bin_ptr)
7119 {
7120 	struct bin_buffer_hdr *buf_hdrs =
7121 			(struct bin_buffer_hdr *)(osal_uintptr_t)bin_ptr;
7122 	u8 buf_id;
7123 
7124 	/* Convert binary data to debug arrays */
7125 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
7126 		qed_set_dbg_bin_buf(p_hwfn,
7127 				    (enum bin_dbg_buffer_type)buf_id,
7128 				    (const u32 *)(bin_ptr +
7129 						  buf_hdrs[buf_id].offset),
7130 						  buf_hdrs[buf_id].length);
7131 
7132 	return DBG_STATUS_OK;
7133 }
7134 
enum dbg_status qed_dbg_alloc_user_data(__rte_unused struct ecore_hwfn *p_hwfn,
					void **user_data_ptr)
{
	/* Allocate a zeroed per-hwfn debug-tools user-data context; on
	 * failure the output pointer is left NULL.
	 * NOTE(review): p_hwfn is marked __rte_unused, presumably because
	 * OSAL_ZALLOC may not expand its device argument in some builds.
	 */
	*user_data_ptr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				     sizeof(struct dbg_tools_user_data));

	return *user_data_ptr ? DBG_STATUS_OK :
				DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
}
7145 
qed_dbg_get_status_str(enum dbg_status status)7146 const char *qed_dbg_get_status_str(enum dbg_status status)
7147 {
7148 	return (status <
7149 		MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7150 }
7151 
enum dbg_status qed_get_idle_chk_results_buf_size(struct ecore_hwfn *p_hwfn,
						  u32 *dump_buf,
						  u32 num_dumped_dwords,
						  u32 *results_buf_size)
{
	u32 errors, warnings;

	/* Dry-run parse (NULL results buffer) to compute the required
	 * buffer size; error/warning counts are discarded.
	 */
	return qed_parse_idle_chk_dump(p_hwfn, dump_buf, num_dumped_dwords,
				       NULL, results_buf_size,
				       &errors, &warnings);
}
7166 
enum dbg_status qed_print_idle_chk_results(struct ecore_hwfn *p_hwfn,
					   u32 *dump_buf,
					   u32 num_dumped_dwords,
					   char *results_buf,
					   u32 *num_errors,
					   u32 *num_warnings)
{
	u32 needed_bytes;

	/* Parse and print into results_buf; the computed size is not
	 * returned to the caller here.
	 */
	return qed_parse_idle_chk_dump(p_hwfn, dump_buf, num_dumped_dwords,
				       results_buf, &needed_bytes,
				       num_errors, num_warnings);
}
7183 
qed_dbg_mcp_trace_set_meta_data(struct ecore_hwfn * p_hwfn,const u32 * meta_buf)7184 void qed_dbg_mcp_trace_set_meta_data(struct ecore_hwfn *p_hwfn,
7185 				     const u32 *meta_buf)
7186 {
7187 	struct dbg_tools_user_data *dev_user_data =
7188 		qed_dbg_get_user_data(p_hwfn);
7189 
7190 	dev_user_data->mcp_trace_user_meta_buf = meta_buf;
7191 }
7192 
7193 enum dbg_status
qed_get_mcp_trace_results_buf_size(struct ecore_hwfn * p_hwfn,u32 * dump_buf,__rte_unused u32 num_dumped_dwords,u32 * results_buf_size)7194 qed_get_mcp_trace_results_buf_size(struct ecore_hwfn *p_hwfn,
7195 				   u32 *dump_buf,
7196 				   __rte_unused u32 num_dumped_dwords,
7197 				   u32 *results_buf_size)
7198 {
7199 	return qed_parse_mcp_trace_dump(p_hwfn,
7200 					dump_buf, NULL, results_buf_size, true);
7201 }
7202 
enum dbg_status qed_print_mcp_trace_results(struct ecore_hwfn *p_hwfn,
					    u32 *dump_buf,
					    __rte_unused u32 num_dumped_dwords,
					    char *results_buf)
{
	u32 needed_bytes;

	/* Full parse into results_buf; the meta data is released when done */
	return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
					&needed_bytes, true);
}
7214 
enum dbg_status qed_print_mcp_trace_results_cont(struct ecore_hwfn *p_hwfn,
						 u32 *dump_buf,
						 char *results_buf)
{
	u32 needed_bytes;

	/* Like qed_print_mcp_trace_results(), but keeps the meta data
	 * allocated so parsing can continue across calls.
	 */
	return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
					&needed_bytes, false);
}
7224 
enum dbg_status qed_print_mcp_trace_line(struct ecore_hwfn *p_hwfn,
					 u8 *dump_buf,
					 u32 num_dumped_bytes,
					 char *results_buf)
{
	u32 parsed_bytes;

	/* Parse the entire raw trace buffer, starting at offset 0 */
	return qed_parse_mcp_trace_buf(p_hwfn, dump_buf, num_dumped_bytes,
				       0, num_dumped_bytes,
				       results_buf, &parsed_bytes);
}
7239 
7240 /* Frees the specified MCP Trace meta data */
qed_mcp_trace_free_meta_data(struct ecore_hwfn * p_hwfn)7241 void qed_mcp_trace_free_meta_data(struct ecore_hwfn *p_hwfn)
7242 {
7243 	struct dbg_tools_user_data *dev_user_data;
7244 	struct mcp_trace_meta *meta;
7245 	u32 i;
7246 
7247 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
7248 	meta = &dev_user_data->mcp_trace_meta;
7249 	if (!meta->is_allocated)
7250 		return;
7251 
7252 	/* Release modules */
7253 	if (meta->modules) {
7254 		for (i = 0; i < meta->modules_num; i++)
7255 			OSAL_FREE(p_hwfn, meta->modules[i]);
7256 		OSAL_FREE(p_hwfn, meta->modules);
7257 	}
7258 
7259 	/* Release formats */
7260 	if (meta->formats) {
7261 		for (i = 0; i < meta->formats_num; i++)
7262 			OSAL_FREE(p_hwfn, meta->formats[i].format_str);
7263 		OSAL_FREE(p_hwfn, meta->formats);
7264 	}
7265 
7266 	meta->is_allocated = false;
7267 }
7268 
7269 enum dbg_status
qed_get_reg_fifo_results_buf_size(__rte_unused struct ecore_hwfn * p_hwfn,u32 * dump_buf,__rte_unused u32 num_dumped_dwords,u32 * results_buf_size)7270 qed_get_reg_fifo_results_buf_size(__rte_unused struct ecore_hwfn *p_hwfn,
7271 				  u32 *dump_buf,
7272 				  __rte_unused u32 num_dumped_dwords,
7273 				  u32 *results_buf_size)
7274 {
7275 	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7276 }
7277 
7278 enum dbg_status
qed_print_reg_fifo_results(__rte_unused struct ecore_hwfn * p_hwfn,u32 * dump_buf,__rte_unused u32 num_dumped_dwords,char * results_buf)7279 qed_print_reg_fifo_results(__rte_unused struct ecore_hwfn *p_hwfn,
7280 			   u32 *dump_buf,
7281 			   __rte_unused u32 num_dumped_dwords,
7282 			   char *results_buf)
7283 {
7284 	u32 parsed_buf_size;
7285 
7286 	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7287 }
7288 
7289 enum dbg_status
qed_get_igu_fifo_results_buf_size(__rte_unused struct ecore_hwfn * p_hwfn,u32 * dump_buf,__rte_unused u32 num_dumped_dwords,u32 * results_buf_size)7290 qed_get_igu_fifo_results_buf_size(__rte_unused struct ecore_hwfn *p_hwfn,
7291 				  u32 *dump_buf,
7292 				  __rte_unused u32 num_dumped_dwords,
7293 				  u32 *results_buf_size)
7294 {
7295 	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7296 }
7297 
7298 enum dbg_status
qed_print_igu_fifo_results(__rte_unused struct ecore_hwfn * p_hwfn,u32 * dump_buf,__rte_unused u32 num_dumped_dwords,char * results_buf)7299 qed_print_igu_fifo_results(__rte_unused struct ecore_hwfn *p_hwfn,
7300 			   u32 *dump_buf,
7301 			   __rte_unused u32 num_dumped_dwords,
7302 			   char *results_buf)
7303 {
7304 	u32 parsed_buf_size;
7305 
7306 	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7307 }
7308 
7309 enum dbg_status
qed_get_protection_override_results_buf_size(__rte_unused struct ecore_hwfn * p_hwfn,u32 * dump_buf,__rte_unused u32 num_dumped_dwords,u32 * results_buf_size)7310 qed_get_protection_override_results_buf_size(__rte_unused
7311 					     struct ecore_hwfn *p_hwfn,
7312 					     u32 *dump_buf,
7313 					     __rte_unused u32 num_dumped_dwords,
7314 					     u32 *results_buf_size)
7315 {
7316 	return qed_parse_protection_override_dump(dump_buf,
7317 						  NULL, results_buf_size);
7318 }
7319 
7320 enum dbg_status
qed_print_protection_override_results(__rte_unused struct ecore_hwfn * p_hwfn,u32 * dump_buf,__rte_unused u32 num_dumped_dwords,char * results_buf)7321 qed_print_protection_override_results(__rte_unused struct ecore_hwfn *p_hwfn,
7322 				      u32 *dump_buf,
7323 				      __rte_unused u32 num_dumped_dwords,
7324 				      char *results_buf)
7325 {
7326 	u32 parsed_buf_size;
7327 
7328 	return qed_parse_protection_override_dump(dump_buf,
7329 						  results_buf,
7330 						  &parsed_buf_size);
7331 }
7332 
7333 enum dbg_status
qed_get_fw_asserts_results_buf_size(__rte_unused struct ecore_hwfn * p_hwfn,u32 * dump_buf,__rte_unused u32 num_dumped_dwords,u32 * results_buf_size)7334 qed_get_fw_asserts_results_buf_size(__rte_unused struct ecore_hwfn *p_hwfn,
7335 				    u32 *dump_buf,
7336 				    __rte_unused u32 num_dumped_dwords,
7337 				    u32 *results_buf_size)
7338 {
7339 	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7340 }
7341 
7342 enum dbg_status
qed_print_fw_asserts_results(__rte_unused struct ecore_hwfn * p_hwfn,u32 * dump_buf,__rte_unused u32 num_dumped_dwords,char * results_buf)7343 qed_print_fw_asserts_results(__rte_unused struct ecore_hwfn *p_hwfn,
7344 			     u32 *dump_buf,
7345 			     __rte_unused u32 num_dumped_dwords,
7346 			     char *results_buf)
7347 {
7348 	u32 parsed_buf_size;
7349 
7350 	return qed_parse_fw_asserts_dump(dump_buf,
7351 					 results_buf, &parsed_buf_size);
7352 }
7353 
/* Parse attention register results into human-readable form: emits one
 * DP_NOTICE line per asserted attention bit found in @results.
 *
 * Returns DBG_STATUS_OK on success, DBG_STATUS_INVALID_ARGS when the
 * block id is unknown, or DBG_STATUS_DBG_ARRAY_NOT_SET when the debug
 * binary arrays required for parsing were not loaded.
 */
enum dbg_status qed_dbg_parse_attn(struct ecore_hwfn *p_hwfn,
				   struct dbg_attn_block_result *results)
{
	const u32 *block_attn_name_offsets;
	const char *attn_name_base;
	const char *block_name;
	enum dbg_attn_type attn_type;
	u8 num_regs, i, j;

	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
	attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
	block_name = qed_dbg_get_block_name(p_hwfn, results->block_id);
	if (!block_name)
		return DBG_STATUS_INVALID_ARGS;

	/* Parsing needs the attention index/name-offset/string arrays */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	/* Per-block table of offsets into the parsing-strings array */
	block_attn_name_offsets =
	    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr +
	    results->names_offset;

	attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr;

	/* Go over registers with a non-zero attention status */
	for (i = 0; i < num_regs; i++) {
		struct dbg_attn_bit_mapping *bit_mapping;
		struct dbg_attn_reg_result *reg_result;
		u8 num_reg_attn, bit_idx = 0;

		reg_result = &results->reg_results[i];
		num_reg_attn = GET_FIELD(reg_result->data,
					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
		bit_mapping = (struct dbg_attn_bit_mapping *)
		    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr +
		    reg_result->block_attn_offset;

		/* Go over attention status bits.
		 * bit_idx tracks the position in the status register and
		 * may advance faster than j when unused bits are skipped.
		 */
		for (j = 0; j < num_reg_attn; j++, bit_idx++) {
			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
						     DBG_ATTN_BIT_MAPPING_VAL);
			const char *attn_name, *attn_type_str, *masked_str;
			u32 attn_name_offset;
			u32 sts_addr;

			/* Check if bit mask should be advanced (due to unused
			 * bits). In that case attn_idx_val holds the number of
			 * unused bits rather than a name index.
			 */
			if (GET_FIELD(bit_mapping[j].data,
				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
				bit_idx += (u8)attn_idx_val;
				continue;
			}

			/* Check current bit index */
			if (!(reg_result->sts_val & OSAL_BIT(bit_idx)))
				continue;

			/* An attention bit with value=1 was found
			 * Find attention name
			 */
			attn_name_offset =
				block_attn_name_offsets[attn_idx_val];
			attn_name = attn_name_base + attn_name_offset;
			attn_type_str =
				(attn_type ==
				 ATTN_TYPE_INTERRUPT ? "Interrupt" :
				 "Parity");
			masked_str = reg_result->mask_val & OSAL_BIT(bit_idx) ?
				     " [masked]" : "";
			sts_addr = GET_FIELD(reg_result->data,
					     DBG_ATTN_REG_RESULT_STS_ADDRESS);
			/* sts_addr is in dwords; *4 converts to bytes */
			DP_NOTICE(p_hwfn, false,
				  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
				  block_name, attn_type_str, attn_name,
				  sts_addr * 4, bit_idx, masked_str);
		}
	}

	return DBG_STATUS_OK;
}
7437 
7438 /* Wrapper for unifying the idle_chk and mcp_trace api */
7439 static enum dbg_status
qed_print_idle_chk_results_wrapper(struct ecore_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,char * results_buf)7440 qed_print_idle_chk_results_wrapper(struct ecore_hwfn *p_hwfn,
7441 				   u32 *dump_buf,
7442 				   u32 num_dumped_dwords,
7443 				   char *results_buf)
7444 {
7445 	u32 num_errors, num_warnnings;
7446 
7447 	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7448 					  results_buf, &num_errors,
7449 					  &num_warnnings);
7450 }
7451 
7452 /* Feature meta data lookup table */
7453 static struct {
7454 	const char *name;
7455 	enum dbg_status (*get_size)(struct ecore_hwfn *p_hwfn,
7456 				    struct ecore_ptt *p_ptt, u32 *size);
7457 	enum dbg_status (*perform_dump)(struct ecore_hwfn *p_hwfn,
7458 					struct ecore_ptt *p_ptt, u32 *dump_buf,
7459 					u32 buf_size, u32 *dumped_dwords);
7460 	enum dbg_status (*print_results)(struct ecore_hwfn *p_hwfn,
7461 					 u32 *dump_buf, u32 num_dumped_dwords,
7462 					 char *results_buf);
7463 	enum dbg_status (*results_buf_size)(struct ecore_hwfn *p_hwfn,
7464 					    u32 *dump_buf,
7465 					    u32 num_dumped_dwords,
7466 					    u32 *results_buf_size);
7467 } qed_features_lookup[] = {
7468 	{
7469 	"grc", qed_dbg_grc_get_dump_buf_size,
7470 		    qed_dbg_grc_dump, NULL, NULL}, {
7471 	"idle_chk",
7472 		    qed_dbg_idle_chk_get_dump_buf_size,
7473 		    qed_dbg_idle_chk_dump,
7474 		    qed_print_idle_chk_results_wrapper,
7475 		    qed_get_idle_chk_results_buf_size}, {
7476 	"mcp_trace",
7477 		    qed_dbg_mcp_trace_get_dump_buf_size,
7478 		    qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7479 		    qed_get_mcp_trace_results_buf_size}, {
7480 	"reg_fifo",
7481 		    qed_dbg_reg_fifo_get_dump_buf_size,
7482 		    qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7483 		    qed_get_reg_fifo_results_buf_size}, {
7484 	"igu_fifo",
7485 		    qed_dbg_igu_fifo_get_dump_buf_size,
7486 		    qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7487 		    qed_get_igu_fifo_results_buf_size}, {
7488 	"protection_override",
7489 		    qed_dbg_protection_override_get_dump_buf_size,
7490 		    qed_dbg_protection_override_dump,
7491 		    qed_print_protection_override_results,
7492 		    qed_get_protection_override_results_buf_size}, {
7493 	"fw_asserts",
7494 		    qed_dbg_fw_asserts_get_dump_buf_size,
7495 		    qed_dbg_fw_asserts_dump,
7496 		    qed_print_fw_asserts_results,
7497 		    qed_get_fw_asserts_results_buf_size}, {
7498 	"ilt",
7499 		    qed_dbg_ilt_get_dump_buf_size,
7500 		    qed_dbg_ilt_dump, NULL, NULL},};
7501 
7502 #define QED_RESULTS_BUF_MIN_SIZE 16
7503 /* Generic function for decoding debug feature info */
format_feature(struct ecore_hwfn * p_hwfn,enum ecore_dbg_features feature_idx)7504 static enum dbg_status format_feature(struct ecore_hwfn *p_hwfn,
7505 				      enum ecore_dbg_features feature_idx)
7506 {
7507 	struct ecore_dbg_feature *feature =
7508 	    &p_hwfn->p_dev->dbg_params.features[feature_idx];
7509 	u32 text_size_bytes, null_char_pos, i;
7510 	enum dbg_status rc;
7511 	char *text_buf;
7512 
7513 	/* Check if feature supports formatting capability */
7514 	if (!qed_features_lookup[feature_idx].results_buf_size)
7515 		return DBG_STATUS_OK;
7516 
7517 	/* Obtain size of formatted output */
7518 	rc = qed_features_lookup[feature_idx].results_buf_size(p_hwfn,
7519 						(u32 *)feature->dump_buf,
7520 						feature->dumped_dwords,
7521 						&text_size_bytes);
7522 	if (rc != DBG_STATUS_OK)
7523 		return rc;
7524 
7525 	/* Make sure that the allocated size is a multiple of dword (4 bytes) */
7526 	null_char_pos = text_size_bytes - 1;
7527 	text_size_bytes = (text_size_bytes + 3) & ~0x3;
7528 
7529 	if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7530 		DP_NOTICE(p_hwfn->p_dev, false,
7531 			  "formatted size of feature was too small %d. Aborting\n",
7532 			  text_size_bytes);
7533 		return DBG_STATUS_INVALID_ARGS;
7534 	}
7535 
7536 	/* Allocate temp text buf */
7537 	text_buf = OSAL_VZALLOC(p_hwfn, text_size_bytes);
7538 	if (!text_buf) {
7539 		DP_NOTICE(p_hwfn->p_dev, false,
7540 			  "failed to allocate text buffer. Aborting\n");
7541 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7542 	}
7543 
7544 	/* Decode feature opcodes to string on temp buf */
7545 	rc = qed_features_lookup[feature_idx].print_results(p_hwfn,
7546 						(u32 *)feature->dump_buf,
7547 						feature->dumped_dwords,
7548 						text_buf);
7549 	if (rc != DBG_STATUS_OK) {
7550 		OSAL_VFREE(p_hwfn, text_buf);
7551 		return rc;
7552 	}
7553 
7554 	/* Replace the original null character with a '\n' character.
7555 	 * The bytes that were added as a result of the dword alignment are also
7556 	 * padded with '\n' characters.
7557 	 */
7558 	for (i = null_char_pos; i < text_size_bytes; i++)
7559 		text_buf[i] = '\n';
7560 
7561 
7562 	/* Free the old dump_buf and point the dump_buf to the newly allocated
7563 	 * and formatted text buffer.
7564 	 */
7565 	OSAL_VFREE(p_hwfn, feature->dump_buf);
7566 	feature->dump_buf = (u8 *)text_buf;
7567 	feature->buf_size = text_size_bytes;
7568 	feature->dumped_dwords = text_size_bytes / 4;
7569 	return rc;
7570 }
7571 
/* Upper bound on a single feature's dump size, in dwords */
#define MAX_DBG_FEATURE_SIZE_DWORDS	0x3FFFFFFF

/* Generic function for performing the dump of a debug feature:
 * (re)allocates the feature's dump_buf, performs the dump, then formats
 * the result to text via format_feature() when possible.
 */
static enum dbg_status qed_dbg_dump(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    enum ecore_dbg_features feature_idx)
{
	struct ecore_dbg_feature *feature =
	    &p_hwfn->p_dev->dbg_params.features[feature_idx];
	u32 buf_size_dwords;
	enum dbg_status rc;

	DP_NOTICE(p_hwfn->p_dev, false, "Collecting a debug feature [\"%s\"]\n",
		  qed_features_lookup[feature_idx].name);

	/* Dump_buf was already allocated need to free (this can happen if dump
	 * was called but file was never read).
	 * We can't use the buffer as is since size may have changed.
	 */
	if (feature->dump_buf) {
		OSAL_VFREE(p_hwfn, feature->dump_buf);
		feature->dump_buf = NULL;
	}

	/* Get buffer size from hsi, allocate accordingly, and perform the
	 * dump. NVRAM_GET_IMAGE_FAILED is tolerated here; the dump can
	 * still proceed (see comment below).
	 */
	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
						       &buf_size_dwords);
	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return rc;

	/* Oversized features are skipped (reported as success, size 0) */
	if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) {
		feature->buf_size = 0;
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n",
			  qed_features_lookup[feature_idx].name,
			  buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS);

		return DBG_STATUS_OK;
	}

	feature->buf_size = buf_size_dwords * sizeof(u32);
	feature->dump_buf = OSAL_ZALLOC(p_hwfn, GFP_KERNEL, feature->buf_size);
	if (!feature->dump_buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	rc = qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt,
					(u32 *)feature->dump_buf,
					feature->buf_size / sizeof(u32),
					&feature->dumped_dwords);

	/* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
	 * In this case the buffer holds valid binary data, but we won't able
	 * to parse it (since parsing relies on data in NVRAM which is only
	 * accessible when MFW is responsive). skip the formatting but return
	 * success so that binary data is provided.
	 */
	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return DBG_STATUS_OK;

	if (rc != DBG_STATUS_OK)
		return rc;

	/* Format output */
	rc = format_feature(p_hwfn, feature_idx);
	return rc;
}
7640 
/* Dump the GRC debug feature into @buffer. */
int qed_dbg_grc(struct ecore_dev *edev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(edev, buffer, DBG_FEATURE_GRC,
			       num_dumped_bytes);
}
7645 
qed_dbg_grc_size(struct ecore_dev * edev)7646 int qed_dbg_grc_size(struct ecore_dev *edev)
7647 {
7648 	return qed_dbg_feature_size(edev, DBG_FEATURE_GRC);
7649 }
7650 
7651 int
qed_dbg_idle_chk(struct ecore_dev * edev,void * buffer,u32 * num_dumped_bytes)7652 qed_dbg_idle_chk(struct ecore_dev *edev, void *buffer, u32 *num_dumped_bytes)
7653 {
7654 	return qed_dbg_feature(edev, buffer, DBG_FEATURE_IDLE_CHK,
7655 			       num_dumped_bytes);
7656 }
7657 
qed_dbg_idle_chk_size(struct ecore_dev * edev)7658 int qed_dbg_idle_chk_size(struct ecore_dev *edev)
7659 {
7660 	return qed_dbg_feature_size(edev, DBG_FEATURE_IDLE_CHK);
7661 }
7662 
7663 int
qed_dbg_reg_fifo(struct ecore_dev * edev,void * buffer,u32 * num_dumped_bytes)7664 qed_dbg_reg_fifo(struct ecore_dev *edev, void *buffer, u32 *num_dumped_bytes)
7665 {
7666 	return qed_dbg_feature(edev, buffer, DBG_FEATURE_REG_FIFO,
7667 			       num_dumped_bytes);
7668 }
7669 
qed_dbg_reg_fifo_size(struct ecore_dev * edev)7670 int qed_dbg_reg_fifo_size(struct ecore_dev *edev)
7671 {
7672 	return qed_dbg_feature_size(edev, DBG_FEATURE_REG_FIFO);
7673 }
7674 
7675 int
qed_dbg_igu_fifo(struct ecore_dev * edev,void * buffer,u32 * num_dumped_bytes)7676 qed_dbg_igu_fifo(struct ecore_dev *edev, void *buffer, u32 *num_dumped_bytes)
7677 {
7678 	return qed_dbg_feature(edev, buffer, DBG_FEATURE_IGU_FIFO,
7679 			       num_dumped_bytes);
7680 }
7681 
qed_dbg_igu_fifo_size(struct ecore_dev * edev)7682 int qed_dbg_igu_fifo_size(struct ecore_dev *edev)
7683 {
7684 	return qed_dbg_feature_size(edev, DBG_FEATURE_IGU_FIFO);
7685 }
7686 
/* Query the length in bytes of the NVM image @image_id. On failure
 * *length is left at 0 and the MCP error code is returned.
 */
static int qed_dbg_nvm_image_length(struct ecore_hwfn *p_hwfn,
				    enum ecore_nvm_images image_id, u32 *length)
{
	struct ecore_nvm_image_att att;
	int rc;

	*length = 0;

	rc = ecore_mcp_get_nvm_image_att(p_hwfn, image_id, &att);
	if (rc != 0)
		return rc;

	*length = att.length;
	return 0;
}
7702 
/* Dump the protection_override debug feature into @buffer. */
int qed_dbg_protection_override(struct ecore_dev *edev, void *buffer,
				u32 *num_dumped_bytes)
{
	return qed_dbg_feature(edev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
			       num_dumped_bytes);
}
7709 
qed_dbg_protection_override_size(struct ecore_dev * edev)7710 int qed_dbg_protection_override_size(struct ecore_dev *edev)
7711 {
7712 	return qed_dbg_feature_size(edev, DBG_FEATURE_PROTECTION_OVERRIDE);
7713 }
7714 
/* Dump the fw_asserts debug feature into @buffer. */
int qed_dbg_fw_asserts(struct ecore_dev *edev, void *buffer,
		       u32 *num_dumped_bytes)
{
	return qed_dbg_feature(edev, buffer, DBG_FEATURE_FW_ASSERTS,
			       num_dumped_bytes);
}
7721 
qed_dbg_fw_asserts_size(struct ecore_dev * edev)7722 int qed_dbg_fw_asserts_size(struct ecore_dev *edev)
7723 {
7724 	return qed_dbg_feature_size(edev, DBG_FEATURE_FW_ASSERTS);
7725 }
7726 
/* Dump the ILT debug feature into @buffer. */
int qed_dbg_ilt(struct ecore_dev *edev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(edev, buffer, DBG_FEATURE_ILT,
			       num_dumped_bytes);
}
7731 
qed_dbg_ilt_size(struct ecore_dev * edev)7732 int qed_dbg_ilt_size(struct ecore_dev *edev)
7733 {
7734 	return qed_dbg_feature_size(edev, DBG_FEATURE_ILT);
7735 }
7736 
/* Dump the mcp_trace debug feature into @buffer. */
int qed_dbg_mcp_trace(struct ecore_dev *edev, void *buffer,
		      u32 *num_dumped_bytes)
{
	return qed_dbg_feature(edev, buffer, DBG_FEATURE_MCP_TRACE,
			       num_dumped_bytes);
}
7743 
qed_dbg_mcp_trace_size(struct ecore_dev * edev)7744 int qed_dbg_mcp_trace_size(struct ecore_dev *edev)
7745 {
7746 	return qed_dbg_feature_size(edev, DBG_FEATURE_MCP_TRACE);
7747 }
7748 
/* Defines the amount of bytes allocated for recording the length of debug
 * feature buffer.
 */
#define REGDUMP_HEADER_SIZE			sizeof(u32)
/* Layout of the per-feature regdump header dword (see
 * qed_calc_regdump_header()):
 *   bits  0-23: feature size in bytes
 *   bits 24-29: feature id (enum debug_print_features)
 *   bit     30: engine field omitted (single-engine device)
 *   bit     31: engine id
 */
#define REGDUMP_HEADER_SIZE_SHIFT		0
#define REGDUMP_HEADER_SIZE_MASK		0xffffff
#define REGDUMP_HEADER_FEATURE_SHIFT		24
#define REGDUMP_HEADER_FEATURE_MASK		0x3f
#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
#define REGDUMP_HEADER_OMIT_ENGINE_MASK		0x1
#define REGDUMP_HEADER_ENGINE_SHIFT		31
#define REGDUMP_HEADER_ENGINE_MASK		0x1
/* Limits used by qed_dbg_all_data_size(): the ILT dump is dropped when
 * the total dump would exceed REGDUMP_MAX_SIZE.
 */
#define REGDUMP_MAX_SIZE			0x1000000
#define ILT_DUMP_MAX_SIZE			(1024 * 1024 * 15)
7763 
/* Feature identifiers stored in the REGDUMP_HEADER_FEATURE field of the
 * header dword that precedes each feature dump (see
 * qed_calc_regdump_header() and qed_dbg_all_data()).
 */
enum debug_print_features {
	OLD_MODE = 0,
	IDLE_CHK = 1,
	GRC_DUMP = 2,
	MCP_TRACE = 3,
	REG_FIFO = 4,
	PROTECTION_OVERRIDE = 5,
	IGU_FIFO = 6,
	PHY = 7,
	FW_ASSERTS = 8,
	NVM_CFG1 = 9,
	DEFAULT_CFG = 10,
	NVM_META = 11,
	MDUMP = 12,
	ILT_DUMP = 13,
};
7780 
/* Build the header dword that precedes a feature dump: feature size,
 * feature id, engine id and the omit-engine flag packed per the
 * REGDUMP_HEADER_* fields.
 */
static u32 qed_calc_regdump_header(struct ecore_dev *edev,
				   enum debug_print_features feature,
				   int engine, u32 feature_size, u8 omit_engine)
{
	u32 hdr = 0;

	SET_FIELD(hdr, REGDUMP_HEADER_SIZE, feature_size);

	/* The size field is only 24 bits wide - warn on truncation */
	if (hdr != feature_size)
		DP_NOTICE(edev, false,
			  "Feature %d is too large (size 0x%x) and will corrupt the dump\n",
			  feature, feature_size);

	SET_FIELD(hdr, REGDUMP_HEADER_FEATURE, feature);
	SET_FIELD(hdr, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
	SET_FIELD(hdr, REGDUMP_HEADER_ENGINE, engine);

	return hdr;
}
7799 
/* Collect all debug features into @buffer: per engine, two idle_chk
 * dumps, reg_fifo, igu_fifo, protection_override, fw_asserts and grc;
 * then a single mcp_trace. Each dump is preceded by a header dword from
 * qed_calc_regdump_header(). The caller is expected to supply a buffer
 * sized via qed_dbg_all_data_size(). Individual feature failures are
 * logged and skipped; the function always returns 0.
 */
int qed_dbg_all_data(struct ecore_dev *edev, void *buffer)
{
	u8 cur_engine, omit_engine = 0, org_engine;
	struct ecore_hwfn *p_hwfn =
		&edev->hwfns[edev->dbg_params.engine_for_debug];
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	int grc_params[MAX_DBG_GRC_PARAMS], i;
	u32 offset = 0, feature_size;
	int rc;

	/* Save the current GRC params so they can be restored before the
	 * final GRC dump below.
	 */
	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
		grc_params[i] = dev_data->grc.param_val[i];

	/* Single-engine device: headers carry the omit-engine flag */
	if (!ECORE_IS_CMT(edev))
		omit_engine = 1;

	OSAL_MUTEX_ACQUIRE(&edev->dbg_lock);

	org_engine = qed_get_debug_engine(edev);
	for (cur_engine = 0; cur_engine < edev->num_hwfns; cur_engine++) {
		/* Collect idle_chks and grcDump for each hw function */
		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
			   "obtaining idle_chk and grcdump for current engine\n");
		qed_set_debug_engine(edev, cur_engine);

		/* First idle_chk */
		rc = qed_dbg_idle_chk(edev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(edev, IDLE_CHK, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(edev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
		}

		/* Second idle_chk */
		rc = qed_dbg_idle_chk(edev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(edev, IDLE_CHK, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(edev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
		}

		/* reg_fifo dump */
		rc = qed_dbg_reg_fifo(edev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(edev, REG_FIFO, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(edev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
		}

		/* igu_fifo dump */
		rc = qed_dbg_igu_fifo(edev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(edev, IGU_FIFO, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(edev, "qed_dbg_igu_fifo failed. rc = %d", rc);
		}

		/* protection_override dump */
		rc = qed_dbg_protection_override(edev, (u8 *)buffer + offset +
						 REGDUMP_HEADER_SIZE,
						 &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(edev, PROTECTION_OVERRIDE,
						    cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(edev,
			       "qed_dbg_protection_override failed. rc = %d\n",
			       rc);
		}

		/* fw_asserts dump */
		rc = qed_dbg_fw_asserts(edev, (u8 *)buffer + offset +
					REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(edev, FW_ASSERTS,
						    cur_engine, feature_size,
						    omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(edev, "qed_dbg_fw_asserts failed. rc = %d\n",
			       rc);
		}

		/* GRC dump - must be last because when mcp stuck it will
		 * clutter idle_chk, reg_fifo, ...
		 */
		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
			dev_data->grc.param_val[i] = grc_params[i];

		rc = qed_dbg_grc(edev, (u8 *)buffer + offset +
				 REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(edev, GRC_DUMP,
						    cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(edev, "qed_dbg_grc failed. rc = %d", rc);
		}
	}

	qed_set_debug_engine(edev, org_engine);

	/* mcp_trace.
	 * NOTE(review): cur_engine equals edev->num_hwfns here (the loop
	 * ran to completion), and that value is what lands in the
	 * header's engine field - confirm this is intended.
	 */
	rc = qed_dbg_mcp_trace(edev, (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(edev, MCP_TRACE, cur_engine,
					    feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else {
		DP_ERR(edev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
	}

	OSAL_MUTEX_RELEASE(&edev->dbg_lock);

	return 0;
}
7940 
/* Compute the buffer size in bytes needed by qed_dbg_all_data():
 * per-engine feature sizes plus headers, the common mcp_trace and NVM
 * image sizes. Disables the ILT dump (and removes its contribution)
 * when the total would exceed REGDUMP_MAX_SIZE.
 */
int qed_dbg_all_data_size(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn =
		&edev->hwfns[edev->dbg_params.engine_for_debug];
	u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
	u8 cur_engine, org_engine;

	edev->disable_ilt_dump = false;
	org_engine = qed_get_debug_engine(edev);
	for (cur_engine = 0; cur_engine < edev->num_hwfns; cur_engine++) {
		/* Engine specific */
		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
			   "calculating idle_chk and grcdump register length for current engine\n");
		qed_set_debug_engine(edev, cur_engine);
		/* idle_chk is counted twice - qed_dbg_all_data() dumps it
		 * twice per engine.
		 */
		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(edev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(edev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(edev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(edev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(edev) +
			    REGDUMP_HEADER_SIZE +
			    qed_dbg_protection_override_size(edev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(edev);

		/* ILT is only counted while it fits under its own cap */
		ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(edev);
		if (ilt_len < ILT_DUMP_MAX_SIZE) {
			total_ilt_len += ilt_len;
			regs_len += ilt_len;
		}
	}

	qed_set_debug_engine(edev, org_engine);

	/* Engine common */
	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(edev);
	qed_dbg_nvm_image_length(p_hwfn, ECORE_NVM_IMAGE_NVM_CFG1, &image_len);
	if (image_len)
		regs_len += REGDUMP_HEADER_SIZE + image_len;
	qed_dbg_nvm_image_length(p_hwfn, ECORE_NVM_IMAGE_DEFAULT_CFG,
				 &image_len);
	if (image_len)
		regs_len += REGDUMP_HEADER_SIZE + image_len;
	qed_dbg_nvm_image_length(p_hwfn, ECORE_NVM_IMAGE_NVM_META, &image_len);
	if (image_len)
		regs_len += REGDUMP_HEADER_SIZE + image_len;
	qed_dbg_nvm_image_length(p_hwfn, ECORE_NVM_IMAGE_MDUMP, &image_len);
	if (image_len)
		regs_len += REGDUMP_HEADER_SIZE + image_len;

	/* Total too large: drop the ILT contribution entirely */
	if (regs_len > REGDUMP_MAX_SIZE) {
		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
			   "Dump exceeds max size 0x%x, disable ILT dump\n",
			   REGDUMP_MAX_SIZE);
		edev->disable_ilt_dump = true;
		regs_len -= total_ilt_len;
	}

	return regs_len;
}
7999 
/* Dump a single debug feature into @buffer and report the number of
 * bytes written through @num_dumped_bytes. Returns 0 on success,
 * -EINVAL when the PTT cannot be acquired or the dump fails.
 *
 * NOTE(review): @buffer is assumed to be large enough for the feature
 * (presumably sized via qed_dbg_feature_size()) - the memcpy below is
 * not bounded by a caller-supplied size. Confirm callers guarantee it.
 */
int qed_dbg_feature(struct ecore_dev *edev, void *buffer,
		    enum ecore_dbg_features feature, u32 *num_dumped_bytes)
{
	struct ecore_hwfn *p_hwfn =
		&edev->hwfns[edev->dbg_params.engine_for_debug];
	struct ecore_dbg_feature *qed_feature =
		&edev->dbg_params.features[feature];
	enum dbg_status dbg_rc;
	struct ecore_ptt *p_ptt;
	int rc = 0;

	/* Acquire ptt */
	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EINVAL;

	/* Get dump */
	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
	if (dbg_rc != DBG_STATUS_OK) {
		DP_VERBOSE(edev, ECORE_MSG_DEBUG, "%s\n",
			   qed_dbg_get_status_str(dbg_rc));
		*num_dumped_bytes = 0;
		rc = -EINVAL;
		goto out;
	}

	DP_VERBOSE(edev, ECORE_MSG_DEBUG,
		   "copying debug feature to external buffer\n");
	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
	/* dumped_dwords is in dwords; report bytes to the caller */
	*num_dumped_bytes = edev->dbg_params.features[feature].dumped_dwords *
			    4;

out:
	ecore_ptt_release(p_hwfn, p_ptt);
	return rc;
}
8036 
8037 int
qed_dbg_feature_size(struct ecore_dev * edev,enum ecore_dbg_features feature)8038 qed_dbg_feature_size(struct ecore_dev *edev, enum ecore_dbg_features feature)
8039 {
8040 	struct ecore_hwfn *p_hwfn =
8041 		&edev->hwfns[edev->dbg_params.engine_for_debug];
8042 	struct ecore_dbg_feature *qed_feature = &edev->dbg_features[feature];
8043 	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
8044 	u32 buf_size_dwords;
8045 	enum dbg_status rc;
8046 
8047 	if (!p_ptt)
8048 		return -EINVAL;
8049 
8050 	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8051 						   &buf_size_dwords);
8052 	if (rc != DBG_STATUS_OK)
8053 		buf_size_dwords = 0;
8054 
8055 	/* Feature will not be dumped if it exceeds maximum size */
8056 	if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS)
8057 		buf_size_dwords = 0;
8058 
8059 	ecore_ptt_release(p_hwfn, p_ptt);
8060 	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8061 	return qed_feature->buf_size;
8062 }
8063 
qed_get_debug_engine(struct ecore_dev * edev)8064 u8 qed_get_debug_engine(struct ecore_dev *edev)
8065 {
8066 	return edev->dbg_params.engine_for_debug;
8067 }
8068 
qed_set_debug_engine(struct ecore_dev * edev,int engine_number)8069 void qed_set_debug_engine(struct ecore_dev *edev, int engine_number)
8070 {
8071 	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "set debug engine to %d\n",
8072 		   engine_number);
8073 	edev->dbg_params.engine_for_debug = engine_number;
8074 }
8075 
/* One-time debug setup for the PF: initializes the debug lock, syncs
 * the tools version, and points every HW function's debug binary
 * arrays at the debug-values section of the firmware file.
 */
void qed_dbg_pf_init(struct ecore_dev *edev)
{
	const u8 *dbg_values = NULL;
	int i;

	PMD_INIT_FUNC_TRACE(edev);

	OSAL_MUTEX_INIT(&edev->dbg_lock);

	/* Sync ver with debugbus qed code */
	qed_dbg_set_app_ver(TOOLS_VERSION);

	/* Debug values are after init values.
	 * The offset is the first dword of the file.
	 */
	/* TBD: change hardcoded value to offset from FW file.
	 * NOTE(review): 1337296 is tied to a specific firmware layout -
	 * a mismatched firmware file would make dbg_values point at the
	 * wrong data.
	 */
	dbg_values = (const u8 *)edev->firmware + 1337296;

	for_each_hwfn(edev, i) {
		qed_dbg_set_bin_ptr(&edev->hwfns[i], dbg_values);
		qed_dbg_user_set_bin_ptr(&edev->hwfns[i], dbg_values);
	}

	/* Set the hwfn to be 0 as default */
	edev->dbg_params.engine_for_debug = 0;
}
8102 
qed_dbg_pf_exit(struct ecore_dev * edev)8103 void qed_dbg_pf_exit(struct ecore_dev *edev)
8104 {
8105 	struct ecore_dbg_feature *feature = NULL;
8106 	enum ecore_dbg_features feature_idx;
8107 
8108 	PMD_INIT_FUNC_TRACE(edev);
8109 
8110 	/* debug features' buffers may be allocated if debug feature was used
8111 	 * but dump wasn't called
8112 	 */
8113 	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8114 		feature = &edev->dbg_features[feature_idx];
8115 		if (feature->dump_buf) {
8116 			OSAL_VFREE(edev, feature->dump_buf);
8117 			feature->dump_buf = NULL;
8118 		}
8119 	}
8120 
8121 	OSAL_MUTEX_DEALLOC(&edev->dbg_lock);
8122 }
8123