/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_spq.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_init_ops.h"
#include "ecore_rt_defs.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_hw_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_mcp.h"

struct ecore_pi_info {
	ecore_int_comp_cb_t comp_cb;
	void *cookie;		/* Will be sent to the compl cb function */
};

struct ecore_sb_sp_info {
	struct ecore_sb_info sb_info;
	/* per protocol index data */
	struct ecore_pi_info pi_info_arr[PIS_PER_SB];
};

enum ecore_attention_type {
	ECORE_ATTN_TYPE_ATTN,
	ECORE_ATTN_TYPE_PARITY,
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)

struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY		(1 << 0)

#define ATTENTION_LENGTH_MASK		(0x00000ff0)
#define ATTENTION_LENGTH_SHIFT		(4)
#define ATTENTION_LENGTH(flags)		(((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE		(1 << ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR			(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT		((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK		(0x000ff000)
#define ATTENTION_OFFSET_SHIFT		(12)

#define ATTENTION_BB_MASK		(0x00700000)
#define ATTENTION_BB_SHIFT		(20)
#define ATTENTION_BB(value)		((value) << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT		(1 << 23)

#define	ATTENTION_CLEAR_ENABLE		(1 << 28)
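
	/* Layout of the 'flags' word below, derived from the masks above:
	 * bit 0      - source also produces a parity indication
	 * bits 4-11  - number of consecutive AEU bits this entry spans
	 * bits 12-19 - first index of a multi-bit source (for name printing)
	 * bits 20-22 - index into aeu_descs_special[] (BB adapters only)
	 * bit 23     - meaning of this source differs between BB and AH
	 * bit 28     - source is masked in the AEU once it fires
	 */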
	unsigned int flags;

	/* Callback to call if the attention is triggered */
	enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);

	enum block_id block_index;
};

struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS		(8)
#define NUM_ATTN_REGS		(9)
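
/* The AEU routes attention sources into up to 8 output groups; each group
 * is enabled per-source through 9 32-bit MISC_REG_AEU_ENABLE* registers,
 * mirroring the 9 "after invert" status registers read on deassertion.
 */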

static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);

	return ECORE_SUCCESS;
}

#define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK		(0x3c000)
#define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT	(14)
#define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK		(0x03fc0)
#define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT	(6)
#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK	(0x00020)
#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT	(5)
#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK	(0x0001e)
#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT	(1)
#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK	(0x1)
#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT	(0)
#define ECORE_PSWHST_ATTENTION_VF_DISABLED		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT	(0)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK	(0x1e)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT	(1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x20)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK	(0x3fc0)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT	(6)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK	(0x3c000)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT	(14)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK	(0x3fc0000)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp =
	    ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PSWHST_REG_VF_DISABLED_ERROR_VALID);

	/* Disabled VF access */
	if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
		u32 addr, data;

		addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
		data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_VF_DISABLED_ERROR_DATA);
		DP_INFO(p_hwfn->p_dev,
			"PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
			" Write [0x%02x] Addr [0x%08x]\n",
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
			     >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
			     >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
			      ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
			      ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
			      ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
			addr);
	}

	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       PSWHST_REG_INCORRECT_ACCESS_VALID);
	if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
		u32 addr, data, length;

		addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
		data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_DATA);
		length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				  PSWHST_REG_INCORRECT_ACCESS_LENGTH);

		DP_INFO(p_hwfn->p_dev,
			"Incorrect access to %08x of length %08x - PF [%02x]"
			" VF [%04x] [valid %02x] client [%02x] write [%02x]"
			" Byte-Enable [%04x] [%08x]\n",
			addr, length,
			(u8)((data &
		      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
		      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
			(u8)((data &
		      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
		      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
			(u8)((data &
		      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
		      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
			(u8)((data &
		      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
		      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
			(u8)((data &
		      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
		      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
			(u8)((data &
		      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
		      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
			data);
	}

	/* TODO - We know 'some' of these are legal due to virtualization,
	 * but is it true for all of them?
	 */
	return ECORE_SUCCESS;
}

#define ECORE_GRC_ATTENTION_VALID_BIT		(1 << 0)
#define ECORE_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff << 0)
#define ECORE_GRC_ATTENTION_RDWR_BIT		(1 << 23)
#define ECORE_GRC_ATTENTION_MASTER_MASK		(0xf << 24)
#define ECORE_GRC_ATTENTION_MASTER_SHIFT	(24)
#define ECORE_GRC_ATTENTION_PF_MASK		(0xf)
#define ECORE_GRC_ATTENTION_VF_MASK		(0xff << 4)
#define ECORE_GRC_ATTENTION_VF_SHIFT		(4)
#define ECORE_GRC_ATTENTION_PRIV_MASK		(0x3 << 14)
#define ECORE_GRC_ATTENTION_PRIV_SHIFT		(14)
#define ECORE_GRC_ATTENTION_PRIV_VF		(0)
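/* The latched GRC timeout address appears to be in dword units;
 * ecore_grc_attn_cb() below shifts it left by 2 so the log shows a byte
 * address.
 */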
static const char *grc_timeout_attn_master_to_str(u8 master)
{
	switch (master) {
	case 1:
		return "PXP";
	case 2:
		return "MCP";
	case 3:
		return "MSDM";
	case 4:
		return "PSDM";
	case 5:
		return "YSDM";
	case 6:
		return "USDM";
	case 7:
		return "TSDM";
	case 8:
		return "XSDM";
	case 9:
		return "DBU";
	case 10:
		return "DMAE";
	default:
		return "Unknown";
	}
}

static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register
	 */
	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
		goto out;

	/* Read the GRC timeout information */
	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

	DP_INFO(p_hwfn->p_dev,
		"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s]"
		" [PF: %02x %s %02x]\n",
		tmp2, tmp,
		(tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
		(tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
		grc_timeout_attn_master_to_str((tmp &
					ECORE_GRC_ATTENTION_MASTER_MASK) >>
				       ECORE_GRC_ATTENTION_MASTER_SHIFT),
		(tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
		(((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
		  ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
		 ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
		(tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
		ECORE_GRC_ATTENTION_VF_SHIFT);

out:
	/* Regardless of anything else, clear the validity bit */
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
	return ECORE_SUCCESS;
}

#define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
#define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
#define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
#define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
#define ECORE_PGLUE_ATTENTION_DETAILS2_BME	(1 << 22)
#define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
#define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
#define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
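/* Note that bit 23 is reused above: DETAILS2_FID_EN, ICPL_VALID and
 * ILT_VALID each decode a different PGLUE_B register, so they don't clash.
 */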
static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp;

	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       PGLUE_B_REG_TX_ERR_WR_DETAILS2);
	if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
		details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   PGLUE_B_REG_TX_ERR_WR_DETAILS);

		DP_INFO(p_hwfn,
			"Illegal write by chip to [%08x:%08x] blocked."
			" Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
			" Details2 %08x [Was_error %02x BME deassert %02x"
			" FID_enable deassert %02x]\n",
			addr_hi, addr_lo, details,
			(u8)((details &
			      ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
			     ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
			(u8)((details &
			      ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
			     ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
			(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
			     ? 1 : 0), tmp,
			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
			     : 0),
			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
			     0),
			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
			     : 0));
	}

	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       PGLUE_B_REG_TX_ERR_RD_DETAILS2);
	if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
		details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   PGLUE_B_REG_TX_ERR_RD_DETAILS);

		DP_INFO(p_hwfn,
			"Illegal read by chip from [%08x:%08x] blocked."
			" Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
			" Details2 %08x [Was_error %02x BME deassert %02x"
			" FID_enable deassert %02x]\n",
			addr_hi, addr_lo, details,
			(u8)((details &
			      ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
			     ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
			(u8)((details &
			      ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
			     ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
			(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
			     ? 1 : 0), tmp,
			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
			     : 0),
			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
			     0),
			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
			     : 0));
	}

	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
	if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
		DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);

	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
	if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
		u32 addr_hi, addr_lo;

		addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);

		DP_INFO(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
			tmp, addr_hi, addr_lo);
	}

	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
	if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
		u32 addr_hi, addr_lo, details;

		addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
		details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   PGLUE_B_REG_VF_ILT_ERR_DETAILS);

		DP_INFO(p_hwfn,
			"ILT error - Details %08x Details2 %08x"
			" [Address %08x:%08x]\n",
			details, tmp, addr_hi, addr_lo);
	}

	/* Clear the indications */
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		 PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
{
	DP_NOTICE(p_hwfn, false, "FW assertion!\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);

	return ECORE_INVAL;
}

static enum _ecore_status_t
ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
{
	DP_INFO(p_hwfn, "General attention 35!\n");

	return ECORE_SUCCESS;
}

#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
#define ECORE_DORQ_ATTENTION_SIZE_MASK	 (0x7f0000)
#define ECORE_DORQ_ATTENTION_SIZE_SHIFT	 (16)
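
/* Doorbell-drop handler. The size field in DORQ_REG_DB_DROP_DETAILS is in
 * units of 4 bytes, hence the '* 4' when printing it in bytes. Returning
 * ECORE_INVAL rather than ECORE_SUCCESS makes the deassertion path treat a
 * doorbell drop as a fatal attention.
 */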
static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 reason;

	reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
	    ECORE_DORQ_ATTENTION_REASON_MASK;
	if (reason) {
		u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				       DORQ_REG_DB_DROP_DETAILS);

		DP_INFO(p_hwfn->p_dev,
			"DORQ db_drop: address 0x%08x Opaque FID 0x%04x"
			" Size [bytes] 0x%08x Reason: 0x%08x\n",
			ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 DORQ_REG_DB_DROP_DETAILS_ADDRESS),
			(u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK),
			((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >>
			 ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason);
	}

	return ECORE_INVAL;
}

static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
		u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   TM_REG_INT_STS_1);

		if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
			    TM_REG_INT_STS_1_PEND_CONN_SCAN))
			return ECORE_INVAL;

		if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
			   TM_REG_INT_STS_1_PEND_CONN_SCAN))
			DP_INFO(p_hwfn,
				"TM attention on emulation - most likely"
				" a result of clock-ratios\n");
		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
		val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
		    TM_REG_INT_MASK_1_PEND_TASK_SCAN;
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);

		return ECORE_SUCCESS;
	}
#endif

	return ECORE_INVAL;
}

/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};

static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
};
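
/* ecore_int_aeu_translate() redirects an aeu_descs[] entry here when the
 * entry has ATTENTION_BB_DIFFERENT set and the adapter is a BB; the
 * entry's ATTENTION_BB() field selects the index into this table.
 */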

/* Note: aeu_invert_reg must be defined in the same bit order as the HW */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
	{
	 {			/* After Invert 1 */
	  {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
	   MAX_BLOCK_ID},
	  }
	 },

	{
	 {			/* After Invert 2 */
	  {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb,
	   BLOCK_PGLUE_B},
	  {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"SW timers #%d",
	   (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT),
	   OSAL_NULL, MAX_BLOCK_ID},
	  {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
	   BLOCK_PGLCS},
	  }
	 },

	{
	 {			/* After Invert 3 */
	  {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
	   MAX_BLOCK_ID},
	  }
	 },

	{
	 {			/* After Invert 4 */
	  {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
	   ecore_fw_assertion, MAX_BLOCK_ID},
	  {"General Attention %d",
	   (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT),
	   OSAL_NULL, MAX_BLOCK_ID},
	  {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
	   ecore_general_attention_35, MAX_BLOCK_ID},
	  {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
			 OSAL_NULL, BLOCK_NWS},
	  {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			    ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
			    OSAL_NULL, BLOCK_NWS},
	  {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
			 OSAL_NULL, BLOCK_NWM},
	  {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			    ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
			    OSAL_NULL, BLOCK_NWM},
	  {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
	  {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
	  {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
	  {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
	   MAX_BLOCK_ID},
	  {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
	  {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
	  {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
	  {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
	  {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
	  }
	 },

	{
	 {			/* After Invert 5 */
	  {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
	  {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
	  {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
	  {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
	  {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
	  {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
	  {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
	  {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
	  {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
	  {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
	  {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
	  {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
	  {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
	  {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
	  {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
	  {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
	  }
	 },

	{
	 {			/* After Invert 6 */
	  {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
	  {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
	  {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
	  {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
	  {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
	  {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
	  {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
	  {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
	  {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
	  {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
	  {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
	  {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
	  {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
	  {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
	  {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
	  {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
	  }
	 },

	{
	 {			/* After Invert 7 */
	  {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
	  {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
	  {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
	  {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
	  {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
	  {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
	  {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
	  {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
	  {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
	  {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
	  {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
	  {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
	  {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
	  {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
	  {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
	  {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
	  {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
	  }
	 },

	{
	 {			/* After Invert 8 */
	  {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
	  {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
	  {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
	  {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
	  {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
	  {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
	  {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
	  {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
	  {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
	  {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
	  {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
	  {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
	  {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
	   MAX_BLOCK_ID},
	  }
	 },

	{
	 {			/* After Invert 9 */
	  {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
	   MAX_BLOCK_ID},
	  {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
	   MAX_BLOCK_ID},
	  }
	 },

};
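
/* Within each register above, the ATTENTION_LENGTH() fields of the listed
 * entries cover all 32 bits; the parsing loops below walk the bits[] array
 * and advance the running bit index by each entry's length.
 */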

static struct aeu_invert_reg_bit *
ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
			struct aeu_invert_reg_bit *p_bit)
{
	if (!ECORE_IS_BB(p_hwfn->p_dev))
		return p_bit;

	if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
		return p_bit;

	return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
				  ATTENTION_BB_SHIFT];
}

static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
				     struct aeu_invert_reg_bit *p_bit)
{
	return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
		  ATTENTION_PARITY);
}

#define ATTN_STATE_BITS		(0xfff)
#define ATTN_BITS_MASKABLE	(0x3ff)
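
/* Twelve attention state bits in total, of which only the lower ten can be
 * masked in the IGU. Bit 8 (0x100) is the MFW/MCP indication and receives
 * special treatment in ecore_int_assertion() and ecore_int_attentions().
 */
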
struct ecore_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block *sb_attn;
	dma_addr_t sb_phys;

	/* Last seen running index */
	u16 index;

	/* A mask of the AEU bits resulting in a parity error */
	u32 parity_mask[NUM_ATTN_REGS];

	/* A pointer to the attention description structure */
	struct aeu_invert_reg *p_aeu_desc;

	/* Previously asserted attentions, which haven't been deasserted yet */
	u16 known_attn;

	/* Cleanup address for the link's general hw attention */
	u32 mfw_attn_addr;
};

static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
				 struct ecore_sb_attn_info *p_sb_desc)
{
	u16 rc = 0, index;

	OSAL_MMIOWB(p_hwfn->p_dev);

	index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
	if (p_sb_desc->index != index) {
		p_sb_desc->index = index;
		rc = ECORE_SB_ATT_IDX;
	}

	OSAL_MMIOWB(p_hwfn->p_dev);

	return rc;
}

/**
 * @brief ecore_int_assertion - handles asserted attention bits
 *
 * @param p_hwfn
 * @param asserted_bits newly asserted bits
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
						u16 asserted_bits)
{
	struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			    IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
			 sb_attn_sw->mfw_attn_addr, 0);
	}

	/* FIXME - this will change once we'll have GOOD gtt definitions */
	DIRECT_REG_WR(p_hwfn,
		      (u8 OSAL_IOMEM *) p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return ECORE_SUCCESS;
}

static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
				 enum block_id id, enum dbg_attn_type type,
				 bool b_clear)
{
	/* @DPDK */
	DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type);
}

/**
 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
 * cause of the attention
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the attention
 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
 *  this bit to this group.
 * @param p_bit_name - printable name of the attention bit
 * @param bitmask - the bit(s) of this source within the register, in place
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
			      struct aeu_invert_reg_bit *p_aeu,
			      u32 aeu_en_reg,
			      const char *p_bit_name,
			      u32 bitmask)
{
	enum _ecore_status_t rc = ECORE_INVAL;
	bool b_fatal = false;

	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
		p_bit_name, bitmask);

	/* Call callback before clearing the interrupt status */
	if (p_aeu->cb) {
		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
			p_bit_name);
		rc = p_aeu->cb(p_hwfn);
	}

	if (rc != ECORE_SUCCESS)
		b_fatal = true;

	/* Print HW block interrupt registers */
	if (p_aeu->block_index != MAX_BLOCK_ID) {
		ecore_int_attn_print(p_hwfn, p_aeu->block_index,
				     ATTN_TYPE_INTERRUPT, !b_fatal);
	}

	/* Reach assertion if attention is fatal */
	if (b_fatal) {
		DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
			  p_bit_name);

		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
	}

	/* Prevent this attention from being asserted in the future */
	if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
	    p_hwfn->p_dev->attn_clr_en) {
		u32 val;
		u32 mask = ~bitmask;
		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
		DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
			p_bit_name);
	}

	return rc;
}

/**
 * @brief ecore_int_deassertion_parity - handle a single parity AEU source
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the parity
 * @param bit_index
 */
static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
					 struct aeu_invert_reg_bit *p_aeu,
					 u8 bit_index)
{
	u32 block_id = p_aeu->block_index;

	DP_INFO(p_hwfn->p_dev, "%s[%d] parity attention is set\n",
		p_aeu->bit_name, bit_index);

	if (block_id == MAX_BLOCK_ID)
		return;

	ecore_int_attn_print(p_hwfn, block_id,
			     ATTN_TYPE_PARITY, false);

	/* In A0, there's a single parity bit for several blocks */
	if (block_id == BLOCK_BTB) {
		ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
				     ATTN_TYPE_PARITY, false);
		ecore_int_attn_print(p_hwfn, BLOCK_MCP,
				     ATTN_TYPE_PARITY, false);
	}
}

/**
 * @brief - handles deassertion of previously asserted attentions.
 *
 * @param p_hwfn
 * @param deasserted_bits - newly deasserted bits
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
						  u16 deasserted_bits)
{
	struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
	bool b_parity = false;
	u8 i, j, k, bit_idx;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Read the attention registers in the AEU */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
					  MISC_REG_AEU_AFTER_INVERT_1_IGU +
					  i * 0x4);
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
	}

	/* Handle parity attentions first */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
		u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				  MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
				  i * sizeof(u32));

		u32 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;

		/* Skip register in which no parity bit is currently set */
		if (!parities)
			continue;

		for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

			if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
			    !!(parities & (1 << bit_idx))) {
				ecore_int_deassertion_parity(p_hwfn, p_bit,
							     bit_idx);
				b_parity = true;
			}

			bit_idx += ATTENTION_LENGTH(p_bit->flags);
		}
	}

	/* Find non-parity cause for attention and act */
	for (k = 0; k < MAX_ATTN_GRPS; k++) {
		struct aeu_invert_reg_bit *p_aeu;

		/* Handle only groups whose attention is currently deasserted */
		if (!(deasserted_bits & (1 << k)))
			continue;

		for (i = 0; i < NUM_ATTN_REGS; i++) {
			u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
			    i * sizeof(u32) + k * sizeof(u32) * NUM_ATTN_REGS;
			u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
			u32 bits = aeu_inv_arr[i] & en;

			/* Skip if no bit from this group is currently set */
			if (!bits)
				continue;

			/* Find all set bits from current register which belong
			 * to current group, making them responsible for the
			 * previous assertion.
			 */
			for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
				unsigned long int bitmask;
				u8 bit, bit_len;

				/* Need to account bits with changed meaning */
				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];

				bit = bit_idx;
				bit_len = ATTENTION_LENGTH(p_aeu->flags);
				if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
					/* Skip Parity */
					bit++;
					bit_len--;
				}

				/* Find the bits relating to HW-block, then
				 * shift so they'll become LSB.
				 */
				bitmask = bits & (((1 << bit_len) - 1) << bit);
				bitmask >>= bit;

				if (bitmask) {
					u32 flags = p_aeu->flags;
					char bit_name[30];
					u8 num;

					num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
								bit_len);

					/* Some bits represent more than a
					 * single interrupt. Correctly print
					 * their name.
					 */
					if (ATTENTION_LENGTH(flags) > 2 ||
					    ((flags & ATTENTION_PAR_INT) &&
					    ATTENTION_LENGTH(flags) > 1))
						OSAL_SNPRINTF(bit_name, 30,
							      p_aeu->bit_name,
							      num);
					else
						OSAL_STRNCPY(bit_name,
							     p_aeu->bit_name,
							     30);

					/* We now need to pass bitmask in its
					 * correct position.
					 */
					bitmask <<= bit;

					/* Handle source of the attention */
					ecore_int_deassertion_aeu_bit(p_hwfn,
								      p_aeu,
								      aeu_en,
								      bit_name,
								      bitmask);
				}

				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
			}
		}
	}

	/* Clear IGU indication for the deasserted bits */
	/* FIXME - this will change once we'll have GOOD gtt definitions */
	DIRECT_REG_WR(p_hwfn,
		      (u8 OSAL_IOMEM *) p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			    IGU_REG_ATTENTION_ENABLE);
	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

	/* Clear deassertion from inner state */
	sb_attn_sw->known_attn &= ~deasserted_bits;

	return rc;
}

static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u16 index = 0, asserted_bits, deasserted_bits;
	u32 attn_bits = 0, attn_acks = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
	 */
	do {
		index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
		attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
		attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
	} while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
	p_sb_attn->sb_index = index;

	/* Assertion / deassertion are meaningful (and in correct state)
	 * only when they differ and are consistent with the known state -
	 * deassertion when there's a previous attention & a current ack,
	 * and assertion when there's a current attention with no previous
	 * attention
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
	    ~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
	    p_sb_attn_sw->known_attn;

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	else if (asserted_bits == 0x100)
		DP_INFO(p_hwfn, "MFW indication via attention\n");
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "MFW indication [deassertion]\n");

	if (asserted_bits) {
		rc = ecore_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits)
		rc = ecore_int_deassertion(p_hwfn, deasserted_bits);

	return rc;
}

static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
			      void OSAL_IOMEM *igu_addr, u32 ack_cons)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	igu_ack.sb_id_and_flags =
	    ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
	     (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
	     (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
	     (IGU_SEG_ACCESS_ATTN <<
	      IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to the same address;
	 * need to guarantee all commands will be received (in-order) by HW.
	 */
	OSAL_MMIOWB(p_hwfn->p_dev);
	OSAL_BARRIER(p_hwfn->p_dev);
}

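/* Slowpath DPC (bottom half) for the default status block: disables SB
 * acks, samples the interrupt and attention indices, dispatches attention
 * handling and any registered protocol-index callbacks, and finally acks
 * and re-enables the SB.
 */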
void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
	struct ecore_pi_info *pi_info = OSAL_NULL;
	struct ecore_sb_attn_info *sb_attn;
	struct ecore_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	if (!p_hwfn)
		return;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
	if (!sb_info) {
		DP_ERR(p_hwfn->p_dev,
		       "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for def status block. Required for both MSI-X and
	 * INTA in non-mask mode; in INTA it does no harm.
	 */
	ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->p_dev,
		       "Interrupt Status block is NULL -"
		       " cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;
		rc = ecore_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->p_dev,
		       "Attentions Status block is NULL -"
		       " cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. If not, just ack them */
	if (!(rc & ECORE_SB_EVENT_MASK)) {
		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If not valid, ack interrupts
	 * and fail.
	 */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & ECORE_SB_ATT_IDX)
		ecore_int_attentions(p_hwfn);

	if (rc & ECORE_SB_IDX) {
		int pi;

		/* Since we only looked at the SB index, it's possible more
		 * than a single protocol-index on the SB incremented.
		 * Iterate over all configured protocol indices and check
		 * whether something happened for each.
		 */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb != OSAL_NULL)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
	}

	ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}

static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_attn) {
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
				       p_sb->sb_phys,
				       SB_ATTN_ALIGNED_SIZE(p_hwfn));
	}
	OSAL_FREE(p_hwfn->p_dev, p_sb);
}

static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
		 DMA_LO(p_hwfn->p_sb_attn->sb_phys));
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
		 DMA_HI(p_hwfn->p_sb_attn->sb_phys));
}

static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
	struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
	int i, j, k;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the pointer to the AEU descriptors */
	sb_info->p_aeu_desc = aeu_descs;

	/* Calculate Parity Masks */
	OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		/* j is array index, k is bit index */
		for (j = 0, k = 0; k < 32; j++) {
			struct aeu_invert_reg_bit *p_aeu;

			p_aeu = &aeu_descs[i].bits[j];
			if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
				sb_info->parity_mask[i] |= 1 << k;

			k += ATTENTION_LENGTH(p_aeu->flags);
		}
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Attn Mask [Reg %d]: 0x%08x\n",
			   i, sb_info->parity_mask[i]);
	}

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
	    MISC_REG_AEU_GENERAL_ATTN_0;

	ecore_int_sb_attn_setup(p_hwfn, p_ptt);
}

static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	struct ecore_sb_attn_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
	if (!p_sb) {
		DP_NOTICE(p_dev, true,
			  "Failed to allocate `struct ecore_sb_attn_info'\n");
		return ECORE_NOMEM;
	}

	/* SB ring  */
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
					 SB_ATTN_ALIGNED_SIZE(p_hwfn));
	if (!p_virt) {
		DP_NOTICE(p_dev, true,
			  "Failed to allocate status block (attentions)\n");
		OSAL_FREE(p_dev, p_sb);
		return ECORE_NOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return ECORE_SUCCESS;
}

/* coalescing timeout = timeset << (timer_res + 1) */
#define ECORE_CAU_DEF_RX_USECS 24
#define ECORE_CAU_DEF_TX_USECS 48
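
/* Example with the RX default above: 24 usecs is <= 0x7F, so the code below
 * picks timer_res = 0 and timeset = 24 >> 0 = 24, giving a coalescing
 * timeout of 24 << (0 + 1) per the formula above.
 */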

void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
			     struct cau_sb_entry *p_sb_entry,
			     u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cau_state;
	u8 timer_res;

	OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	cau_state = CAU_HC_DISABLE_STATE;

	if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!p_dev->rx_coalesce_usecs)
			p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
		if (!p_dev->tx_coalesce_usecs)
			p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
	}

	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
	if (p_dev->rx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (p_dev->rx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	if (p_dev->tx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (p_dev->tx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);

	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}

void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   dma_addr_t sb_phys, u16 igu_sb_id,
			   u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
				vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&phys_addr,
				    CAU_REG_SB_ADDR_MEMORY +
				    igu_sb_id * sizeof(u64), 2, 0);
		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&sb_entry,
				    CAU_REG_SB_VAR_MEMORY +
				    igu_sb_id * sizeof(u64), 2, 0);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		/* eth will open queues for all tcs, so configure all of them
		 * properly, rather than just the active ones
		 */
		u8 num_tc = p_hwfn->hw_info.num_hw_tc;

		u8 timeset, timer_res;
		u8 i;

		/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
		if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
		ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				      ECORE_COAL_RX_STATE_MACHINE, timeset);

		if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
		for (i = 0; i < num_tc; i++) {
			ecore_int_cau_conf_pi(p_hwfn, p_ptt,
					      igu_sb_id, TX_PI(i),
					      ECORE_COAL_TX_STATE_MACHINE,
					      timeset);
		}
	}
}

void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   u16 igu_sb_id, u32 pi_index,
			   enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
{
	struct cau_pi_entry pi_entry;
	u32 sb_offset, pi_offset;

	if (IS_VF(p_hwfn->p_dev))
		return;		/* @@@TBD MichalK- VF CAU... */

	sb_offset = igu_sb_id * PIS_PER_SB;
	OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));

	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

	pi_offset = sb_offset + pi_index;
	if (p_hwfn->hw_init_done) {
		ecore_wr(p_hwfn, p_ptt,
			 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
			 *((u32 *)&(pi_entry)));
	} else {
		STORE_RT_REG(p_hwfn,
			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     *((u32 *)&(pi_entry)));
	}
}

void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_PF(p_hwfn->p_dev))
		ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
				      sb_info->igu_sb_id, 0, 0);
}

/**
 * @brief ecore_get_igu_sb_id - given a sw sb_id return the
 *        igu_sb_id
 *
 * @param p_hwfn
 * @param sb_id
 *
 * @return u16
 */
static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
{
	u16 igu_sb_id;

	/* Assuming continuous set of IGU SBs dedicated for given PF */
	if (sb_id == ECORE_SP_SB_ID)
		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	else if (IS_PF(p_hwfn->p_dev))
		igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
	else
		igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id == ECORE_SP_SB_ID)
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);

	return igu_sb_id;
}

enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_sb_info *sb_info,
				       void *sb_virt_addr,
				       dma_addr_t sb_phy_addr, u16 sb_id)
{
	sb_info->sb_virt = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id != ECORE_SP_SB_ID) {
		p_hwfn->sbs_info[sb_id] = sb_info;
		p_hwfn->num_sbs++;
	}
#ifdef ECORE_CONFIG_DIRECT_HWFN
	sb_info->p_hwfn = p_hwfn;
#endif
	sb_info->p_dev = p_hwfn->p_dev;

	/* The igu address will hold the absolute address that needs to be
	 * written to for a specific status block
	 */
	if (IS_PF(p_hwfn->p_dev)) {
		sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
		    GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);

	} else {
		sb_info->igu_addr =
		    (u8 OSAL_IOMEM *)p_hwfn->regview +
		    PXP_VF_BAR0_START_IGU +
		    ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
	}

	sb_info->flags |= ECORE_SB_INFO_INIT;

	ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
					  struct ecore_sb_info *sb_info,
					  u16 sb_id)
{
	if (sb_id == ECORE_SP_SB_ID) {
		DP_ERR(p_hwfn, "Do not free the SP SB using this function\n");
		return ECORE_INVAL;
	}

	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
		p_hwfn->sbs_info[sb_id] = OSAL_NULL;
		p_hwfn->num_sbs--;
	}

	return ECORE_SUCCESS;
}

static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

	if (!p_sb)
		return;

	if (p_sb->sb_info.sb_virt) {
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_sb->sb_info.sb_virt,
				       p_sb->sb_info.sb_phys,
				       SB_ALIGNED_SIZE(p_hwfn));
	}

	OSAL_FREE(p_hwfn->p_dev, p_sb);
}
1597 
1598 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
1599 						  struct ecore_ptt *p_ptt)
1600 {
1601 	struct ecore_sb_sp_info *p_sb;
1602 	dma_addr_t p_phys = 0;
1603 	void *p_virt;
1604 
1605 	/* SB struct */
1606 	p_sb =
1607 	    OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
1608 		       sizeof(*p_sb));
1609 	if (!p_sb) {
1610 		DP_NOTICE(p_hwfn, true,
1611 			  "Failed to allocate `struct ecore_sb_sp_info'\n");
1612 		return ECORE_NOMEM;
1613 	}
1614 
1615 	/* Status block DMA memory */
1616 	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
1617 					 &p_phys, SB_ALIGNED_SIZE(p_hwfn));
1618 	if (!p_virt) {
1619 		DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
1620 		OSAL_FREE(p_hwfn->p_dev, p_sb);
1621 		return ECORE_NOMEM;
1622 	}
1623 
1624 	/* Status Block setup */
1625 	p_hwfn->p_sp_sb = p_sb;
1626 	ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
1627 			  p_virt, p_phys, ECORE_SP_SB_ID);
1628 
1629 	OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1630 
1631 	return ECORE_SUCCESS;
1632 }
1633 
1634 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
1635 					   ecore_int_comp_cb_t comp_cb,
1636 					   void *cookie,
1637 					   u8 *sb_idx, __le16 **p_fw_cons)
1638 {
1639 	struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1640 	enum _ecore_status_t rc = ECORE_NOMEM;
1641 	u8 pi;
1642 
1643 	/* Look for a free index */
1644 	for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1645 		if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
1646 			continue;
1647 
1648 		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1649 		p_sp_sb->pi_info_arr[pi].cookie = cookie;
1650 		*sb_idx = pi;
1651 		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
1652 		rc = ECORE_SUCCESS;
1653 		break;
1654 	}
1655 
1656 	return rc;
1657 }
1658 
1659 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
1660 {
1661 	struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1662 
1663 	if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
1664 		return ECORE_NOMEM;
1665 
1666 	p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
1667 	p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
1668 	return ECORE_SUCCESS;
1669 }
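
/* Illustrative sketch of slowpath PI callback registration (not part of
 * the driver build). "example_*" names are hypothetical; the callback
 * prototype is assumed to match ecore_int_comp_cb_t from ecore_int.h.
 */
#if 0
static void example_comp_cb(struct ecore_hwfn *p_hwfn, void *cookie)
{
	/* Invoked from the slowpath DPC when this PI advances */
}

static enum _ecore_status_t example_register_pi(struct ecore_hwfn *p_hwfn)
{
	enum _ecore_status_t rc;
	__le16 *p_fw_cons;
	u8 sb_idx;

	rc = ecore_int_register_cb(p_hwfn, example_comp_cb,
				   p_hwfn /* cookie */,
				   &sb_idx, &p_fw_cons);
	if (rc != ECORE_SUCCESS)
		return rc;	/* all PIS_PER_SB indices are taken */

	/* ... later, when the protocol is torn down ... */
	return ecore_int_unregister_cb(p_hwfn, sb_idx);
}
#endif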
1670 
1671 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
1672 {
1673 	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
1674 }
1675 
1676 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
1677 			      struct ecore_ptt *p_ptt,
1678 			      enum ecore_int_mode int_mode)
1679 {
1680 	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
1681 
1682 #ifndef ASIC_ONLY
1683 	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1684 		DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
1685 		igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN;
1686 	}
1687 #endif
1688 
1689 	p_hwfn->p_dev->int_mode = int_mode;
1690 	switch (p_hwfn->p_dev->int_mode) {
1691 	case ECORE_INT_MODE_INTA:
1692 		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
1693 		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1694 		break;
1695 
1696 	case ECORE_INT_MODE_MSI:
1697 		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1698 		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1699 		break;
1700 
1701 	case ECORE_INT_MODE_MSIX:
1702 		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1703 		break;
1704 	case ECORE_INT_MODE_POLL:
1705 		break;
1706 	}
1707 
1708 	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
1709 }
1710 
1711 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
1712 				      struct ecore_ptt *p_ptt)
1713 {
1714 #ifndef ASIC_ONLY
1715 	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1716 		DP_INFO(p_hwfn,
1717 			"FPGA - Don't enable Attentions in IGU and MISC\n");
1718 		return;
1719 	}
1720 #endif
1721 
1722 	/* Configure AEU signal change to produce attentions */
1723 	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
1724 	ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
1725 	ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
1726 	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
1727 
1728 	/* Flush the writes to IGU */
1729 	OSAL_MMIOWB(p_hwfn->p_dev);
1730 
1731 	/* Unmask AEU signals toward IGU */
1732 	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
1733 }
1734 
1735 enum _ecore_status_t
1736 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1737 		     enum ecore_int_mode int_mode)
1738 {
1739 	enum _ecore_status_t rc = ECORE_SUCCESS;
1740 	u32 tmp;
1741 
1742 	/* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
1743 	 * attentions. Since we're still waiting for an answer from BRCM
1744 	 * regarding this attention, simply mask it in the meanwhile.
1745 	 */
1746 	tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
1747 	tmp &= ~0x800;
1748 	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
1749 
1750 	ecore_int_igu_enable_attn(p_hwfn, p_ptt);
1751 
1752 	if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
1753 		rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
1754 		if (rc != ECORE_SUCCESS) {
1755 			DP_NOTICE(p_hwfn, true,
1756 				  "Slowpath IRQ request failed\n");
1757 			return ECORE_NORESOURCES;
1758 		}
1759 		p_hwfn->b_int_requested = true;
1760 	}
1761 
1762 	/* Enable interrupt Generation */
1763 	ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
1764 
1765 	p_hwfn->b_int_enabled = 1;
1766 
1767 	return rc;
1768 }
1769 
1770 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
1771 			       struct ecore_ptt *p_ptt)
1772 {
1773 	p_hwfn->b_int_enabled = 0;
1774 
1775 	if (IS_VF(p_hwfn->p_dev))
1776 		return;
1777 
1778 	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
1779 }
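
/* Illustrative sketch of the interrupt enable/disable flow (not part of
 * the driver build; "example_*" is hypothetical). A PTT window is needed
 * for the IGU register writes.
 */
#if 0
static enum _ecore_status_t example_int_toggle(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	enum _ecore_status_t rc;

	if (!p_ptt)
		return ECORE_AGAIN;

	/* Request the slowpath IRQ and enable IGU interrupt generation */
	rc = ecore_int_igu_enable(p_hwfn, p_ptt, ECORE_INT_MODE_MSIX);

	/* ... on teardown, mask the function in the IGU again ... */
	if (rc == ECORE_SUCCESS)
		ecore_int_igu_disable_int(p_hwfn, p_ptt);

	ecore_ptt_release(p_hwfn, p_ptt);
	return rc;
}
#endif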
1780 
1781 #define IGU_CLEANUP_SLEEP_LENGTH		(1000)
1782 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
1783 				     struct ecore_ptt *p_ptt,
1784 				     u32 sb_id, bool cleanup_set, u16 opaque_fid)
1785 {
1786 	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
1787 	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
1788 	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
1789 	u8 type = 0;		/* FIXME MichalS type??? */
1790 
1791 	OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
1792 			   IGU_REG_CLEANUP_STATUS_0) != 0x200);
1793 
1794 	/* Use the Control Command Register to perform the cleanup. There is an
1795 	 * option to do this via the IGU BAR, but then it can't be used for VFs.
1796 	 */
1797 
1798 	/* Set the data field */
1799 	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
1800 	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
1801 	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
1802 
1803 	/* Set the control register */
1804 	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
1805 	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
1806 	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
1807 
1808 	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
1809 
1810 	OSAL_BARRIER(p_hwfn->p_dev);
1811 
1812 	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
1813 
1814 	/* Flush the write to IGU */
1815 	OSAL_MMIOWB(p_hwfn->p_dev);
1816 
1817 	/* calculate where to read the status bit from */
1818 	sb_bit = 1 << (sb_id % 32);
1819 	sb_bit_addr = sb_id / 32 * sizeof(u32);
1820 
1821 	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
1822 
1823 	/* Now wait for the command to complete */
1824 	while (--sleep_cnt) {
1825 		val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
1826 		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
1827 			break;
1828 		OSAL_MSLEEP(5);
1829 	}
1830 
1831 	if (!sleep_cnt)
1832 		DP_NOTICE(p_hwfn, true,
1833 			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
1834 			  val, sb_id);
1835 }
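
/* Worked example of the status-bit math above (hypothetical sb_id): for
 * sb_id == 40 and type == 0, sb_bit = 1 << (40 % 32) = 0x100, and the bit
 * is polled at IGU_REG_CLEANUP_STATUS_0 + (40 / 32) * 4, i.e. in the
 * second 32-bit status word.
 */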
1836 
1837 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
1838 				       struct ecore_ptt *p_ptt,
1839 				       u32 sb_id, u16 opaque, bool b_set)
1840 {
1841 	int pi, i;
1842 
1843 	/* Set */
1844 	if (b_set)
1845 		ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
1846 
1847 	/* Clear */
1848 	ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
1849 
1850 	/* Wait for the IGU SB cleanup to complete */
1851 	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
1852 		u32 val;
1853 
1854 		val = ecore_rd(p_hwfn, p_ptt,
1855 			       IGU_REG_WRITE_DONE_PENDING +
1856 			       ((sb_id / 32) * 4));
1857 		if (val & (1 << (sb_id % 32)))
1858 			OSAL_UDELAY(10);
1859 		else
1860 			break;
1861 	}
1862 	if (i == IGU_CLEANUP_SLEEP_LENGTH)
1863 		DP_NOTICE(p_hwfn, true,
1864 			  "Failed cleanup: SB[0x%08x] still set in WRITE_DONE_PENDING\n",
1865 			  sb_id);
1866 
1867 	/* Clear the CAU for the SB */
1868 	for (pi = 0; pi < 12; pi++)
1869 		ecore_wr(p_hwfn, p_ptt,
1870 			 CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
1871 }
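
/* Worked example of the CAU PI layout above (hypothetical sb_id): each SB
 * owns 12 consecutive 32-bit PI entries, so PI 3 of SB 40 sits at
 * CAU_REG_PI_MEMORY + (40 * 12 + 3) * 4.
 */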
1872 
1873 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
1874 				struct ecore_ptt *p_ptt,
1875 				bool b_set, bool b_slowpath)
1876 {
1877 	u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
1878 	u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
1879 	u32 sb_id = 0, val = 0;
1880 
1881 	/* @@@TBD MichalK temporary... should be moved to init-tool... */
1882 	val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
1883 	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
1884 	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
1885 	ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
1886 	/* end temporary */
1887 
1888 	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1889 		   "IGU cleaning SBs [%d,...,%d]\n",
1890 		   igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
1891 
1892 	for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
1893 		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
1894 						  p_hwfn->hw_info.opaque_fid,
1895 						  b_set);
1896 
1897 	if (!b_slowpath)
1898 		return;
1899 
1900 	sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1901 	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1902 		   "IGU cleaning slowpath SB [%d]\n", sb_id);
1903 	ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
1904 					  p_hwfn->hw_info.opaque_fid, b_set);
1905 }
1906 
1907 static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
1908 					struct ecore_ptt *p_ptt, u16 sb_id)
1909 {
1910 	u32 val = ecore_rd(p_hwfn, p_ptt,
1911 			   IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
1912 	struct ecore_igu_block *p_block;
1913 
1914 	p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
1915 
1916 	/* Stop scanning when the first invalid PF entry is hit */
1917 	if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
1918 	    GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
1919 		goto out;
1920 
1921 	/* Fill the block information */
1922 	p_block->status = ECORE_IGU_STATUS_VALID;
1923 	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
1924 	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
1925 	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
1926 
1927 	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1928 		   "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
1929 		   " is_pf = %d vector_num = 0x%x\n",
1930 		   sb_id, val, p_block->function_id, p_block->is_pf,
1931 		   p_block->vector_number);
1932 
1933 out:
1934 	return val;
1935 }
1936 
1937 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
1938 					    struct ecore_ptt *p_ptt)
1939 {
1940 	struct ecore_igu_info *p_igu_info;
1941 	struct ecore_igu_block *p_block;
1942 	u32 min_vf = 0, max_vf = 0, val;
1943 	u16 sb_id, last_iov_sb_id = 0;
1944 	u16 prev_sb_id = 0xFF;
1945 
1946 	p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
1947 						GFP_KERNEL,
1948 						sizeof(*p_igu_info));
1949 	if (!p_hwfn->hw_info.p_igu_info)
1950 		return ECORE_NOMEM;
1951 
1952 	OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));
1953 
1954 	p_igu_info = p_hwfn->hw_info.p_igu_info;
1955 
1956 	/* Initialize base sb / sb cnt for PFs and VFs */
1957 	p_igu_info->igu_base_sb = 0xffff;
1958 	p_igu_info->igu_sb_cnt = 0;
1959 	p_igu_info->igu_dsb_id = 0xffff;
1960 	p_igu_info->igu_base_sb_iov = 0xffff;
1961 
1962 	if (p_hwfn->p_dev->p_iov_info) {
1963 		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
1964 
1965 		min_vf = p_iov->first_vf_in_pf;
1966 		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
1967 	}
1968 	for (sb_id = 0;
1969 	     sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1970 	     sb_id++) {
1971 		p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
1972 		val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
1973 		if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
1974 		    GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
1975 			break;
1976 
1977 		if (p_block->is_pf) {
1978 			if (p_block->function_id == p_hwfn->rel_pf_id) {
1979 				p_block->status |= ECORE_IGU_STATUS_PF;
1980 
1981 				if (p_block->vector_number == 0) {
1982 					if (p_igu_info->igu_dsb_id == 0xffff)
1983 						p_igu_info->igu_dsb_id = sb_id;
1984 				} else {
1985 					if (p_igu_info->igu_base_sb == 0xffff) {
1986 						p_igu_info->igu_base_sb = sb_id;
1987 					} else if (prev_sb_id != sb_id - 1) {
1988 						DP_NOTICE(p_hwfn->p_dev, false,
1989 							  "Consecutive IGU"
1990 							  " vectors for HWFN"
1991 							  " %x are broken\n",
1992 							  p_hwfn->rel_pf_id);
1993 						break;
1994 					}
1995 					prev_sb_id = sb_id;
1996 					/* we don't count the default */
1997 					(p_igu_info->igu_sb_cnt)++;
1998 				}
1999 			}
2000 		} else {
2001 			if ((p_block->function_id >= min_vf) &&
2002 			    (p_block->function_id < max_vf)) {
2003 				/* Available for VFs of this PF */
2004 				if (p_igu_info->igu_base_sb_iov == 0xffff) {
2005 					p_igu_info->igu_base_sb_iov = sb_id;
2006 				} else if (last_iov_sb_id != sb_id - 1) {
2007 					if (!val)
2008 						DP_VERBOSE(p_hwfn->p_dev,
2009 							   ECORE_MSG_INTR,
2010 							   "First uninitialized"
2011 							   " IGU CAM entry at"
2012 							   " index 0x%04x\n",
2013 							   sb_id);
2014 					else
2015 						DP_NOTICE(p_hwfn->p_dev, false,
2016 							  "Consecutive IGU"
2017 							  " vectors for HWFN"
2018 							  " %x VFs are broken"
2019 							  " [jumps from %04x"
2020 							  " to %04x]\n",
2021 							  p_hwfn->rel_pf_id,
2022 							  last_iov_sb_id,
2023 							  sb_id);
2024 					break;
2025 				}
2026 				p_block->status |= ECORE_IGU_STATUS_FREE;
2027 				p_hwfn->hw_info.p_igu_info->free_blks++;
2028 				last_iov_sb_id = sb_id;
2029 			}
2030 		}
2031 	}
2032 
2033 	/* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
2034 	 * the number of VF SBs [especially for the first VF on an engine, as
2035 	 * we can't differentiate between empty entries and its own entries].
2036 	 * Since we don't really support more SBs than VFs today, prevent any
2037 	 * such configuration by sanitizing the number of SBs to equal the
2038 	 * number of VFs.
2039 	 */
2040 	if (IS_PF_SRIOV(p_hwfn)) {
2041 		u16 total_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
2042 
2043 		if (total_vfs < p_igu_info->free_blks) {
2044 			DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
2045 				   "Limiting number of SBs for IOV - %04x --> %04x\n",
2046 				   p_igu_info->free_blks,
2047 				   total_vfs);
2048 			p_igu_info->free_blks = total_vfs;
2049 		} else if (total_vfs > p_igu_info->free_blks) {
2050 			DP_NOTICE(p_hwfn, true,
2051 				  "IGU has only %04x SBs for VFs while the device has %04x VFs\n",
2052 				  p_igu_info->free_blks, total_vfs);
2053 			return ECORE_INVAL;
2054 		}
2055 	}
2056 
2057 	p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
2058 
2059 	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2060 		   "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
2061 		   "igu_dsb_id=0x%x\n",
2062 		   p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
2063 		   p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
2064 		   p_igu_info->igu_dsb_id);
2065 
2066 	if (p_igu_info->igu_base_sb == 0xffff ||
2067 	    p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
2068 		DP_NOTICE(p_hwfn, true,
2069 			  "IGU CAM returned invalid values igu_base_sb=0x%x "
2070 			  "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
2071 			  p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
2072 			  p_igu_info->igu_dsb_id);
2073 		return ECORE_INVAL;
2074 	}
2075 
2076 	return ECORE_SUCCESS;
2077 }
2078 
2079 /**
2080  * @brief Initialize IGU runtime registers
2081  *
2082  * @param p_hwfn
2083  */
2084 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
2085 {
2086 	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
2087 
2088 	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
2089 }
2090 
2091 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
2092 			  IGU_CMD_INT_ACK_BASE)
2093 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
2094 			  IGU_CMD_INT_ACK_BASE)
2095 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
2096 {
2097 	u32 intr_status_hi = 0, intr_status_lo = 0;
2098 	u64 intr_status = 0;
2099 
2100 	intr_status_lo = REG_RD(p_hwfn,
2101 				GTT_BAR0_MAP_REG_IGU_CMD +
2102 				LSB_IGU_CMD_ADDR * 8);
2103 	intr_status_hi = REG_RD(p_hwfn,
2104 				GTT_BAR0_MAP_REG_IGU_CMD +
2105 				MSB_IGU_CMD_ADDR * 8);
2106 	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
2107 
2108 	return intr_status;
2109 }
2110 
2111 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
2112 {
2113 	OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
2114 	p_hwfn->b_sp_dpc_enabled = true;
2115 }
2116 
2117 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
2118 {
2119 	p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
2120 	if (!p_hwfn->sp_dpc)
2121 		return ECORE_NOMEM;
2122 
2123 	return ECORE_SUCCESS;
2124 }
2125 
2126 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
2127 {
2128 	OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
2129 }
2130 
2131 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
2132 				     struct ecore_ptt *p_ptt)
2133 {
2134 	enum _ecore_status_t rc = ECORE_SUCCESS;
2135 
2136 	rc = ecore_int_sp_dpc_alloc(p_hwfn);
2137 	if (rc != ECORE_SUCCESS) {
2138 		DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
2139 		return rc;
2140 	}
2141 
2142 	rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
2143 	if (rc != ECORE_SUCCESS) {
2144 		DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
2145 		return rc;
2146 	}
2147 
2148 	rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
2149 	if (rc != ECORE_SUCCESS)
2150 		DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");
2151 
2152 	return rc;
2153 }
2154 
2155 void ecore_int_free(struct ecore_hwfn *p_hwfn)
2156 {
2157 	ecore_int_sp_sb_free(p_hwfn);
2158 	ecore_int_sb_attn_free(p_hwfn);
2159 	ecore_int_sp_dpc_free(p_hwfn);
2160 }
2161 
2162 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
2163 {
2164 	if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
2165 		return;
2166 
2167 	ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
2168 	ecore_int_sb_attn_setup(p_hwfn, p_ptt);
2169 	ecore_int_sp_dpc_setup(p_hwfn);
2170 }
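
/* Illustrative sketch of the overall slowpath-interrupt lifecycle (not
 * part of the driver build; "example_*" is hypothetical): allocate once,
 * program the hardware on each (re)load, free on removal.
 */
#if 0
static enum _ecore_status_t example_int_init(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;

	rc = ecore_int_alloc(p_hwfn, p_ptt);	/* DPC + SP SB + attn SB */
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_int_setup(p_hwfn, p_ptt);		/* program SBs, init DPC */

	/* ... device operates ... */

	ecore_int_free(p_hwfn);
	return ECORE_SUCCESS;
}
#endif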
2171 
2172 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
2173 			   struct ecore_sb_cnt_info *p_sb_cnt_info)
2174 {
2175 	struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info;
2176 
2177 	if (!info || !p_sb_cnt_info)
2178 		return;
2179 
2180 	p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
2181 	p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
2182 	p_sb_cnt_info->sb_free_blk = info->free_blks;
2183 }
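
/* Illustrative sketch (not part of the driver build; "example_*" is
 * hypothetical): a caller sizing its MSI-X vector request from the IGU
 * CAM-derived counters.
 */
#if 0
static u16 example_num_vectors(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_cnt_info sb_cnt_info;

	OSAL_MEMSET(&sb_cnt_info, 0, sizeof(sb_cnt_info));
	ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);

	/* One vector per fastpath SB plus one for the slowpath SB */
	return (u16)(sb_cnt_info.sb_cnt + 1);
}
#endif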
2184 
2185 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
2186 {
2187 	int i;
2188 
2189 	for_each_hwfn(p_dev, i)
2190 		p_dev->hwfns[i].b_int_requested = false;
2191 }
2192 
2193 void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable)
2194 {
2195 	p_dev->attn_clr_en = clr_enable;
2196 }
2197 
2198 enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
2199 					     struct ecore_ptt *p_ptt,
2200 					     u8 timer_res, u16 sb_id, bool tx)
2201 {
2202 	struct cau_sb_entry sb_entry;
2203 	enum _ecore_status_t rc;
2204 
2205 	if (!p_hwfn->hw_init_done) {
2206 		DP_ERR(p_hwfn, "hardware not initialized yet\n");
2207 		return ECORE_INVAL;
2208 	}
2209 
2210 	rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2211 				 sb_id * sizeof(u64),
2212 				 (u64)(osal_uintptr_t)&sb_entry, 2, 0);
2213 	if (rc != ECORE_SUCCESS) {
2214 		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2215 		return rc;
2216 	}
2217 
2218 	if (tx)
2219 		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
2220 	else
2221 		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
2222 
2223 	rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
2224 				 (u64)(osal_uintptr_t)&sb_entry,
2225 				 CAU_REG_SB_VAR_MEMORY +
2226 				 sb_id * sizeof(u64), 2, 0);
2227 	if (rc != ECORE_SUCCESS) {
2228 		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
2229 		return rc;
2230 	}
2231 
2232 	return rc;
2233 }
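
/* Illustrative sketch (not part of the driver build; the values are
 * hypothetical): raising the Rx coalescing timer resolution of SB 5 via
 * the read-modify-write helper above.
 */
#if 0
static enum _ecore_status_t example_set_rx_timer_res(struct ecore_hwfn *p_hwfn,
						     struct ecore_ptt *p_ptt)
{
	/* timer_res selects the CAU timer tick; 0x2 is an arbitrary value */
	return ecore_int_set_timer_res(p_hwfn, p_ptt, 0x2,
				       5 /* sb_id */, false /* Rx */);
}
#endif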
2234 
2235 enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
2236 					  struct ecore_ptt *p_ptt,
2237 					  struct ecore_sb_info *p_sb,
2238 					  struct ecore_sb_info_dbg *p_info)
2239 {
2240 	u16 sbid = p_sb->igu_sb_id;
2241 	int i;
2242 
2243 	if (IS_VF(p_hwfn->p_dev))
2244 		return ECORE_INVAL;
2245 
2246 	if (sbid >= NUM_OF_SBS(p_hwfn->p_dev))
2247 		return ECORE_INVAL;
2248 
2249 	p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
2250 				    IGU_REG_PRODUCER_MEMORY + sbid * 4);
2251 	p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
2252 				    IGU_REG_CONSUMER_MEM + sbid * 4);
2253 
2254 	for (i = 0; i < PIS_PER_SB; i++)
2255 		p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
2256 					      CAU_REG_PI_MEMORY +
2257 					      sbid * 4 * PIS_PER_SB + i * 4);
2258 
2259 	return ECORE_SUCCESS;
2260 }
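
/* Illustrative sketch (not part of the driver build; "example_*" is
 * hypothetical): dumping a status block's IGU producer/consumer and its
 * CAU protocol indices via the helper above.
 */
#if 0
static void example_dump_sb(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    struct ecore_sb_info *p_sb)
{
	struct ecore_sb_info_dbg dbg;
	int i;

	if (ecore_int_get_sb_dbg(p_hwfn, p_ptt, p_sb, &dbg) != ECORE_SUCCESS)
		return;

	DP_INFO(p_hwfn, "SB %04x: prod %08x cons %08x\n",
		p_sb->igu_sb_id, dbg.igu_prod, dbg.igu_cons);
	for (i = 0; i < PIS_PER_SB; i++)
		DP_INFO(p_hwfn, "  PI[%d] = %04x\n", i, dbg.pi[i]);
}
#endif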
2261