xref: /dpdk/drivers/net/bnx2x/ecore_init.h (revision 0cb4150f82ff77e1c74826ae425f36388d9172fb)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2007-2013 Broadcom Corporation.
3  *
4  * Eric Davis        <edavis@broadcom.com>
5  * David Christensen <davidch@broadcom.com>
6  * Gary Zambrano     <zambrano@broadcom.com>
7  *
8  * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
9  * Copyright (c) 2015-2018 Cavium Inc.
10  * All rights reserved.
11  * www.cavium.com
12  */
13 
14 #ifndef ECORE_INIT_H
15 #define ECORE_INIT_H
16 
17 /* Init operation types and structures */
/* Init operation opcodes, stored in the 8-bit 'op' field of the op
 * structs below.
 */
enum {
	OP_RD = 0x1,	/* read a single register */
	OP_WR,		/* write a single register */
	OP_SW,		/* copy a string to the device */
	OP_ZR,		/* clear memory */
	OP_ZP,		/* unzip then copy with DMAE */
	OP_WR_64,	/* write 64 bit pattern */
	OP_WB,		/* copy a string using DMAE */
	OP_WB_ZR,	/* Clear a string using DMAE or indirect-wr */
	OP_IF_MODE_OR,  /* Skip the following ops if all init modes don't match */
	OP_IF_MODE_AND, /* Skip the following ops if any init modes don't match */
	OP_MAX
};
31 
/* Init stage boundary markers (see BLOCK_OPS_IDX(), whose 'end' argument
 * selects between the start and end index of a block stage).
 */
enum {
	STAGE_START,
	STAGE_END,
};
36 
/* Returns the index of start or end of a specific block stage in ops array.
 * Each (block, stage) pair owns two consecutive slots; 'end' selects the
 * boundary: 0 for the start index, 1 for the end index.
 */
#define BLOCK_OPS_IDX(block, stage, end) \
	(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
40 
41 
/* structs for the various opcodes */

/* Generic view of an init op: 8-bit opcode, 24-bit register offset and
 * one 32-bit operand.
 */
struct raw_op {
	uint32_t op:8;
	uint32_t offset:24;
	uint32_t raw_data;
};
48 
/* Layout of an OP_RD (single register read) op */
struct op_read {
	uint32_t op:8;
	uint32_t offset:24;
	uint32_t val;
};
54 
/* Layout of an OP_WR (single register write) op */
struct op_write {
	uint32_t op:8;
	uint32_t offset:24;
	uint32_t val;
};
60 
/* Layout of an array-write op (e.g. OP_SW/OP_WB/OP_ZP).  Names suggest
 * 'data_off' is an offset into the init data and 'data_len' an element
 * count; the two 16-bit halves swap places with host endianness so the
 * packed 32-bit word has a fixed wire layout.
 */
struct op_arr_write {
	uint32_t op:8;
	uint32_t offset:24;
#ifdef __BIG_ENDIAN
	uint16_t data_len;
	uint16_t data_off;
#else /* __LITTLE_ENDIAN */
	uint16_t data_off;
	uint16_t data_len;
#endif
};
72 
/* Layout of a clear-memory op (OP_ZR/OP_WB_ZR); 'len' names the extent
 * of the region to clear.
 */
struct op_zero {
	uint32_t op:8;
	uint32_t offset:24;
	uint32_t len;
};
78 
/* Layout of an OP_IF_MODE_OR/OP_IF_MODE_AND op: 'mode_bit_map' holds the
 * init-mode bits to test; 'cmd_offset' presumably counts the following
 * ops to skip on mismatch -- confirm against the ops generator.
 */
struct op_if_mode {
	uint32_t op:8;
	uint32_t cmd_offset:24;
	uint32_t mode_bit_map;
};
84 
85 
/* A single init op; which member applies is determined by the 8-bit
 * opcode shared by every variant.
 */
union init_op {
	struct op_read		read;
	struct op_write		write;
	struct op_arr_write	arr_wr;
	struct op_zero		zero;
	struct raw_op		raw;
	struct op_if_mode	if_mode;
};
94 
95 
/* Init Phases: common, per-port and per-PF stages.  NUM_OF_INIT_PHASES is
 * used by BLOCK_OPS_IDX() to index the ops array.
 */
enum {
	PHASE_COMMON,
	PHASE_PORT0,
	PHASE_PORT1,
	PHASE_PF0,
	PHASE_PF1,
	PHASE_PF2,
	PHASE_PF3,
	PHASE_PF4,
	PHASE_PF5,
	PHASE_PF6,
	PHASE_PF7,
	NUM_OF_INIT_PHASES
};
111 
/* Init Modes: bit flags tested via INIT_MODE_FLAGS() (see
 * ecore_map_q_cos) and by the OP_IF_MODE_OR/AND conditional ops.
 */
enum {
	MODE_ASIC                      = 0x00000001,
	MODE_FPGA                      = 0x00000002,
	MODE_EMUL                      = 0x00000004,
	MODE_E2                        = 0x00000008,
	MODE_E3                        = 0x00000010,
	MODE_PORT2                     = 0x00000020,
	MODE_PORT4                     = 0x00000040,
	MODE_SF                        = 0x00000080,
	MODE_MF                        = 0x00000100,
	MODE_MF_SD                     = 0x00000200,
	MODE_MF_SI                     = 0x00000400,
	MODE_MF_AFEX                   = 0x00000800,
	MODE_E3_A0                     = 0x00001000,
	MODE_E3_B0                     = 0x00002000,
	MODE_COS3                      = 0x00004000,
	MODE_COS6                      = 0x00008000,
	MODE_LITTLE_ENDIAN             = 0x00010000,
	MODE_BIG_ENDIAN                = 0x00020000,
};
133 
/* Init Blocks: HW block identifiers, used as the 'block' argument of
 * BLOCK_OPS_IDX().
 */
enum {
	BLOCK_ATC,
	BLOCK_BRB1,
	BLOCK_CCM,
	BLOCK_CDU,
	BLOCK_CFC,
	BLOCK_CSDM,
	BLOCK_CSEM,
	BLOCK_DBG,
	BLOCK_DMAE,
	BLOCK_DORQ,
	BLOCK_HC,
	BLOCK_IGU,
	BLOCK_MISC,
	BLOCK_NIG,
	BLOCK_PBF,
	BLOCK_PGLUE_B,
	BLOCK_PRS,
	BLOCK_PXP2,
	BLOCK_PXP,
	BLOCK_QM,
	BLOCK_SRC,
	BLOCK_TCM,
	BLOCK_TM,
	BLOCK_TSDM,
	BLOCK_TSEM,
	BLOCK_UCM,
	BLOCK_UPB,
	BLOCK_USDM,
	BLOCK_USEM,
	BLOCK_XCM,
	BLOCK_XPB,
	BLOCK_XSDM,
	BLOCK_XSEM,
	BLOCK_MISC_AEU,
	NUM_OF_INIT_BLOCKS
};
172 
173 #include "bnx2x.h"
174 
/* Number of vnics per port in 2-port mode */
#define ECORE_PORT2_MODE_NUM_VNICS 4


/* QM queue numbers per traffic type */
#define ECORE_ETH_Q		0
#define ECORE_TOE_Q		3
#define ECORE_TOE_ACK_Q		6
#define ECORE_ISCSI_Q		9
#define ECORE_ISCSI_ACK_Q	11
#define ECORE_FCOE_Q		10

/* Number of vnics per port in 4-port mode */
#define ECORE_PORT4_MODE_NUM_VNICS 2

/* COS offset for port1 in E3 B0 4port mode */
#define ECORE_E3B0_PORT1_COS_OFFSET 3

/* QM Register addresses */
/* per-queue VOQ index register: one 32-bit register per PF queue */
#define ECORE_Q_VOQ_REG_ADDR(pf_q_num)\
	(QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
/* per-COS VOQ queue-mask register: two registers (LSB/MSB) per COS,
 * 32 queue bits per register (hence pf_q_num >> 5)
 */
#define ECORE_VOQ_Q_REG_ADDR(cos, pf_q_num)\
	(QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
/* command-queue byte-credit register: 16 queues (2 bits each) per
 * register (hence pf_q_num >> 4)
 */
#define ECORE_Q_CMDQ_REG_ADDR(pf_q_num)\
	(QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))

/* extracts the QM queue number for the specified port and vnic */
#define ECORE_PF_Q_NUM(q_num, port, vnic)\
	((((port) << 1) | (vnic)) * 16 + (q_num))
204 
205 
206 /* Maps the specified queue to the specified COS */
/* Remap QM queue 'q_num' from its current COS to 'new_cos' for every vnic
 * of this port.  Performs read-modify-write of the per-queue VOQ index,
 * the per-COS VOQ queue masks and (on E2/E3 A0 only) the command-queue
 * credit map.  No-op when the queue already maps to 'new_cos'.
 */
static inline void ecore_map_q_cos(struct bnx2x_softc *sc, uint32_t q_num, uint32_t new_cos)
{
	/* find current COS mapping */
	uint32_t curr_cos = REG_RD(sc, QM_REG_QVOQIDX_0 + q_num * 4);

	/* check if queue->COS mapping has changed */
	if (curr_cos != new_cos) {
		uint32_t num_vnics = ECORE_PORT2_MODE_NUM_VNICS;
		uint32_t reg_addr, reg_bit_map, vnic;

		/* update parameters for 4port mode */
		if (INIT_MODE_FLAGS(sc) & MODE_PORT4) {
			num_vnics = ECORE_PORT4_MODE_NUM_VNICS;
			if (SC_PORT(sc)) {
				/* port1's COS registers live at a fixed
				 * offset in E3 B0 4port mode
				 */
				curr_cos += ECORE_E3B0_PORT1_COS_OFFSET;
				new_cos += ECORE_E3B0_PORT1_COS_OFFSET;
			}
		}

		/* change queue mapping for each VNIC */
		for (vnic = 0; vnic < num_vnics; vnic++) {
			uint32_t pf_q_num =
				ECORE_PF_Q_NUM(q_num, SC_PORT(sc), vnic);
			/* this queue's bit within a 32-queue mask register */
			uint32_t q_bit_map = 1 << (pf_q_num & 0x1f);

			/* overwrite queue->VOQ mapping */
			REG_WR(sc, ECORE_Q_VOQ_REG_ADDR(pf_q_num), new_cos);

			/* clear queue bit from current COS bit map */
			reg_addr = ECORE_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
			reg_bit_map = REG_RD(sc, reg_addr);
			REG_WR(sc, reg_addr, reg_bit_map & (~q_bit_map));

			/* set queue bit in new COS bit map */
			reg_addr = ECORE_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
			reg_bit_map = REG_RD(sc, reg_addr);
			REG_WR(sc, reg_addr, reg_bit_map | q_bit_map);

			/* set/clear queue bit in command-queue bit map
			(E2/E3A0 only, valid COS values are 0/1) */
			if (!(INIT_MODE_FLAGS(sc) & MODE_E3_B0)) {
				reg_addr = ECORE_Q_CMDQ_REG_ADDR(pf_q_num);
				reg_bit_map = REG_RD(sc, reg_addr);
				/* 2 bits per queue in this register */
				q_bit_map = 1 << (2 * (pf_q_num & 0xf));
				reg_bit_map = new_cos ?
					      (reg_bit_map | q_bit_map) :
					      (reg_bit_map & (~q_bit_map));
				REG_WR(sc, reg_addr, reg_bit_map);
			}
		}
	}
}
259 
260 /* Configures the QM according to the specified per-traffic-type COSes */
ecore_dcb_config_qm(struct bnx2x_softc * sc,enum cos_mode mode,struct priority_cos * traffic_cos)261 static inline void ecore_dcb_config_qm(struct bnx2x_softc *sc, enum cos_mode mode,
262 				       struct priority_cos *traffic_cos)
263 {
264 	ecore_map_q_cos(sc, ECORE_FCOE_Q,
265 			traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
266 	ecore_map_q_cos(sc, ECORE_ISCSI_Q,
267 			traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
268 	ecore_map_q_cos(sc, ECORE_ISCSI_ACK_Q,
269 		traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
270 	if (mode != STATIC_COS) {
271 		/* required only in OVERRIDE_COS mode */
272 		ecore_map_q_cos(sc, ECORE_ETH_Q,
273 				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
274 		ecore_map_q_cos(sc, ECORE_TOE_Q,
275 				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
276 		ecore_map_q_cos(sc, ECORE_TOE_ACK_Q,
277 				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
278 	}
279 }
280 
281 
/*
 * congestion management port init api description
 * the api works as follows:
 * the driver should pass the cmng_init_input struct, the port_init function
 * will prepare the required internal ram structure which will be passed back
 * to the driver (cmng_init) that will write it into the internal ram.
 *
 * IMPORTANT REMARKS:
 * 1. the cmng_init struct does not represent the contiguous internal ram
 *    structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET
 *    offset in order to write the port sub struct and the
 *    PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other
 *    words - don't use memcpy!).
 * 2. although the cmng_init struct is filled for the maximal vnic number
 *    possible, the driver should only write the valid vnics into the internal
 *    ram according to the appropriate port mode.
 */

/* convert a bit count (or bit rate) to bytes */
#define BITS_TO_BYTES(x) ((x)/8)

/* CMNG constants, as derived from system spec calculations */

/* default MIN rate in case VNIC min rate is configured to zero- 100Mbps */
#define DEF_MIN_RATE 100

/* resolution of the rate shaping timer - 400 usec */
#define RS_PERIODIC_TIMEOUT_USEC 400

/*
 *  number of bytes in single QM arbitration cycle -
 *  coefficient for calculating the fairness timer
 */
#define QM_ARB_BYTES 160000

/* resolution of Min algorithm 1:100 */
#define MIN_RES 100

/*
 *  how many bytes above threshold for
 *  the minimal credit of Min algorithm
 */
#define MIN_ABOVE_THRESH 32768

/*
 *  Fairness algorithm integration time coefficient -
 *  for calculating the actual Tfair
 */
#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)

/* Memory of fairness algorithm - 2 cycles */
#define FAIR_MEM 2

/* SAFC timeout, in microseconds */
#define SAFC_TIMEOUT_USEC 52

/* duration of one SDM timer tick - 4 microseconds */
#define SDM_TICKS 4
335 
336 
ecore_init_max(const struct cmng_init_input * input_data,uint32_t r_param,struct cmng_init * ram_data)337 static inline void ecore_init_max(const struct cmng_init_input *input_data,
338 				  uint32_t r_param, struct cmng_init *ram_data)
339 {
340 	uint32_t vnic;
341 	struct cmng_vnic *vdata = &ram_data->vnic;
342 	struct cmng_struct_per_port *pdata = &ram_data->port;
343 	/*
344 	 * rate shaping per-port variables
345 	 *  100 micro seconds in SDM ticks = 25
346 	 *  since each tick is 4 microSeconds
347 	 */
348 
349 	pdata->rs_vars.rs_periodic_timeout =
350 	RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS;
351 
352 	/* this is the threshold below which no timer arming will occur.
353 	 *  1.25 coefficient is for the threshold to be a little bigger
354 	 *  then the real time to compensate for timer in-accuracy
355 	 */
356 	pdata->rs_vars.rs_threshold =
357 	(5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4;
358 
359 	/* rate shaping per-vnic variables */
360 	for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) {
361 		/* global vnic counter */
362 		vdata->vnic_max_rate[vnic].vn_counter.rate =
363 		input_data->vnic_max_rate[vnic];
364 		/*
365 		 * maximal Mbps for this vnic
366 		 * the quota in each timer period - number of bytes
367 		 * transmitted in this period
368 		 */
369 		vdata->vnic_max_rate[vnic].vn_counter.quota =
370 			RS_PERIODIC_TIMEOUT_USEC *
371 			(uint32_t)vdata->vnic_max_rate[vnic].vn_counter.rate / 8;
372 	}
373 
374 }
375 
ecore_init_max_per_vn(uint16_t vnic_max_rate,struct rate_shaping_vars_per_vn * ram_data)376 static inline void ecore_init_max_per_vn(uint16_t vnic_max_rate,
377 				  struct rate_shaping_vars_per_vn *ram_data)
378 {
379 	/* global vnic counter */
380 	ram_data->vn_counter.rate = vnic_max_rate;
381 
382 	/*
383 	* maximal Mbps for this vnic
384 	* the quota in each timer period - number of bytes
385 	* transmitted in this period
386 	*/
387 	ram_data->vn_counter.quota =
388 		RS_PERIODIC_TIMEOUT_USEC * (uint32_t)vnic_max_rate / 8;
389 }
390 
ecore_init_min(const struct cmng_init_input * input_data,uint32_t r_param,struct cmng_init * ram_data)391 static inline void ecore_init_min(const struct cmng_init_input *input_data,
392 				  uint32_t r_param, struct cmng_init *ram_data)
393 {
394 	uint32_t vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair;
395 	struct cmng_vnic *vdata = &ram_data->vnic;
396 	struct cmng_struct_per_port *pdata = &ram_data->port;
397 
398 	/* this is the resolution of the fairness timer */
399 	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
400 
401 	/*
402 	 * fairness per-port variables
403 	 * for 10G it is 1000usec. for 1G it is 10000usec.
404 	 */
405 	tFair = T_FAIR_COEF / input_data->port_rate;
406 
407 	/* this is the threshold below which we won't arm the timer anymore */
408 	pdata->fair_vars.fair_threshold = QM_ARB_BYTES +
409 					  input_data->fairness_thr;
410 
411 	/*New limitation - minimal packet size to cause timeout to be armed */
412 	pdata->fair_vars.size_thr = input_data->size_thr;
413 
414 	/*
415 	 *  we multiply by 1e3/8 to get bytes/msec. We don't want the credits
416 	 *  to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution)
417 	 */
418 	pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM;
419 
420 	/* since each tick is 4 microSeconds */
421 	pdata->fair_vars.fairness_timeout =
422 				fair_periodic_timeout_usec / SDM_TICKS;
423 
424 	/* calculate sum of weights */
425 	vnicWeightSum = 0;
426 
427 	for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++)
428 		vnicWeightSum += input_data->vnic_min_rate[vnic];
429 
430 	/* global vnic counter */
431 	if (vnicWeightSum > 0) {
432 		/* fairness per-vnic variables */
433 		for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) {
434 			/*
435 			 *  this is the credit for each period of the fairness
436 			 *  algorithm - number of bytes in T_FAIR (this vnic
437 			 *  share of the port rate)
438 			 */
439 			vdata->vnic_min_rate[vnic].vn_credit_delta =
440 				((uint32_t)(input_data->vnic_min_rate[vnic]) * 100 *
441 				(T_FAIR_COEF / (8 * 100 * vnicWeightSum)));
442 			if (vdata->vnic_min_rate[vnic].vn_credit_delta <
443 			    pdata->fair_vars.fair_threshold +
444 			    MIN_ABOVE_THRESH) {
445 				vdata->vnic_min_rate[vnic].vn_credit_delta =
446 					pdata->fair_vars.fair_threshold +
447 					MIN_ABOVE_THRESH;
448 			}
449 		}
450 	}
451 }
452 
/* Fill the per-COS credit deltas of the CMNG ram image, dividing the
 * port rate between the COSes according to their min-rate weights.
 */
static inline void ecore_init_fw_wrr(const struct cmng_init_input *input_data,
				     uint32_t r_param __rte_unused,
				     struct cmng_init *ram_data)
{
	struct cmng_struct_per_port *port = &ram_data->port;
	struct cmng_vnic *vn = &ram_data->vnic;
	uint32_t i, cos, weight_sum = 0;

	/* sum of all per-COS min-rate weights */
	for (cos = 0; cos < MAX_COS_NUMBER; cos++)
		weight_sum += input_data->cos_min_rate[cos];

	if (weight_sum == 0)
		return;

	for (i = 0; i < ECORE_PORT2_MODE_NUM_VNICS; i++) {
		/* Since cos and vnic shouldn't work together, the rate to
		 * divide between the coses is the port rate.
		 */
		uint32_t *ccd = vn->vnic_min_rate[i].cos_credit_delta;

		for (cos = 0; cos < MAX_COS_NUMBER; cos++) {
			/* minimal credit: threshold plus a safety margin */
			uint32_t min_credit =
				port->fair_vars.fair_threshold +
				MIN_ABOVE_THRESH;

			/* credit per fairness period - number of bytes in
			 * T_FAIR for this cos's share of the vnic rate
			 */
			ccd[cos] =
			    (uint32_t)input_data->cos_min_rate[cos] * 100 *
			    (T_FAIR_COEF / (8 * 100 * weight_sum));
			if (ccd[cos] < min_credit)
				ccd[cos] = min_credit;
		}
	}
}
492 
/* Fill the SAFC portion of the CMNG ram image: only the SAFC timeout,
 * in microseconds, is set.
 */
static inline void
ecore_init_safc(const struct cmng_init_input *input_data __rte_unused,
		struct cmng_init *ram_data)
{
	/* in microSeconds */
	ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
}
500 
501 /* Congestion management port init */
ecore_init_cmng(const struct cmng_init_input * input_data,struct cmng_init * ram_data)502 static inline void ecore_init_cmng(const struct cmng_init_input *input_data,
503 				   struct cmng_init *ram_data)
504 {
505 	uint32_t r_param;
506 	ECORE_MEMSET(ram_data, 0, sizeof(struct cmng_init));
507 
508 	ram_data->port.flags = input_data->flags;
509 
510 	/*
511 	 *  number of bytes transmitted in a rate of 10Gbps
512 	 *  in one usec = 1.25KB.
513 	 */
514 	r_param = BITS_TO_BYTES(input_data->port_rate);
515 	ecore_init_max(input_data, r_param, ram_data);
516 	ecore_init_min(input_data, r_param, ram_data);
517 	ecore_init_fw_wrr(input_data, r_param, ram_data);
518 	ecore_init_safc(input_data, ram_data);
519 }
520 
521 
522 
523 
/* Returns the index of start or end of a specific block stage in ops array.
 * NOTE(review): duplicate of the identical BLOCK_OPS_IDX definition near
 * the top of this header.  Identical macro redefinition is legal C and
 * harmless, but one copy could be removed.
 */
#define BLOCK_OPS_IDX(block, stage, end) \
			(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))


#define INITOP_SET		0	/* set the HW directly */
#define INITOP_CLEAR		1	/* clear the HW directly */
#define INITOP_INIT		2	/* set the init-value array */
532 
533 /****************************************************************************
534 * ILT management
535 ****************************************************************************/
/* One ILT line: a host page mapped for device access */
struct ilt_line {
	ecore_dma_addr_t page_mapping;	/* DMA address of the page */
	void *page;			/* host virtual address */
	uint32_t size;
};
541 
/* Per-client ILT region: [start, end] name a line range for the client */
struct ilt_client_info {
	uint32_t page_size;
	uint16_t start;
	uint16_t end;
	uint16_t client_num;
	uint16_t flags;
#define ILT_CLIENT_SKIP_INIT	0x1	/* skip HW init for this client */
#define ILT_CLIENT_SKIP_MEM	0x2	/* skip memory allocation */
};
551 
/* ILT bookkeeping: the line array plus the four HW clients sharing it */
struct ecore_ilt {
	uint32_t start_line;
	struct ilt_line		*lines;
	struct ilt_client_info	clients[4];
/* indices into clients[] */
#define ILT_CLIENT_CDU	0
#define ILT_CLIENT_QM	1
#define ILT_CLIENT_SRC	2
#define ILT_CLIENT_TM	3
};
561 
562 /****************************************************************************
563 * SRC configuration
564 ****************************************************************************/
/* SRC block entry: 56 opaque bytes plus a 64-bit 'next' link
 * (presumably chaining entries - confirm against the SRC block spec)
 */
struct src_ent {
	uint8_t opaque[56];
	uint64_t next;
};
569 
570 /****************************************************************************
571 * Parity configuration
572 ****************************************************************************/
/* Builds one ecore_blocks_parity_data[] entry for <block>: its parity
 * mask register, parity status-clear register, enable mask, per-chip
 * valid-bit masks (E1/E1H/E2/E3) and the block name string.
 */
#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \
{ \
	block##_REG_##block##_PRTY_MASK, \
	block##_REG_##block##_PRTY_STS_CLR, \
	en_mask, {m1, m1h, m2, m3}, #block \
}

/* Same, for the first of a block's numbered parity register sets (_0) */
#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \
{ \
	block##_REG_##block##_PRTY_MASK_0, \
	block##_REG_##block##_PRTY_STS_CLR_0, \
	en_mask, {m1, m1h, m2, m3}, #block "_0" \
}

/* Same, for the second numbered parity register set (_1) */
#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \
{ \
	block##_REG_##block##_PRTY_MASK_1, \
	block##_REG_##block##_PRTY_STS_CLR_1, \
	en_mask, {m1, m1h, m2, m3}, #block "_1" \
}
593 
/* Per-HW-block parity register table: where to mask parity attentions,
 * where to read/clear parity status, and which status bits are valid on
 * each chip generation.  Consumed by ecore_{disable,clear,enable}_
 * blocks_parity() below.
 */
static const struct {
	uint32_t mask_addr;	/* parity mask register */
	uint32_t sts_clr_addr;	/* parity status read-and-clear register */
	uint32_t en_mask;		/* Mask to enable parity attentions */
	struct {
		uint32_t e1;		/* 57710 */
		uint32_t e1h;	/* 57711 */
		uint32_t e2;		/* 57712 */
		uint32_t e3;		/* 578xx */
	} reg_mask;		/* Register mask (all valid bits) */
	char name[8];		/* Block's longest name is 7 characters long
				 * (name + suffix)
				 */
} ecore_blocks_parity_data[] = {
	/* bit 19 masked */
	/* REG_WR(sc, PXP_REG_PXP_PRTY_MASK, 0x80000); */
	/* bit 5,18,20-31 */
	/* REG_WR(sc, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
	/* bit 5 */
	/* REG_WR(sc, PXP2_REG_PXP2_PRTY_MASK_1, 0x20);	*/
	/* REG_WR(sc, HC_REG_HC_PRTY_MASK, 0x0); */
	/* REG_WR(sc, MISC_REG_MISC_PRTY_MASK, 0x0); */

	/* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
	 * want to handle "system kill" flow at the moment.
	 */
	BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff,
			0x7ffffff),
	BLOCK_PRTY_INFO_0(PXP2,	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
			  0xffffffff),
	BLOCK_PRTY_INFO_1(PXP2,	0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff),
	BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0),
	BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0),
	BLOCK_PRTY_INFO_0(NIG,	0xffffffff, 0, 0, 0xffffffff, 0xffffffff),
	BLOCK_PRTY_INFO_1(NIG,	0xffff, 0, 0, 0xff, 0xffff),
	BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff),
	BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1),
	BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff),
	BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f),
	BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3),
	BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3),
	/* UPB/XPB share the generic PB register layout, so their entries
	 * are spelled out rather than generated by BLOCK_PRTY_INFO()
	 */
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
		GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
		{0xf, 0xf, 0xf, 0xf}, "UPB"},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
		GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
		{0xf, 0xf, 0xf, 0xf}, "XPB"},
	BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7),
	BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f),
	BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f),
	BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1),
	BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf),
	BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf),
	BLOCK_PRTY_INFO(PRS, (1 << 6), 0xff, 0xff, 0xff, 0xff),
	BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff),
	BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f),
	BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
	BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
	BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
	BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
	BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
	BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
	BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
	BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
	BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
			  0xffffffff),
	BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
	BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
			  0xffffffff),
	BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
	BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
			  0xffffffff),
	BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
	BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
			  0xffffffff),
	BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
};
671 
672 
/* [28] MCP Latched rom_parity
 * [29] MCP Latched ump_rx_parity
 * [30] MCP Latched ump_tx_parity
 * [31] MCP Latched scpad_parity
 */
/* MCP parity attention bits, excluding scratchpad parity */
#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS	\
	(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)

/* All MCP parity attention bits, including scratchpad parity */
#define MISC_AEU_ENABLE_MCP_PRTY_BITS	\
	(MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
686 
/* Below registers control the MCP parity attention output. When
 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
 * enabled, when cleared - disabled.
 */
static const struct {
	uint32_t addr;	/* AEU enable register */
	uint32_t bits;	/* MCP parity bits controlled through it */
} mcp_attn_ctl_regs[] = {
	{ MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
		MISC_AEU_ENABLE_MCP_PRTY_BITS },
	{ MISC_REG_AEU_ENABLE4_NIG_0,
		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
	{ MISC_REG_AEU_ENABLE4_PXP_0,
		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
	{ MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
		MISC_AEU_ENABLE_MCP_PRTY_BITS },
	{ MISC_REG_AEU_ENABLE4_NIG_1,
		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
	{ MISC_REG_AEU_ENABLE4_PXP_1,
		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
};
708 
ecore_set_mcp_parity(struct bnx2x_softc * sc,uint8_t enable)709 static inline void ecore_set_mcp_parity(struct bnx2x_softc *sc, uint8_t enable)
710 {
711 	unsigned int i;
712 	uint32_t reg_val;
713 
714 	for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
715 		reg_val = REG_RD(sc, mcp_attn_ctl_regs[i].addr);
716 
717 		if (enable)
718 			reg_val |= mcp_attn_ctl_regs[i].bits;
719 		else
720 			reg_val &= ~mcp_attn_ctl_regs[i].bits;
721 
722 		REG_WR(sc, mcp_attn_ctl_regs[i].addr, reg_val);
723 	}
724 }
725 
ecore_parity_reg_mask(struct bnx2x_softc * sc,int idx)726 static inline uint32_t ecore_parity_reg_mask(struct bnx2x_softc *sc, int idx)
727 {
728 	if (CHIP_IS_E1(sc))
729 		return ecore_blocks_parity_data[idx].reg_mask.e1;
730 	else if (CHIP_IS_E1H(sc))
731 		return ecore_blocks_parity_data[idx].reg_mask.e1h;
732 	else if (CHIP_IS_E2(sc))
733 		return ecore_blocks_parity_data[idx].reg_mask.e2;
734 	else /* CHIP_IS_E3 */
735 		return ecore_blocks_parity_data[idx].reg_mask.e3;
736 }
737 
ecore_disable_blocks_parity(struct bnx2x_softc * sc)738 static inline void ecore_disable_blocks_parity(struct bnx2x_softc *sc)
739 {
740 	unsigned int i;
741 
742 	for (i = 0; i < ARRAY_SIZE(ecore_blocks_parity_data); i++) {
743 		uint32_t dis_mask = ecore_parity_reg_mask(sc, i);
744 
745 		if (dis_mask) {
746 			REG_WR(sc, ecore_blocks_parity_data[i].mask_addr,
747 			       dis_mask);
748 			ECORE_MSG(sc, "Setting parity mask "
749 						 "for %s to\t\t0x%x",
750 				    ecore_blocks_parity_data[i].name, dis_mask);
751 		}
752 	}
753 
754 	/* Disable MCP parity attentions */
755 	ecore_set_mcp_parity(sc, false);
756 }
757 
758 /**
759  * Clear the parity error status registers.
760  */
ecore_clear_blocks_parity(struct bnx2x_softc * sc)761 static inline void ecore_clear_blocks_parity(struct bnx2x_softc *sc)
762 {
763 	unsigned int i;
764 	uint32_t reg_val, mcp_aeu_bits =
765 		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
766 		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
767 		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
768 		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
769 
770 	/* Clear SEM_FAST parities */
771 	REG_WR(sc, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
772 	REG_WR(sc, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
773 	REG_WR(sc, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
774 	REG_WR(sc, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
775 
776 	for (i = 0; i < ARRAY_SIZE(ecore_blocks_parity_data); i++) {
777 		uint32_t reg_mask = ecore_parity_reg_mask(sc, i);
778 
779 		if (reg_mask) {
780 			reg_val = REG_RD(sc, ecore_blocks_parity_data[i].
781 					 sts_clr_addr);
782 			if (reg_val & reg_mask)
783 				ECORE_MSG(sc, "Parity errors in %s: 0x%x",
784 					   ecore_blocks_parity_data[i].name,
785 					   reg_val & reg_mask);
786 		}
787 	}
788 
789 	/* Check if there were parity attentions in MCP */
790 	reg_val = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_MCP);
791 	if (reg_val & mcp_aeu_bits)
792 		ECORE_MSG(sc, "Parity error in MCP: 0x%x",
793 			   reg_val & mcp_aeu_bits);
794 
795 	/* Clear parity attentions in MCP:
796 	 * [7]  clears Latched rom_parity
797 	 * [8]  clears Latched ump_rx_parity
798 	 * [9]  clears Latched ump_tx_parity
799 	 * [10] clears Latched scpad_parity (both ports)
800 	 */
801 	REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
802 }
803 
ecore_enable_blocks_parity(struct bnx2x_softc * sc)804 static inline void ecore_enable_blocks_parity(struct bnx2x_softc *sc)
805 {
806 	unsigned int i;
807 
808 	for (i = 0; i < ARRAY_SIZE(ecore_blocks_parity_data); i++) {
809 		uint32_t reg_mask = ecore_parity_reg_mask(sc, i);
810 
811 		if (reg_mask)
812 			REG_WR(sc, ecore_blocks_parity_data[i].mask_addr,
813 				ecore_blocks_parity_data[i].en_mask & reg_mask);
814 	}
815 
816 	/* Enable MCP parity attentions */
817 	ecore_set_mcp_parity(sc, true);
818 }
819 
820 
821 #endif /* ECORE_INIT_H */
822