1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6 
7 #include "bcm_osal.h"
8 #include "ecore_hw.h"
9 #include "ecore_init_ops.h"
10 #include "reg_addr.h"
11 #include "ecore_rt_defs.h"
12 #include "ecore_hsi_init_func.h"
13 #include "ecore_hsi_init_tool.h"
14 #include "ecore_iro.h"
15 #include "ecore_init_fw_funcs.h"
16 static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
17 	{ 400,  336,  352,  368,  304,  384,  416,  352}, /* region 3 offsets */
18 	{ 528,  496,  416,  512,  448,  512,  544,  480}, /* region 4 offsets */
19 	{ 608,  544,  496,  576,  576,  592,  624,  560}  /* region 5 offsets */
20 };
21 static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
22 	{ 240,  240,  112,    0,    0,    0,    0,   96}  /* region 1 offsets */
23 };
24 
25 /* General constants */
26 #define QM_PQ_MEM_4KB(pq_size) \
27 	(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
28 #define QM_PQ_SIZE_256B(pq_size) \
29 	(pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
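/* Illustrative expansion of the two macros above (assuming, as an example,
 * that QM_PQ_ELEMENT_SIZE is 4 bytes): for pq_size = 1023,
 * QM_PQ_MEM_4KB(1023) = DIV_ROUND_UP(1024 * 4, 0x1000) = 1 page of 4KB, and
 * QM_PQ_SIZE_256B(1023) = DIV_ROUND_UP(1023, 0x100) - 1 = 3.
 */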
30 #define QM_INVALID_PQ_ID		0xffff
31 
32 /* Max link speed (in Mbps) */
33 #define QM_MAX_LINK_SPEED		100000
34 
35 /* Feature enable */
36 #define QM_BYPASS_EN			1
37 #define QM_BYTE_CRD_EN			1
38 
39 /* Other PQ constants */
40 #define QM_OTHER_PQS_PER_PF		4
41 
42 /* VOQ constants */
43 #define MAX_NUM_VOQS			(MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2)
44 #define VOQS_BIT_MASK			((1 << MAX_NUM_VOQS) - 1)
45 
46 /* WFQ constants: */
47 
48 /* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
49 #define QM_WFQ_UPPER_BOUND		62500000
50 
51 /* Bit  of VOQ in WFQ VP PQ map */
52 #define QM_WFQ_VP_PQ_VOQ_SHIFT		0
53 
54 /* Bit  of PF in WFQ VP PQ map */
55 #define QM_WFQ_VP_PQ_PF_SHIFT		5
56 
57 /* 0x9000 = 4*9*1024 */
58 #define QM_WFQ_INC_VAL(weight)		((weight) * 0x9000)
59 
60 /* Max WFQ increment value is 0.7 * upper bound */
61 #define QM_WFQ_MAX_INC_VAL		((QM_WFQ_UPPER_BOUND * 7) / 10)
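/* Rough bound implied by the two macros above: QM_WFQ_INC_VAL(1) = 36864 and
 * QM_WFQ_MAX_INC_VAL = 43750000, so WFQ weights up to roughly
 * 43750000 / 36864 ~= 1186 pass the range checks in the WFQ init functions
 * below.
 */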
62 
63 /* RL constants: */
64 
65 /* Period in us */
66 #define QM_RL_PERIOD			5
67 
68 /* Period in 25MHz cycles */
69 #define QM_RL_PERIOD_CLK_25M		(25 * QM_RL_PERIOD)
70 
71 /* RL increment value - rate is specified in Mbps. The factor of 1.01 was
72  * added after seeing only 99% of the rate reached on a 25Gbps port in a DPDK RFC
73  * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
74  * although the credit increment value was the correct one and FW calculated
75  * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
76  * this point.
77  */
78 #define QM_RL_INC_VAL(rate) \
79 	OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
80 	(8 * 100)), 1)
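/* Illustrative example: a 25Gbps limit (rate = 25000 Mbps) expands to
 * (25000 * 5 * 101) / (8 * 100) = 15781, i.e. roughly the byte credit added
 * every 5us RL period.
 */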
81 
82 /* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
83 #define QM_PF_RL_UPPER_BOUND		62500000
84 
85 /* Max PF RL increment value is 0.7 * upper bound */
86 #define QM_PF_RL_MAX_INC_VAL		((QM_PF_RL_UPPER_BOUND * 7) / 10)
87 
88 /* Vport RL Upper bound, link speed is in Mbps */
89 #define QM_VP_RL_UPPER_BOUND(speed) \
90 	((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))
91 
92 /* Max Vport RL increment value is the Vport RL upper bound */
93 #define QM_VP_RL_MAX_INC_VAL(speed)	QM_VP_RL_UPPER_BOUND(speed)
94 
95 /* Vport RL credit threshold in case of QM bypass */
96 #define QM_VP_RL_BYPASS_THRESH_SPEED	(QM_VP_RL_UPPER_BOUND(10000) - 1)
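/* For reference, this expands to
 * OSAL_MAX_T(u32, QM_RL_INC_VAL(10000), 10700) - 1 = 10700 - 1 = 10699.
 */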
97 
98 /* AFullOprtnstcCrdMask constants */
99 #define QM_OPPOR_LINE_VOQ_DEF		1
100 #define QM_OPPOR_FW_STOP_DEF		0
101 #define QM_OPPOR_PQ_EMPTY_DEF		1
102 
103 /* Command Queue constants: */
104 
105 /* Pure LB CmdQ lines (+spare) */
106 #define PBF_CMDQ_PURE_LB_LINES		150
107 
108 #define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
109 	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
110 	 ext_voq * \
111 	 (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
112 	  PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
113 
114 #define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
115 	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
116 	 ext_voq * \
117 	 (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
118 	  PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
119 
120 #define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
121 ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
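/* Example: for the pure LB VOQ, QM_VOQ_LINE_CRD(PBF_CMDQ_PURE_LB_LINES)
 * evaluates to ((150 - 4) * 2) = 292, ORed with QM_LINE_CRD_REG_SIGN_BIT.
 */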
122 
123 /* BTB: blocks constants (block size = 256B) */
124 
125 /* 256B blocks in 9700B packet */
126 #define BTB_JUMBO_PKT_BLOCKS		38
127 
128 /* Headroom per-port */
129 #define BTB_HEADROOM_BLOCKS		BTB_JUMBO_PKT_BLOCKS
130 #define BTB_PURE_LB_FACTOR		10
131 
132 /* Factored (hence really 0.7) */
133 #define BTB_PURE_LB_RATIO		7
134 
135 /* QM stop command constants */
136 #define QM_STOP_PQ_MASK_WIDTH		32
137 #define QM_STOP_CMD_ADDR		2
138 #define QM_STOP_CMD_STRUCT_SIZE		2
139 #define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
140 #define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
141 #define QM_STOP_CMD_PAUSE_MASK_MASK	0xffffffff /* @DPDK */
142 #define QM_STOP_CMD_GROUP_ID_OFFSET	1
143 #define QM_STOP_CMD_GROUP_ID_SHIFT	16
144 #define QM_STOP_CMD_GROUP_ID_MASK	15
145 #define QM_STOP_CMD_PQ_TYPE_OFFSET	1
146 #define QM_STOP_CMD_PQ_TYPE_SHIFT	24
147 #define QM_STOP_CMD_PQ_TYPE_MASK	1
148 #define QM_STOP_CMD_MAX_POLL_COUNT	100
149 #define QM_STOP_CMD_POLL_PERIOD_US	500
150 
151 /* QM command macros */
152 #define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
153 #define QM_CMD_SET_FIELD(var, cmd, field, value) \
154 	SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
155 
156 #define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, \
157 			   rl_valid, rl_id, voq, wrr) \
158 	do { \
159 		OSAL_MEMSET(&(map), 0, sizeof(map)); \
160 		SET_FIELD(map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); \
161 		SET_FIELD(map.reg, QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0); \
162 		SET_FIELD(map.reg, QM_RF_PQ_MAP_RL_ID, rl_id); \
163 		SET_FIELD(map.reg, QM_RF_PQ_MAP_VP_PQ_ID, vp_pq_id); \
164 		SET_FIELD(map.reg, QM_RF_PQ_MAP_VOQ, voq); \
165 		SET_FIELD(map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, wrr); \
166 		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
167 			     *((u32 *)&(map))); \
168 	} while (0)
169 
170 #define WRITE_PQ_INFO_TO_RAM		1
171 
172 #define PQ_INFO_ELEMENT(vp_pq_id, pf, tc, port, rl_valid, rl_id) \
173 	(((vp_pq_id) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
174 	 ((rl_valid ? 1 : 0) << 22) | (((rl_id) & 255) << 24) | \
175 	 (((rl_id) >> 8) << 9))
176 
177 #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) (XSEM_REG_FAST_MEMORY + \
178 	SEM_FAST_REG_INT_RAM + XSTORM_PQ_INFO_OFFSET(pq_id))
179 
180 /******************** INTERNAL IMPLEMENTATION *********************/
181 
182 /* Prepare PF RL enable/disable runtime init values */
183 static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
184 {
185 	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
186 	if (pf_rl_en) {
187 		/* Enable RLs for all VOQs */
188 		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
189 			     VOQS_BIT_MASK);
190 
191 		/* Write RL period */
192 		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
193 			     QM_RL_PERIOD_CLK_25M);
194 		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
195 			     QM_RL_PERIOD_CLK_25M);
196 
197 		/* Set credit threshold for QM bypass flow */
198 		if (QM_BYPASS_EN)
199 			STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
200 				     QM_PF_RL_UPPER_BOUND);
201 	}
202 }
203 
204 /* Prepare PF WFQ enable/disable runtime init values */
205 static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
206 {
207 	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
208 
209 	/* Set credit threshold for QM bypass flow */
210 	if (pf_wfq_en && QM_BYPASS_EN)
211 		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
212 			     QM_WFQ_UPPER_BOUND);
213 }
214 
215 /* Prepare global RL enable/disable runtime init values */
216 static void ecore_enable_global_rl(struct ecore_hwfn *p_hwfn,
217 				   bool global_rl_en)
218 {
219 	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
220 		     global_rl_en ? 1 : 0);
221 	if (global_rl_en) {
222 		/* Write RL period (use timer 0 only) */
223 		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
224 			     QM_RL_PERIOD_CLK_25M);
225 		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
226 			     QM_RL_PERIOD_CLK_25M);
227 
228 		/* Set credit threshold for QM bypass flow */
229 		if (QM_BYPASS_EN)
230 			STORE_RT_REG(p_hwfn,
231 				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
232 				     QM_VP_RL_BYPASS_THRESH_SPEED);
233 	}
234 }
235 
236 /* Prepare VPORT WFQ enable/disable runtime init values */
237 static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
238 {
239 	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
240 		     vport_wfq_en ? 1 : 0);
241 
242 	/* Set credit threshold for QM bypass flow */
243 	if (vport_wfq_en && QM_BYPASS_EN)
244 		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
245 			     QM_WFQ_UPPER_BOUND);
246 }
247 
248 /* Prepare runtime init values to allocate PBF command queue lines for
249  * the specified VOQ
250  */
251 static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
252 					 u8 voq,
253 					 u16 cmdq_lines)
254 {
255 	u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
256 
257 	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
258 			 (u32)cmdq_lines);
259 	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
260 	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
261 		     qm_line_crd);
262 }
263 
264 /* Prepare runtime init values to allocate PBF command queue lines. */
265 static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
266 				     u8 max_ports_per_engine,
267 				     u8 max_phys_tcs_per_port,
268 				     struct init_qm_port_params
269 				     port_params[MAX_NUM_PORTS])
270 {
271 	u8 tc, voq, port_id, num_tcs_in_port;
272 
273 	/* Clear PBF lines of all VOQs */
274 	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
275 		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
276 
277 	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
278 		u16 phys_lines, phys_lines_per_tc;
279 
280 		if (!port_params[port_id].active)
281 			continue;
282 
283 		/* Find number of command queue lines to divide between the
284 		 * active physical TCs.
285 		 */
286 		phys_lines = port_params[port_id].num_pbf_cmd_lines;
287 		phys_lines -= PBF_CMDQ_PURE_LB_LINES;
288 
289 		/* Find #lines per active physical TC */
290 		num_tcs_in_port = 0;
291 		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
292 			if (((port_params[port_id].active_phys_tcs >> tc) &
293 			      0x1) == 1)
294 				num_tcs_in_port++;
295 		phys_lines_per_tc = phys_lines / num_tcs_in_port;
296 
297 		/* Init registers per active TC */
298 		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
299 			voq = VOQ(port_id, tc, max_phys_tcs_per_port);
300 			if (((port_params[port_id].active_phys_tcs >>
301 			      tc) & 0x1) == 1)
302 				ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
303 							     phys_lines_per_tc);
304 		}
305 
306 		/* Init registers for pure LB TC */
307 		voq = VOQ(port_id, PURE_LB_TC, max_phys_tcs_per_port);
308 		ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
309 					     PBF_CMDQ_PURE_LB_LINES);
310 	}
311 }
312 
313 /*
314  * Prepare runtime init values to allocate guaranteed BTB blocks for the
315  * specified port. The guaranteed BTB space is divided between the TCs as
316  * follows (shared space is currently not used):
317  * 1. Parameters:
318  *     B - BTB blocks for this port
319  *     C - Number of physical TCs for this port
320  * 2. Calculation:
321  *     a. 38 blocks (9700B jumbo frame) are allocated for global per port
322  *        headroom
323  *     b. B = B - 38 (remainder after global headroom allocation)
324  *     c. MAX(38,B/(C+0.7)) blocks are allocated for the pure LB VOQ.
325  *     d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
326  *     e. B/C blocks are allocated for each physical TC.
327  * Assumptions:
328  * - MTU is up to 9700 bytes (38 blocks)
329  * - All TCs are considered symmetrical (same rate and packet size)
330  * - No optimization for lossy TC (all are considered lossless). Shared space is
331  *   not enabled and allocated for each TC.
332  */
333 static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
334 				     u8 max_ports_per_engine,
335 				     u8 max_phys_tcs_per_port,
336 				     struct init_qm_port_params
337 				     port_params[MAX_NUM_PORTS])
338 {
339 	u32 usable_blocks, pure_lb_blocks, phys_blocks;
340 	u8 tc, voq, port_id, num_tcs_in_port;
341 
342 	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
343 		if (!port_params[port_id].active)
344 			continue;
345 
346 		/* Subtract headroom blocks */
347 		usable_blocks = port_params[port_id].num_btb_blocks -
348 				BTB_HEADROOM_BLOCKS;
349 
350 		/* Find blocks per physical TC. Use factor to avoid floating
351 		 * point arithmetic.
352 		 */
353 		num_tcs_in_port = 0;
354 		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
355 			if (((port_params[port_id].active_phys_tcs >> tc) &
356 			      0x1) == 1)
357 				num_tcs_in_port++;
358 
359 		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
360 				  (num_tcs_in_port * BTB_PURE_LB_FACTOR +
361 				   BTB_PURE_LB_RATIO);
362 		pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
363 					    pure_lb_blocks /
364 					    BTB_PURE_LB_FACTOR);
365 		phys_blocks = (usable_blocks - pure_lb_blocks) /
366 			      num_tcs_in_port;
367 
368 		/* Init physical TCs */
369 		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
370 			if (((port_params[port_id].active_phys_tcs >> tc) &
371 			     0x1) == 1) {
372 				voq = VOQ(port_id, tc, max_phys_tcs_per_port);
373 				STORE_RT_REG(p_hwfn,
374 					PBF_BTB_GUARANTEED_RT_OFFSET(voq),
375 					phys_blocks);
376 			}
377 		}
378 
379 		/* Init pure LB TC */
380 		voq = VOQ(port_id, PURE_LB_TC, max_phys_tcs_per_port);
381 		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
382 			     pure_lb_blocks);
383 	}
384 }
385 
386 /* Prepare runtime init values for the specified RL.
387  * If global_rl_params is OSAL_NULL, max link speed (100Gbps) is used instead.
388  * Return -1 on error.
389  */
390 static int ecore_global_rl_rt_init(struct ecore_hwfn *p_hwfn,
391 				   struct init_qm_global_rl_params
392 				     global_rl_params[COMMON_MAX_QM_GLOBAL_RLS])
393 {
394 	u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
395 			  (u32)QM_RL_CRD_REG_SIGN_BIT;
396 	u32 inc_val;
397 	u16 rl_id;
398 
399 	/* Go over all global RLs */
400 	for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
401 		u32 rate_limit = global_rl_params ?
402 				 global_rl_params[rl_id].rate_limit : 0;
403 
404 		inc_val = QM_RL_INC_VAL(rate_limit ?
405 					rate_limit : QM_MAX_LINK_SPEED);
406 		if (inc_val > QM_VP_RL_MAX_INC_VAL(QM_MAX_LINK_SPEED)) {
407 			DP_NOTICE(p_hwfn, true, "Invalid rate limit configuration.\n");
408 			return -1;
409 		}
410 
411 		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
412 			     (u32)QM_RL_CRD_REG_SIGN_BIT);
413 		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
414 			     upper_bound);
415 		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id,
416 			     inc_val);
417 	}
418 
419 	return 0;
420 }
421 
422 /* Prepare Tx PQ mapping runtime init values for the specified PF */
423 static int ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
424 				    struct ecore_ptt *p_ptt,
425 				    u8 pf_id,
426 				    u8 max_phys_tcs_per_port,
427 				    bool is_pf_loading,
428 				    u32 num_pf_cids,
429 				    u32 num_vf_cids,
430 				    u16 start_pq,
431 				    u16 num_pf_pqs,
432 				    u16 num_vf_pqs,
433 				    u16 start_vport,
434 				    u32 base_mem_addr_4kb,
435 				    struct init_qm_pq_params *pq_params,
436 				    struct init_qm_vport_params *vport_params)
437 {
438 	/* A bit per Tx PQ indicating if the PQ is associated with a VF */
439 	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
440 	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
441 	u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
442 	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
443 	#if (WRITE_PQ_INFO_TO_RAM != 0)
444 		u32 pq_info = 0;
445 	#endif
446 
447 	num_pqs = num_pf_pqs + num_vf_pqs;
448 
449 	first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
450 	last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
451 
452 	pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
453 	vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
454 	mem_addr_4kb = base_mem_addr_4kb;
455 
456 	/* Set mapping from PQ group to PF */
457 	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
458 		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
459 			     (u32)(pf_id));
460 
461 	/* Set PQ sizes */
462 	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
463 		     QM_PQ_SIZE_256B(num_pf_cids));
464 	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
465 		     QM_PQ_SIZE_256B(num_vf_cids));
466 
467 	/* Go over all Tx PQs */
468 	for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
469 		u16 first_tx_pq_id, vport_id_in_pf;
470 		struct qm_rf_pq_map tx_pq_map;
471 		bool is_vf_pq;
472 		u8 voq;
473 
474 		voq = VOQ(pq_params[i].port_id, pq_params[i].tc_id,
475 			  max_phys_tcs_per_port);
476 		is_vf_pq = (i >= num_pf_pqs);
477 
478 		/* Update first Tx PQ of VPORT/TC */
479 		vport_id_in_pf = pq_params[i].vport_id - start_vport;
480 		first_tx_pq_id =
481 		vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
482 		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
483 			u32 map_val = (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
484 				      (pf_id << QM_WFQ_VP_PQ_PF_SHIFT);
485 
486 			/* Create new VP PQ */
487 			vport_params[vport_id_in_pf].
488 			    first_tx_pq_id[pq_params[i].tc_id] = pq_id;
489 			first_tx_pq_id = pq_id;
490 
491 			/* Map VP PQ to VOQ and PF */
492 			STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET +
493 				     first_tx_pq_id, map_val);
494 		}
495 
496 		/* Prepare PQ map entry */
497 		QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, pq_id, first_tx_pq_id,
498 				  pq_params[i].rl_valid, pq_params[i].rl_id,
499 				  voq, pq_params[i].wrr_group);
500 
501 		/* Set PQ base address */
502 		STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
503 			     mem_addr_4kb);
504 
505 		/* Clear PQ pointer table entry (64 bit) */
506 		if (is_pf_loading)
507 			for (j = 0; j < 2; j++)
508 				STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET +
509 					     (pq_id * 2) + j, 0);
510 
511 		/* Write PQ info to RAM */
512 #if (WRITE_PQ_INFO_TO_RAM != 0)
513 		pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
514 					  pq_params[i].tc_id,
515 					  pq_params[i].port_id,
516 					  pq_params[i].rl_valid,
517 					  pq_params[i].rl_id);
518 		ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
519 			 pq_info);
520 #endif
521 
522 		/* If VF PQ, add indication to PQ VF mask */
523 		if (is_vf_pq) {
524 			tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
525 				(1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
526 			mem_addr_4kb += vport_pq_mem_4kb;
527 		} else {
528 			mem_addr_4kb += pq_mem_4kb;
529 		}
530 	}
531 
532 	/* Store Tx PQ VF mask to size select register */
533 	for (i = 0; i < num_tx_pq_vf_masks; i++)
534 		if (tx_pq_vf_mask[i])
535 			STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
536 				     i, tx_pq_vf_mask[i]);
537 
538 	return 0;
539 }
540 
541 /* Prepare Other PQ mapping runtime init values for the specified PF */
542 static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
543 				       u8 pf_id,
544 				       bool is_pf_loading,
545 				       u32 num_pf_cids,
546 				       u32 num_tids,
547 				       u32 base_mem_addr_4kb)
548 {
549 	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
550 	u16 i, j, pq_id, pq_group;
551 
552 	/* A single other PQ group is used in each PF, where PQ group i is used
553 	 * in PF i.
554 	 */
555 	pq_group = pf_id;
556 	pq_size = num_pf_cids + num_tids;
557 	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
558 	mem_addr_4kb = base_mem_addr_4kb;
559 
560 	/* Map PQ group to PF */
561 	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
562 		     (u32)(pf_id));
563 
564 	/* Set PQ sizes */
565 	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
566 		     QM_PQ_SIZE_256B(pq_size));
567 
568 	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
569 	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
570 		/* Set PQ base address */
571 		STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
572 			     mem_addr_4kb);
573 
574 		/* Clear PQ pointer table entry */
575 		if (is_pf_loading)
576 			for (j = 0; j < 2; j++)
577 				STORE_RT_REG(p_hwfn,
578 					     QM_REG_PTRTBLOTHER_RT_OFFSET +
579 					     (pq_id * 2) + j, 0);
580 
581 		mem_addr_4kb += pq_mem_4kb;
582 	}
583 }
584 
585 /* Prepare PF WFQ runtime init values for the specified PF.
586  * Return -1 on error.
587  */
588 static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
589 				u8 pf_id,
590 				u16 pf_wfq,
591 				u8 max_phys_tcs_per_port,
592 				u16 num_tx_pqs,
593 				struct init_qm_pq_params *pq_params)
594 {
595 	u32 inc_val, crd_reg_offset;
596 	u8 voq;
597 	u16 i;
598 
599 	inc_val = QM_WFQ_INC_VAL(pf_wfq);
600 	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
601 		DP_NOTICE(p_hwfn, true,
602 			  "Invalid PF WFQ weight configuration\n");
603 		return -1;
604 	}
605 
606 	for (i = 0; i < num_tx_pqs; i++) {
607 		voq = VOQ(pq_params[i].port_id, pq_params[i].tc_id,
608 			  max_phys_tcs_per_port);
609 		crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
610 				  QM_REG_WFQPFCRD_RT_OFFSET :
611 				  QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
612 				 voq * MAX_NUM_PFS_BB +
613 				 (pf_id % MAX_NUM_PFS_BB);
614 		OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
615 				 (u32)QM_WFQ_CRD_REG_SIGN_BIT);
616 	}
617 
618 	STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET +
619 		     pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
620 	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
621 
622 	return 0;
623 }
624 
625 /* Prepare PF RL runtime init values for the specified PF.
626  * Return -1 on error.
627  */
628 static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
629 {
630 	u32 inc_val;
631 
632 	inc_val = QM_RL_INC_VAL(pf_rl);
633 	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
634 		DP_NOTICE(p_hwfn, true,
635 			  "Invalid PF rate limit configuration\n");
636 		return -1;
637 	}
638 
639 	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
640 		     (u32)QM_RL_CRD_REG_SIGN_BIT);
641 	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
642 		     QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
643 	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
644 
645 	return 0;
646 }
647 
648 /* Prepare VPORT WFQ runtime init values for the specified VPORTs.
649  * Return -1 on error.
650  */
651 static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
652 				u16 num_vports,
653 				struct init_qm_vport_params *vport_params)
654 {
655 	u16 vp_pq_id, vport_id;
656 	u32 inc_val;
657 	u8 tc;
658 
659 	/* Go over all PF VPORTs */
660 	for (vport_id = 0; vport_id < num_vports; vport_id++) {
661 		if (!vport_params[vport_id].wfq)
662 			continue;
663 
664 		inc_val = QM_WFQ_INC_VAL(vport_params[vport_id].wfq);
665 		if (inc_val > QM_WFQ_MAX_INC_VAL) {
666 			DP_NOTICE(p_hwfn, true,
667 				  "Invalid VPORT WFQ weight configuration\n");
668 			return -1;
669 		}
670 
671 		/* Each VPORT can have several VPORT PQ IDs for various TCs */
672 		for (tc = 0; tc < NUM_OF_TCS; tc++) {
673 			vp_pq_id = vport_params[vport_id].first_tx_pq_id[tc];
674 			if (vp_pq_id == QM_INVALID_PQ_ID)
675 				continue;
676 
677 			STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
678 				     vp_pq_id, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
679 			STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET +
680 				     vp_pq_id, inc_val);
681 		}
682 	}
683 
684 	return 0;
685 }
686 
687 static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
688 				       struct ecore_ptt *p_ptt)
689 {
690 	u32 reg_val, i;
691 
692 	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
693 	     i++) {
694 		OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
695 		reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
696 	}
697 
698 	/* Check if timeout while waiting for SDM command ready */
699 	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
700 		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
701 			   "Timeout waiting for QM SDM cmd ready signal\n");
702 		return false;
703 	}
704 
705 	return true;
706 }
707 
708 static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
709 			      struct ecore_ptt *p_ptt,
710 			      u32 cmd_addr,
711 			      u32 cmd_data_lsb,
712 			      u32 cmd_data_msb)
713 {
714 	if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
715 		return false;
716 
717 	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
718 	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
719 	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
720 	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
721 	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
722 
723 	return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
724 }
725 
726 /******************** INTERFACE IMPLEMENTATION *********************/
727 
728 u32 ecore_qm_pf_mem_size(struct ecore_hwfn *p_hwfn,
729 			 u32 num_pf_cids,
730 			 u32 num_vf_cids,
731 			 u32 num_tids,
732 			 u16 num_pf_pqs,
733 			 u16 num_vf_pqs)
734 {
735 	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
736 	    QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
737 	    QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
738 }
739 
740 int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
741 			    u8 max_ports_per_engine,
742 			    u8 max_phys_tcs_per_port,
743 			    bool pf_rl_en,
744 			    bool pf_wfq_en,
745 			    bool global_rl_en,
746 			    bool vport_wfq_en,
747 			    struct init_qm_port_params
748 				   port_params[MAX_NUM_PORTS],
749 			    struct init_qm_global_rl_params
750 				   global_rl_params[COMMON_MAX_QM_GLOBAL_RLS])
751 {
752 	u32 mask = 0;
753 
754 	/* Init AFullOprtnstcCrdMask */
755 	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
756 		  QM_OPPOR_LINE_VOQ_DEF);
757 	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
758 	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, pf_wfq_en);
759 	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, vport_wfq_en);
760 	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, pf_rl_en);
761 	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL, global_rl_en);
762 	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
763 	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY,
764 		  QM_OPPOR_PQ_EMPTY_DEF);
765 	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
766 
767 	/* Enable/disable PF RL */
768 	ecore_enable_pf_rl(p_hwfn, pf_rl_en);
769 
770 	/* Enable/disable PF WFQ */
771 	ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
772 
773 	/* Enable/disable global RL */
774 	ecore_enable_global_rl(p_hwfn, global_rl_en);
775 
776 	/* Enable/disable VPORT WFQ */
777 	ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
778 
779 	/* Init PBF CMDQ line credit */
780 	ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
781 				 max_phys_tcs_per_port, port_params);
782 
783 	/* Init BTB blocks in PBF */
784 	ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
785 				 max_phys_tcs_per_port, port_params);
786 
787 	ecore_global_rl_rt_init(p_hwfn, global_rl_params);
788 
789 	return 0;
790 }
791 
792 int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
793 			struct ecore_ptt *p_ptt,
794 			u8 pf_id,
795 			u8 max_phys_tcs_per_port,
796 			bool is_pf_loading,
797 			u32 num_pf_cids,
798 			u32 num_vf_cids,
799 			u32 num_tids,
800 			u16 start_pq,
801 			u16 num_pf_pqs,
802 			u16 num_vf_pqs,
803 			u16 start_vport,
804 			u16 num_vports,
805 			u16 pf_wfq,
806 			u32 pf_rl,
807 			struct init_qm_pq_params *pq_params,
808 			struct init_qm_vport_params *vport_params)
809 {
810 	u32 other_mem_size_4kb;
811 	u16 vport_id;
812 	u8 tc;
813 
814 	other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
815 			     QM_OTHER_PQS_PER_PF;
816 
817 	/* Clear first Tx PQ ID array for each VPORT */
818 	for (vport_id = 0; vport_id < num_vports; vport_id++)
819 		for (tc = 0; tc < NUM_OF_TCS; tc++)
820 			vport_params[vport_id].first_tx_pq_id[tc] =
821 				QM_INVALID_PQ_ID;
822 
823 	/* Map Other PQs (if any) */
824 #if QM_OTHER_PQS_PER_PF > 0
825 	ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids,
826 				   num_tids, 0);
827 #endif
828 
829 	/* Map Tx PQs */
830 	if (ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
831 				    is_pf_loading, num_pf_cids, num_vf_cids,
832 				    start_pq, num_pf_pqs, num_vf_pqs,
833 				    start_vport, other_mem_size_4kb, pq_params,
834 				    vport_params))
835 		return -1;
836 
837 	/* Init PF WFQ */
838 	if (pf_wfq)
839 		if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq,
840 					 max_phys_tcs_per_port,
841 					 num_pf_pqs + num_vf_pqs, pq_params))
842 			return -1;
843 
844 	/* Init PF RL */
845 	if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
846 		return -1;
847 
848 	/* Init VPORT WFQ */
849 	if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
850 		return -1;
851 
852 	return 0;
853 }
854 
855 int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
856 		      struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
857 {
858 	u32 inc_val;
859 
860 	inc_val = QM_WFQ_INC_VAL(pf_wfq);
861 	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
862 		DP_NOTICE(p_hwfn, true,
863 			  "Invalid PF WFQ weight configuration\n");
864 		return -1;
865 	}
866 
867 	ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
868 
869 	return 0;
870 }
871 
872 int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
873 		     struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
874 {
875 	u32 inc_val;
876 
877 	inc_val = QM_RL_INC_VAL(pf_rl);
878 	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
879 		DP_NOTICE(p_hwfn, true,
880 			  "Invalid PF rate limit configuration\n");
881 		return -1;
882 	}
883 
884 	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
885 		 (u32)QM_RL_CRD_REG_SIGN_BIT);
886 	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
887 
888 	return 0;
889 }
890 
891 int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
892 			 struct ecore_ptt *p_ptt,
893 			 u16 first_tx_pq_id[NUM_OF_TCS],
894 			 u16 wfq)
895 {
896 	u16 vp_pq_id;
897 	u32 inc_val;
898 	u8 tc;
899 
900 	inc_val = QM_WFQ_INC_VAL(wfq);
901 	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
902 		DP_NOTICE(p_hwfn, true,
903 			  "Invalid VPORT WFQ weight configuration\n");
904 		return -1;
905 	}
906 
907 	/* A VPORT can have several VPORT PQ IDs for various TCs */
908 	for (tc = 0; tc < NUM_OF_TCS; tc++) {
909 		vp_pq_id = first_tx_pq_id[tc];
910 		if (vp_pq_id != QM_INVALID_PQ_ID) {
911 			ecore_wr(p_hwfn, p_ptt,
912 				 QM_REG_WFQVPWEIGHT + vp_pq_id * 4, inc_val);
913 		}
914 	}
915 
916 	return 0;
917 }
918 
919 int ecore_init_global_rl(struct ecore_hwfn *p_hwfn,
920 			 struct ecore_ptt *p_ptt,
921 			 u16 rl_id,
922 			 u32 rate_limit)
923 {
924 	u32 inc_val;
925 
926 	inc_val = QM_RL_INC_VAL(rate_limit);
927 	if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) {
928 		DP_NOTICE(p_hwfn, true, "Invalid rate limit configuration.\n");
929 		return -1;
930 	}
931 
932 	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + rl_id * 4,
933 		 (u32)QM_RL_CRD_REG_SIGN_BIT);
934 	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);
935 
936 	return 0;
937 }
938 
939 int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
940 			struct ecore_ptt *p_ptt, u8 vport_id,
941 			u32 vport_rl,
942 			u32 link_speed)
943 {
944 	u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
945 
946 	if (vport_id >= max_qm_global_rls) {
947 		DP_NOTICE(p_hwfn, true,
948 			  "Invalid VPORT ID for rate limiter configuration\n");
949 		return -1;
950 	}
951 
952 	inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
953 	if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
954 		DP_NOTICE(p_hwfn, true,
955 			  "Invalid VPORT rate-limit configuration\n");
956 		return -1;
957 	}
958 
959 	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
960 		 (u32)QM_RL_CRD_REG_SIGN_BIT);
961 	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
962 
963 	return 0;
964 }
965 
966 bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
967 			    struct ecore_ptt *p_ptt,
968 			    bool is_release_cmd,
969 			    bool is_tx_pq, u16 start_pq, u16 num_pqs)
970 {
971 	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
972 	u32 pq_mask = 0, last_pq, pq_id;
973 
974 	last_pq = start_pq + num_pqs - 1;
975 
976 	/* Set command's PQ type */
977 	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
978 
979 	/* Go over requested PQs */
980 	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
981 		/* Set PQ bit in mask (stop command only) */
982 		if (!is_release_cmd)
983 			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
984 
985 		/* If last PQ or end of PQ mask, write command */
986 		if ((pq_id == last_pq) ||
987 		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
988 		    (QM_STOP_PQ_MASK_WIDTH - 1))) {
989 			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
990 					 pq_mask);
991 			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
992 					 pq_id / QM_STOP_PQ_MASK_WIDTH);
993 			if (!ecore_send_qm_cmd
994 			    (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
995 			     cmd_arr[1]))
996 				return false;
997 			pq_mask = 0;
998 		}
999 	}
1000 
1001 	return true;
1002 }
1003 
1004 #ifndef UNUSED_HSI_FUNC
1005 
1006 /* NIG: ETS configuration constants */
1007 #define NIG_TX_ETS_CLIENT_OFFSET	4
1008 #define NIG_LB_ETS_CLIENT_OFFSET	1
1009 #define NIG_ETS_MIN_WFQ_BYTES		1600
1010 
1011 /* NIG: ETS constants */
1012 #define NIG_ETS_UP_BOUND(weight, mtu) \
1013 	(2 * ((weight) > (mtu) ? (weight) : (mtu)))
1014 
1015 /* NIG: RL constants */
1016 
1017 /* Byte base type value */
1018 #define NIG_RL_BASE_TYPE		1
1019 
1020 /* Period in us */
1021 #define NIG_RL_PERIOD			1
1022 
1023 /* Period in 25MHz cycles */
1024 #define NIG_RL_PERIOD_CLK_25M		(25 * NIG_RL_PERIOD)
1025 
1026 /* Rate in mbps */
1027 #define NIG_RL_INC_VAL(rate)		(((rate) * NIG_RL_PERIOD) / 8)
1028 
1029 #define NIG_RL_MAX_VAL(inc_val, mtu) \
1030 	(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
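/* Illustrative example: a 10Gbps limit gives NIG_RL_INC_VAL(10000) =
 * (10000 * 1) / 8 = 1250 bytes per 1us period, and with a 9600B MTU (example
 * value) NIG_RL_MAX_VAL(1250, 9600) = 2 * 9600 = 19200.
 */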
1031 
1032 /* NIG: packet priority configuration constants */
1033 #define NIG_PRIORITY_MAP_TC_BITS	4
1034 
1035 
1036 void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
1037 			struct ecore_ptt *p_ptt,
1038 			struct init_ets_req *req, bool is_lb)
1039 {
1040 	u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
1041 	u32 tc_bound_base_addr, tc_bound_addr_diff;
1042 	u8 sp_tc_map = 0, wfq_tc_map = 0;
1043 	u8 tc, num_tc, tc_client_offset;
1044 
1045 	num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
1046 	tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
1047 				   NIG_TX_ETS_CLIENT_OFFSET;
1048 	min_weight = 0xffffffff;
1049 	tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
1050 				      NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
1051 	tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
1052 				      NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
1053 				      NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
1054 				      NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
1055 	tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
1056 				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
1057 	tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
1058 				     NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
1059 				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
1060 				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
1061 
1062 	for (tc = 0; tc < num_tc; tc++) {
1063 		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1064 
1065 		/* Update SP map */
1066 		if (tc_req->use_sp)
1067 			sp_tc_map |= (1 << tc);
1068 
1069 		if (!tc_req->use_wfq)
1070 			continue;
1071 
1072 		/* Update WFQ map */
1073 		wfq_tc_map |= (1 << tc);
1074 
1075 		/* Find minimal weight */
1076 		if (tc_req->weight < min_weight)
1077 			min_weight = tc_req->weight;
1078 	}
1079 
1080 	/* Write SP map */
1081 	ecore_wr(p_hwfn, p_ptt,
1082 		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
1083 		 NIG_REG_TX_ARB_CLIENT_IS_STRICT,
1084 		 (sp_tc_map << tc_client_offset));
1085 
1086 	/* Write WFQ map */
1087 	ecore_wr(p_hwfn, p_ptt,
1088 		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
1089 		 NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
1090 		 (wfq_tc_map << tc_client_offset));
1091 	/* write WFQ weights */
1092 	for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
1093 		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1094 		u32 byte_weight;
1095 
1096 		if (!tc_req->use_wfq)
1097 			continue;
1098 
1099 		/* Translate weight to bytes */
1100 		byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
1101 			      min_weight;
1102 
1103 		/* Write WFQ weight */
1104 		ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
1105 			 tc_weight_addr_diff * tc_client_offset, byte_weight);
1106 
1107 		/* Write WFQ upper bound */
1108 		ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
1109 			 tc_bound_addr_diff * tc_client_offset,
1110 			 NIG_ETS_UP_BOUND(byte_weight, req->mtu));
1111 	}
1112 }
1113 
1114 void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
1115 			  struct ecore_ptt *p_ptt,
1116 			  struct init_nig_lb_rl_req *req)
1117 {
1118 	u32 ctrl, inc_val, reg_offset;
1119 	u8 tc;
1120 
1121 	/* Disable global MAC+LB RL */
1122 	ctrl =
1123 	    NIG_RL_BASE_TYPE <<
1124 	    NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
1125 	ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
1126 
1127 	/* Configure and enable global MAC+LB RL */
1128 	if (req->lb_mac_rate) {
1129 		/* Configure  */
1130 		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
1131 			 NIG_RL_PERIOD_CLK_25M);
1132 		inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
1133 		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
1134 			 inc_val);
1135 		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
1136 			 NIG_RL_MAX_VAL(inc_val, req->mtu));
1137 
1138 		/* Enable */
1139 		ctrl |=
1140 		    1 <<
1141 		    NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
1142 		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
1143 	}
1144 
1145 	/* Disable global LB-only RL */
1146 	ctrl =
1147 	    NIG_RL_BASE_TYPE <<
1148 	    NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
1149 	ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
1150 
1151 	/* Configure and enable global LB-only RL */
1152 	if (req->lb_rate) {
1153 		/* Configure  */
1154 		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
1155 			 NIG_RL_PERIOD_CLK_25M);
1156 		inc_val = NIG_RL_INC_VAL(req->lb_rate);
1157 		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
1158 			 inc_val);
1159 		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
1160 			 NIG_RL_MAX_VAL(inc_val, req->mtu));
1161 
1162 		/* Enable */
1163 		ctrl |=
1164 		    1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
1165 		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
1166 	}
1167 
1168 	/* Per-TC RLs */
1169 	for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
1170 	     tc++, reg_offset += 4) {
1171 		/* Disable TC RL */
1172 		ctrl =
1173 		    NIG_RL_BASE_TYPE <<
1174 		NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
1175 		ecore_wr(p_hwfn, p_ptt,
1176 			 NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
1177 
1178 		/* Configure and enable TC RL */
1179 		if (!req->tc_rate[tc])
1180 			continue;
1181 
1182 		/* Configure */
1183 		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
1184 			 reg_offset, NIG_RL_PERIOD_CLK_25M);
1185 		inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
1186 		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
1187 			 reg_offset, inc_val);
1188 		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
1189 			 reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
1190 
1191 		/* Enable */
1192 		ctrl |= 1 <<
1193 			NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
1194 		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
1195 			 reg_offset, ctrl);
1196 	}
1197 }
1198 
1199 void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
1200 			       struct ecore_ptt *p_ptt,
1201 			       struct init_nig_pri_tc_map_req *req)
1202 {
1203 	u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
1204 	u32 pri_tc_mask = 0;
1205 	u8 pri, tc;
1206 
1207 	for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
1208 		if (!req->pri[pri].valid)
1209 			continue;
1210 
1211 		pri_tc_mask |= (req->pri[pri].tc_id <<
1212 				(pri * NIG_PRIORITY_MAP_TC_BITS));
1213 		tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
1214 	}
1215 
1216 	/* Write priority -> TC mask */
1217 	ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
1218 
1219 	/* Write TC -> priority mask */
1220 	for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
1221 		ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
1222 			 tc_pri_mask[tc]);
1223 		ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
1224 			 tc_pri_mask[tc]);
1225 	}
1226 }
1227 
1228 #endif /* UNUSED_HSI_FUNC */
1229 
1230 #ifndef UNUSED_HSI_FUNC
1231 
1232 /* PRS: ETS configuration constants */
1233 #define PRS_ETS_MIN_WFQ_BYTES		1600
1234 #define PRS_ETS_UP_BOUND(weight, mtu) \
1235 	(2 * ((weight) > (mtu) ? (weight) : (mtu)))
1236 
1237 
1238 void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
1239 			struct ecore_ptt *p_ptt, struct init_ets_req *req)
1240 {
1241 	u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
1242 	u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
1243 
1244 	tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
1245 			      PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
1246 	tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
1247 			     PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
1248 
1249 	for (tc = 0; tc < NUM_OF_TCS; tc++) {
1250 		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1251 
1252 		/* Update SP map */
1253 		if (tc_req->use_sp)
1254 			sp_tc_map |= (1 << tc);
1255 
1256 		if (!tc_req->use_wfq)
1257 			continue;
1258 
1259 		/* Update WFQ map */
1260 		wfq_tc_map |= (1 << tc);
1261 
1262 		/* Find minimal weight */
1263 		if (tc_req->weight < min_weight)
1264 			min_weight = tc_req->weight;
1265 	}
1266 
1267 	/* write SP map */
1268 	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
1269 
1270 	/* write WFQ map */
1271 	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
1272 		 wfq_tc_map);
1273 
1274 	/* write WFQ weights */
1275 	for (tc = 0; tc < NUM_OF_TCS; tc++) {
1276 		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1277 		u32 byte_weight;
1278 
1279 		if (!tc_req->use_wfq)
1280 			continue;
1281 
1282 		/* Translate weight to bytes */
1283 		byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
1284 			      min_weight;
1285 
1286 		/* Write WFQ weight */
1287 		ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
1288 			 tc_weight_addr_diff, byte_weight);
1289 
1290 		/* Write WFQ upper bound */
1291 		ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
1292 			 tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight,
1293 								   req->mtu));
1294 	}
1295 }
1296 
1297 #endif /* UNUSED_HSI_FUNC */
1298 #ifndef UNUSED_HSI_FUNC
1299 
1300 /* BRB: RAM configuration constants */
1301 #define BRB_TOTAL_RAM_BLOCKS_BB	4800
1302 #define BRB_TOTAL_RAM_BLOCKS_K2	5632
1303 #define BRB_BLOCK_SIZE		128
1304 #define BRB_MIN_BLOCKS_PER_TC	9
1305 #define BRB_HYST_BYTES		10240
1306 #define BRB_HYST_BLOCKS		(BRB_HYST_BYTES / BRB_BLOCK_SIZE)
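/* Derived from the constants above: the per-TC hysteresis value written below
 * is BRB_HYST_BLOCKS = 10240 / 128 = 80 blocks.
 */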
1307 
1308 /* Temporary big RAM allocation - should be updated */
1309 void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
1310 			struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
1311 {
1312 	u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
1313 	u32 active_port_blocks, reg_offset = 0;
1314 	u8 port, active_ports = 0;
1315 
1316 	tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
1317 					       BRB_BLOCK_SIZE);
1318 	min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
1319 						BRB_BLOCK_SIZE);
1320 	total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
1321 						    BRB_TOTAL_RAM_BLOCKS_BB;
1322 
1323 	/* Find number of active ports */
1324 	for (port = 0; port < MAX_NUM_PORTS; port++)
1325 		if (req->num_active_tcs[port])
1326 			active_ports++;
1327 
1328 	active_port_blocks = (u32)(total_blocks / active_ports);
1329 
1330 	for (port = 0; port < req->max_ports_per_engine; port++) {
1331 		u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
1332 		u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
1333 		u32 tc_guaranteed_blocks;
1334 		u8 tc;
1335 
1336 		/* Calculate per-port sizes */
1337 		tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
1338 							 BRB_BLOCK_SIZE);
1339 		port_blocks = req->num_active_tcs[port] ? active_port_blocks :
1340 							  0;
1341 		port_guaranteed_blocks = req->num_active_tcs[port] *
1342 					 tc_guaranteed_blocks;
1343 		port_shared_blocks = port_blocks - port_guaranteed_blocks;
1344 		full_xoff_th = req->num_active_tcs[port] *
1345 			       BRB_MIN_BLOCKS_PER_TC;
1346 		full_xon_th = full_xoff_th + min_pkt_size_blocks;
1347 		pause_xoff_th = tc_headroom_blocks;
1348 		pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
1349 
1350 		/* Init total size per port */
1351 		ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
1352 			 port_blocks);
1353 
1354 		/* Init shared size per port */
1355 		ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
1356 			 port_shared_blocks);
1357 
1358 		for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
1359 			/* Clear init values for non-active TCs */
1360 			if (tc == req->num_active_tcs[port]) {
1361 				tc_guaranteed_blocks = 0;
1362 				full_xoff_th = 0;
1363 				full_xon_th = 0;
1364 				pause_xoff_th = 0;
1365 				pause_xon_th = 0;
1366 			}
1367 
1368 			/* Init guaranteed size per TC */
1369 			ecore_wr(p_hwfn, p_ptt,
1370 				 BRB_REG_TC_GUARANTIED_0 + reg_offset,
1371 				 tc_guaranteed_blocks);
1372 			ecore_wr(p_hwfn, p_ptt,
1373 				 BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
1374 				 BRB_HYST_BLOCKS);
1375 
1376 			/* Init pause/full thresholds per physical TC - for
1377 			 * loopback traffic.
1378 			 */
1379 			ecore_wr(p_hwfn, p_ptt,
1380 				 BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
1381 				 reg_offset, full_xoff_th);
1382 			ecore_wr(p_hwfn, p_ptt,
1383 				 BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
1384 				 reg_offset, full_xon_th);
1385 			ecore_wr(p_hwfn, p_ptt,
1386 				 BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
1387 				 reg_offset, pause_xoff_th);
1388 			ecore_wr(p_hwfn, p_ptt,
1389 				 BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
1390 				 reg_offset, pause_xon_th);
1391 
1392 			/* Init pause/full thresholds per physical TC - for
1393 			 * main traffic.
1394 			 */
1395 			ecore_wr(p_hwfn, p_ptt,
1396 				 BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
1397 				 reg_offset, full_xoff_th);
1398 			ecore_wr(p_hwfn, p_ptt,
1399 				 BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
1400 				 reg_offset, full_xon_th);
1401 			ecore_wr(p_hwfn, p_ptt,
1402 				 BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
1403 				 reg_offset, pause_xoff_th);
1404 			ecore_wr(p_hwfn, p_ptt,
1405 				 BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
1406 				 reg_offset, pause_xon_th);
1407 		}
1408 	}
1409 }
1410 
1411 #endif /* UNUSED_HSI_FUNC */
1412 #ifndef UNUSED_HSI_FUNC
1413 
1414 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size)		\
1415 	do {							\
1416 		u32 i;						\
1417 		for (i = 0; i < (arr_size); i++)		\
1418 			ecore_wr(dev, ptt, ((addr) + (4 * i)),	\
1419 				 ((u32 *)(arr))[i]);		\
1420 	} while (0)
1421 
1422 #ifndef DWORDS_TO_BYTES
1423 #define DWORDS_TO_BYTES(dwords)		((dwords) * REG_SIZE)
1424 #endif
1425 
1426 
1427 /**
1428  * @brief ecore_dmae_to_grc - internal function that writes from host memory
1429  * to wide-bus registers (split registers are not supported yet)
1430  *
1431  * @param p_hwfn -       HW device data
1432  * @param p_ptt -       ptt window used for writing the registers.
1433  * @param pData - pointer to source data.
1434  * @param addr - Destination register address.
1435  * @param len_in_dwords - data length in dwords (u32)
1436  */
1437 static int ecore_dmae_to_grc(struct ecore_hwfn *p_hwfn,
1438 			     struct ecore_ptt *p_ptt,
1439 			     u32 *pData,
1440 			     u32 addr,
1441 			     u32 len_in_dwords)
1442 {
1443 	struct dmae_params params;
1444 	bool read_using_dmae = false;
1445 
1446 	if (!pData)
1447 		return -1;
1448 
1449 	/* Set DMAE params */
1450 	OSAL_MEMSET(&params, 0, sizeof(params));
1451 
1452 	SET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 1);
1453 
1454 	/* Execute DMAE command */
1455 	read_using_dmae = !ecore_dmae_host2grc(p_hwfn, p_ptt,
1456 					       (u64)(osal_uintptr_t)(pData),
1457 					       addr, len_in_dwords, &params);
1458 	if (!read_using_dmae)
1459 		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
1460 			   "Failed writing to chip using DMAE, using GRC instead\n");
1461 
1462 	/* If not read using DMAE, read using GRC */
1463 	if (!read_using_dmae)
1464 		/* write to registers using GRC */
1465 		ARR_REG_WR(p_hwfn, p_ptt, addr, pData, len_in_dwords);
1466 
1467 	return len_in_dwords;
1468 }
1469 
1470 /* In MF, should be called once per port to set EtherType of OuterTag */
1471 void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
1472 {
1473 	/* Update DORQ register */
1474 	STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
1475 }
1476 
1477 #endif /* UNUSED_HSI_FUNC */
1478 
1479 #define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
1480 (var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
1481 #define PRS_ETH_TUNN_OUTPUT_FORMAT        -188897008
1482 #define PRS_ETH_OUTPUT_FORMAT             -46832
1483 
1484 void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
1485 			       struct ecore_ptt *p_ptt, u16 dest_port)
1486 {
1487 	/* Update PRS register */
1488 	ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
1489 
1490 	/* Update NIG register */
1491 	ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
1492 
1493 	/* Update PBF register */
1494 	ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
1495 }
1496 
1497 void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
1498 			    struct ecore_ptt *p_ptt, bool vxlan_enable)
1499 {
1500 	u32 reg_val;
1501 
1502 	/* Update PRS register */
1503 	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1504 	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1505 			   PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
1506 			   vxlan_enable);
1507 	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1508 	if (reg_val) { /* TODO: handle E5 init */
1509 		reg_val = ecore_rd(p_hwfn, p_ptt,
1510 				   PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
1511 
1512 		/* Update output  only if tunnel blocks not included. */
1513 		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1514 			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1515 				 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1516 	}
1517 
1518 	/* Update NIG register */
1519 	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
1520 	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1521 				   NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
1522 				   vxlan_enable);
1523 	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
1524 
1525 	/* Update DORQ register */
1526 	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
1527 		 vxlan_enable ? 1 : 0);
1528 }
1529 
1530 void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
1531 			  struct ecore_ptt *p_ptt,
1532 			  bool eth_gre_enable, bool ip_gre_enable)
1533 {
1534 	u32 reg_val;
1535 
1536 	/* Update PRS register */
1537 	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1538 	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1539 		   PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
1540 		   eth_gre_enable);
1541 	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1542 		   PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
1543 		   ip_gre_enable);
1544 	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1545 	if (reg_val) { /* TODO: handle E5 init */
1546 		reg_val = ecore_rd(p_hwfn, p_ptt,
1547 				   PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
1548 
1549 		/* Update output  only if tunnel blocks not included. */
1550 		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1551 			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1552 				 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1553 	}
1554 
1555 	/* Update NIG register */
1556 	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
1557 	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1558 		   NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
1559 		   eth_gre_enable);
1560 	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1561 		   NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
1562 		   ip_gre_enable);
1563 	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
1564 
1565 	/* Update DORQ registers */
1566 	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
1567 		 eth_gre_enable ? 1 : 0);
1568 	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
1569 		 ip_gre_enable ? 1 : 0);
1570 }
1571 
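/* Set the GENEVE (NGE) destination UDP port in the PRS, NIG and PBF blocks */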
1572 void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
1573 				struct ecore_ptt *p_ptt, u16 dest_port)
1574 {
1575 	/* Update PRS register */
1576 	ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
1577 
1578 	/* Update NIG register */
1579 	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
1580 
1581 	/* Update PBF register */
1582 	ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
1583 }
1584 
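/* Enable/disable ETH-over-GENEVE and IP-over-GENEVE tunnel classification */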
1585 void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
1586 			     struct ecore_ptt *p_ptt,
1587 			     bool eth_geneve_enable, bool ip_geneve_enable)
1588 {
1589 	u32 reg_val;
1590 
1591 	/* Update PRS register */
1592 	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1593 	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1594 		   PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
1595 		   eth_geneve_enable);
1596 	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1597 		   PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
1598 		   ip_geneve_enable);
1599 	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1600 	if (reg_val) { /* TODO: handle E5 init */
1601 		reg_val = ecore_rd(p_hwfn, p_ptt,
1602 				   PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
1603 
1604 		/* Update output only if tunnel blocks are not included. */
1605 		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1606 			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1607 				 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1608 	}
1609 
1610 	/* Update NIG register */
1611 	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
1612 		 eth_geneve_enable ? 1 : 0);
1613 	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
1614 		 ip_geneve_enable ? 1 : 0);
1615 
1616 	/* EDPM with geneve tunnel not supported in BB */
1617 	if (ECORE_IS_BB_B0(p_hwfn->p_dev))
1618 		return;
1619 
1620 	/* Update DORQ registers */
1621 	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
1622 		 eth_geneve_enable ? 1 : 0);
1623 	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
1624 		 ip_geneve_enable ? 1 : 0);
1625 }
1626 
1627 #define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET      3
1628 #define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT   -925189872
1629 
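/* Enable/disable parsing of VXLAN packets that carry no inner L2 header */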
1630 void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
1631 				  struct ecore_ptt *p_ptt,
1632 				  bool enable)
1633 {
1634 	u32 reg_val, cfg_mask;
1635 
1636 	/* read PRS config register */
1637 	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
1638 
1639 	/* set VXLAN_NO_L2_ENABLE mask */
1640 	cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
1641 
1642 	if (enable) {
1643 		/* set VXLAN_NO_L2_ENABLE flag */
1644 		reg_val |= cfg_mask;
1645 
1646 		/* update PRS FIC Format register */
1647 		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1648 			 (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
1649 	} else {
1650 		reg_val &= ~cfg_mask; /* clear VXLAN_NO_L2_ENABLE flag */
1651 	}
1652 
1653 	/* write PRS config register */
1654 	ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
1655 }
1656 
1657 #ifndef UNUSED_HSI_FUNC
1658 
1659 #define T_ETH_PACKET_ACTION_GFT_EVENTID  23
1660 #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR  272
1661 #define T_ETH_PACKET_MATCH_RFS_EVENTID 25
1662 #define PARSER_ETH_CONN_CM_HDR 0
1663 #define CAM_LINE_SIZE sizeof(u32)
1664 #define RAM_LINE_SIZE sizeof(u64)
1665 #define REG_SIZE sizeof(u32)
1666 
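/* Disable GFT search for a PF and clear its CAM and RAM profile lines */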
1667 void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
1668 		       struct ecore_ptt *p_ptt,
1669 		       u16 pf_id)
1670 {
1671 	struct regpair ram_line;
1672 	OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));
1673 
1674 	/* disable gft search for PF */
1675 	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
1676 
1677 	/* Clean ram & cam for next gft session */
1678 
1679 	/* Zero camline */
1680 	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
1681 
1682 	/* Zero ramline */
1683 	ecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
1684 			  PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
1685 			  sizeof(ram_line) / REG_SIZE);
1686 
1687 }
1688 
1689 
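/* Set the GFT event ID and CM header used by the parser to wake Tstorm */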
1690 void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
1691 				   struct ecore_ptt *p_ptt)
1692 {
1693 	u32 rfs_cm_hdr_event_id;
1694 
1695 	/* Set RFS event ID to be awakened in Tstorm by PRS */
1696 	rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
1697 	rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
1698 	    PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
1699 	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
1700 	    PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
1701 	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
1702 }
1703 
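/* Configure the GFT (generic flow table) engine for a PF: program the RFS
 * event ID, the per-PF CAM line and the RAM profile mask that select which
 * packet fields are matched for the given profile type.
 */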
1704 void ecore_gft_config(struct ecore_hwfn *p_hwfn,
1705 			       struct ecore_ptt *p_ptt,
1706 			       u16 pf_id,
1707 			       bool tcp,
1708 			       bool udp,
1709 			       bool ipv4,
1710 			       bool ipv6,
1711 			       enum gft_profile_type profile_type)
1712 {
1713 	u32 reg_val, cam_line, search_non_ip_as_gft;
1714 	struct regpair ram_line = { 0 };
1715 
1716 	if (!ipv6 && !ipv4)
1717 		DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - ipv4 or ipv6\n");
1718 	if (!tcp && !udp)
1719 		DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - udp or tcp\n");
1720 	if (profile_type >= MAX_GFT_PROFILE_TYPE)
1721 		DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");
1722 
1723 	/* Set RFS event ID to be awakened in Tstorm by PRS */
1724 	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
1725 		  PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
1726 	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
1727 	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
1728 
1729 	/* Do not load context, only cid, in PRS on match. */
1730 	ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
1731 
1732 	/* Do not use tenant ID exist bit for gft search */
1733 	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
1734 
1735 	/* Set Cam */
1736 	cam_line = 0;
1737 	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
1738 
1739 	/* Filters are per PF!! */
1740 	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK,
1741 		  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
1742 	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
1743 
1744 	if (!(tcp && udp)) {
1745 		SET_FIELD(cam_line,
1746 			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
1747 			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
1748 		if (tcp)
1749 			SET_FIELD(cam_line,
1750 				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
1751 				  GFT_PROFILE_TCP_PROTOCOL);
1752 		else
1753 			SET_FIELD(cam_line,
1754 				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
1755 				  GFT_PROFILE_UDP_PROTOCOL);
1756 	}
1757 
1758 	if (!(ipv4 && ipv6)) {
1759 		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
1760 		if (ipv4)
1761 			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
1762 				  GFT_PROFILE_IPV4);
1763 		else
1764 			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
1765 				  GFT_PROFILE_IPV6);
1766 	}
1767 
1768 	/* Write characteristics to cam */
1769 	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
1770 		 cam_line);
1771 	cam_line = ecore_rd(p_hwfn, p_ptt,
1772 			    PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
1773 
1774 	/* Write line to RAM - compare to filter 4 tuple */
1775 
1776 	/* Search no IP as GFT */
1777 	search_non_ip_as_gft = 0;
1778 
1779 	/* Tunnel type */
1780 	SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
1781 	SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
1782 
1783 	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
1784 		SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
1785 		SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
1786 		SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
1787 		SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
1788 		SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1);
1789 		SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
1790 	} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
1791 		SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
1792 		SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
1793 		SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
1794 	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
1795 		SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
1796 		SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
1797 	} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
1798 		SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
1799 		SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
1800 	} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
1801 		SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
1802 
1803 		/* Allow tunneled traffic without inner IP */
1804 		search_non_ip_as_gft = 1;
1805 	}
1806 
1807 	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT,
1808 		 search_non_ip_as_gft);
1809 	ecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
1810 			  PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
1811 			  sizeof(ram_line) / REG_SIZE);
1812 
1813 	/* Set default profile so that no filter match will happen */
1814 	ram_line.lo = 0xffffffff;
1815 	ram_line.hi = 0x3ff;
1816 	ecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
1817 			  PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
1818 			  PRS_GFT_CAM_LINES_NO_MATCH,
1819 			  sizeof(ram_line) / REG_SIZE);
1820 
1821 	/* Enable gft search */
1822 	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
1823 }
1824 
1825 
1826 #endif /* UNUSED_HSI_FUNC */
1827 
1828 /* Configure VF zone size mode */
1829 void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
1830 				    struct ecore_ptt *p_ptt, u16 mode,
1831 				    bool runtime_init)
1832 {
1833 	u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
1834 	u32 msdm_vf_offset_mask;
1835 
1836 	if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
1837 		msdm_vf_size_log += 1;
1838 	else if (mode == VF_ZONE_SIZE_MODE_QUAD)
1839 		msdm_vf_size_log += 2;
1840 
1841 	msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
1842 
1843 	if (runtime_init) {
1844 		STORE_RT_REG(p_hwfn,
1845 			     PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
1846 			     msdm_vf_size_log);
1847 		STORE_RT_REG(p_hwfn,
1848 			     PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
1849 			     msdm_vf_offset_mask);
1850 	} else {
1851 		ecore_wr(p_hwfn, p_ptt,
1852 			 PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
1853 		ecore_wr(p_hwfn, p_ptt,
1854 			 PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
1855 	}
1856 }
1857 
1858 /* Get mstorm statistics for offset by VF zone size mode */
1859 u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
1860 				       u16 stat_cnt_id,
1861 				       u16 vf_zone_size_mode)
1862 {
1863 	u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
1864 
1865 	if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
1866 	    (stat_cnt_id > MAX_NUM_PFS)) {
1867 		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
1868 			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1869 			    (stat_cnt_id - MAX_NUM_PFS);
1870 		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
1871 			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1872 			    (stat_cnt_id - MAX_NUM_PFS);
1873 	}
1874 
1875 	return offset;
1876 }
1877 
1878 /* Get mstorm VF producer offset by VF zone size mode */
1879 u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
1880 					 u8 vf_id,
1881 					 u8 vf_queue_id,
1882 					 u16 vf_zone_size_mode)
1883 {
1884 	u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
1885 
1886 	if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
1887 		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
1888 			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1889 				   vf_id;
1890 		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
1891 			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1892 				  vf_id;
1893 	}
1894 
1895 	return offset;
1896 }
1897 
1898 #ifndef LINUX_REMOVE
1899 #define CRC8_INIT_VALUE 0xFF
1900 #endif
1901 static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
1902 
1903 /* Calculate and return CDU validation byte per connection type / region /
1904  * cid
1905  */
1906 static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn,
1907 					 u8 conn_type, u8 region, u32 cid)
1908 {
1909 	static u8 crc8_table_valid;	/* automatically initialized to 0 */
1910 	u8 crc, validation_byte = 0;
1911 	u32 validation_string = 0;
1912 	u32 data_to_crc;
1913 
1914 	if (crc8_table_valid == 0) {
1915 		OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
1916 		crc8_table_valid = 1;
1917 	}
1918 
1919 	/*
1920 	 * The CRC is calculated on the String-to-compress:
1921 	 * [31:8]  = {CID[31:20],CID[11:0]}
1922 	 * [7:4]   = Region
1923 	 * [3:0]   = Type
1924 	 */
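	/* Illustrative example (assumes the USE_CID, USE_REGION and USE_TYPE
	 * bits are all set in CDU_CONTEXT_VALIDATION_DEFAULT_CFG):
	 * cid = 0x12345678, region = 3, type = 2 yields
	 * validation_string = 0x12367832, which is then converted to
	 * big-endian and run through CRC8.
	 */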
1925 #if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \
1926 	CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
1927 	validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
1928 #endif
1929 
1930 #if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \
1931 	CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
1932 	validation_string |= ((region & 0xF) << 4);
1933 #endif
1934 
1935 #if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \
1936 	CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
1937 	validation_string |= (conn_type & 0xF);
1938 #endif
1939 	/* Convert to big-endian and calculate CRC8 */
1940 	data_to_crc = OSAL_BE32_TO_CPU(validation_string);
1941 
1942 	crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
1943 			CRC8_INIT_VALUE);
1944 
1945 	/* The validation byte [7:0] is composed:
1946 	 * for type A validation
1947 	 * [7]		= active configuration bit
1948 	 * [6:0]	= crc[6:0]
1949 	 *
1950 	 * for type B validation
1951 	 * [7]		= active configuration bit
1952 	 * [6:3]	= connection_type[3:0]
1953 	 * [2:0]	= crc[2:0]
1954 	 */
1955 	validation_byte |= ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >>
1956 			     CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
1957 
1958 #if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \
1959 	CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
1960 	validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
1961 #else
1962 	validation_byte |= crc & 0x7F;
1963 #endif
1964 	return validation_byte;
1965 }
1966 
1967 /* Calculate and set validation bytes for session context */
1968 void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
1969 				       void *p_ctx_mem, u16 ctx_size,
1970 				       u8 ctx_type, u32 cid)
1971 {
1972 	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
1973 
1974 	p_ctx = (u8 *)p_ctx_mem;
1975 
1976 	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
1977 	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
1978 	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
1979 
1980 	OSAL_MEMSET(p_ctx, 0, ctx_size);
1981 
1982 	*x_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 3, cid);
1983 	*t_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 4, cid);
1984 	*u_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 5, cid);
1985 }
1986 
1987 /* Calculate and set validation bytes for task context */
1988 void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
1989 				    u16 ctx_size, u8 ctx_type, u32 tid)
1990 {
1991 	u8 *p_ctx, *region1_val_ptr;
1992 
1993 	p_ctx = (u8 *)p_ctx_mem;
1994 	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
1995 
1996 	OSAL_MEMSET(p_ctx, 0, ctx_size);
1997 
1998 	*region1_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 1,
1999 							  tid);
2000 }
2001 
2002 /* Memset session context to 0 while preserving validation bytes */
2003 void ecore_memset_session_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
2004 			      u32 ctx_size, u8 ctx_type)
2005 {
2006 	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
2007 	u8 x_val, t_val, u_val;
2008 
2009 	p_ctx = (u8 *)p_ctx_mem;
2010 
2011 	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
2012 	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
2013 	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
2014 
2015 	x_val = *x_val_ptr;
2016 	t_val = *t_val_ptr;
2017 	u_val = *u_val_ptr;
2018 
2019 	OSAL_MEMSET(p_ctx, 0, ctx_size);
2020 
2021 	*x_val_ptr = x_val;
2022 	*t_val_ptr = t_val;
2023 	*u_val_ptr = u_val;
2024 }
2025 
2026 /* Memset task context to 0 while preserving validation bytes */
2027 void ecore_memset_task_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
2028 			   u32 ctx_size, u8 ctx_type)
2029 {
2030 	u8 *p_ctx, *region1_val_ptr;
2031 	u8 region1_val;
2032 
2033 	p_ctx = (u8 *)p_ctx_mem;
2034 	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
2035 
2036 	region1_val = *region1_val_ptr;
2037 
2038 	OSAL_MEMSET(p_ctx, 0, ctx_size);
2039 
2040 	*region1_val_ptr = region1_val;
2041 }
2042 
2043 /* Enable and configure context validation */
2044 void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
2045 				     struct ecore_ptt *p_ptt)
2046 {
2047 	u32 ctx_validation;
2048 
2049 	/* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
2050 	ctx_validation = CDU_CONTEXT_VALIDATION_DEFAULT_CFG << 24;
2051 	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
2052 
2053 	/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
2054 	ctx_validation = CDU_CONTEXT_VALIDATION_DEFAULT_CFG << 8;
2055 	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
2056 
2057 	/* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
2058 	ctx_validation = CDU_CONTEXT_VALIDATION_DEFAULT_CFG << 8;
2059 	ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
2060 }
2061 
2062 #define PHYS_ADDR_DWORDS        DIV_ROUND_UP(sizeof(dma_addr_t), 4)
2063 #define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4)
2064 
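/* Return the internal RAM GRC address that holds the overlay buffer address
 * for the given Storm (TSEM/MSEM/USEM/XSEM/YSEM/PSEM), or 0 if unknown.
 */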
2065 static u32 ecore_get_overlay_addr_ram_addr(struct ecore_hwfn *p_hwfn,
2066 					   u8 storm_id)
2067 {
2068 	switch (storm_id) {
2069 	case 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2070 			TSTORM_OVERLAY_BUF_ADDR_OFFSET;
2071 	case 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2072 			MSTORM_OVERLAY_BUF_ADDR_OFFSET;
2073 	case 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2074 			USTORM_OVERLAY_BUF_ADDR_OFFSET;
2075 	case 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2076 			XSTORM_OVERLAY_BUF_ADDR_OFFSET;
2077 	case 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2078 			YSTORM_OVERLAY_BUF_ADDR_OFFSET;
2079 	case 5: return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
2080 			PSTORM_OVERLAY_BUF_ADDR_OFFSET;
2081 
2082 	default: return 0;
2083 	}
2084 }
2085 
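/* Parse the FW overlay input buffer and, for each Storm found in it, allocate
 * a DMA-coherent buffer and copy that Storm's overlay data into it. Returns an
 * array of NUM_STORMS descriptors, or OSAL_NULL on failure.
 */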
2086 struct phys_mem_desc *ecore_fw_overlay_mem_alloc(struct ecore_hwfn *p_hwfn,
2087 					 const u32 *const fw_overlay_in_buf,
2088 					 u32 buf_size_in_bytes)
2089 {
2090 	u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0;
2091 	struct phys_mem_desc *allocated_mem;
2092 
2093 	if (!buf_size)
2094 		return OSAL_NULL;
2095 
2096 	allocated_mem = (struct phys_mem_desc *)OSAL_ZALLOC(p_hwfn->p_dev,
2097 							    GFP_KERNEL,
2098 							    NUM_STORMS *
2099 						  sizeof(struct phys_mem_desc));
2100 	if (!allocated_mem)
2101 		return OSAL_NULL;
2102 
2103 	OSAL_MEMSET(allocated_mem, 0, NUM_STORMS *
2104 		    sizeof(struct phys_mem_desc));
2105 
2106 	/* For each Storm, set physical address in RAM */
2107 	while (buf_offset < buf_size) {
2108 		struct phys_mem_desc *storm_mem_desc;
2109 		struct fw_overlay_buf_hdr *hdr;
2110 		u32 storm_buf_size;
2111 		u8 storm_id;
2112 
2113 		hdr =
2114 		    (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset];
2115 		storm_buf_size = GET_FIELD(hdr->data,
2116 					   FW_OVERLAY_BUF_HDR_BUF_SIZE);
2117 		storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
2118 		storm_mem_desc = allocated_mem + storm_id;
2119 		storm_mem_desc->size = storm_buf_size * sizeof(u32);
2120 
2121 		/* Allocate physical memory for Storm's overlays buffer */
2122 		storm_mem_desc->virt_addr =
2123 			OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
2124 						&storm_mem_desc->phys_addr,
2125 						storm_mem_desc->size);
2126 		if (!storm_mem_desc->virt_addr)
2127 			break;
2128 
2129 		/* Skip overlays buffer header */
2130 		buf_offset += OVERLAY_HDR_SIZE_DWORDS;
2131 
2132 		/* Copy Storm's overlays buffer to allocated memory */
2133 		OSAL_MEMCPY(storm_mem_desc->virt_addr,
2134 			    &fw_overlay_in_buf[buf_offset],
2135 			    storm_mem_desc->size);
2136 
2137 		/* Advance to next Storm */
2138 		buf_offset += storm_buf_size;
2139 	}
2140 
2141 	/* If memory allocation has failed, free all allocated memory */
2142 	if (buf_offset < buf_size) {
2143 		ecore_fw_overlay_mem_free(p_hwfn, allocated_mem);
2144 		return OSAL_NULL;
2145 	}
2146 
2147 	return allocated_mem;
2148 }
2149 
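/* Write each Storm's overlay buffer physical address into that Storm's
 * internal RAM, at the slot of the current PF.
 */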
2150 void ecore_fw_overlay_init_ram(struct ecore_hwfn *p_hwfn,
2151 			       struct ecore_ptt *p_ptt,
2152 			       struct phys_mem_desc *fw_overlay_mem)
2153 {
2154 	u8 storm_id;
2155 
2156 	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
2157 		struct phys_mem_desc *storm_mem_desc =
2158 			      (struct phys_mem_desc *)fw_overlay_mem + storm_id;
2159 		u32 ram_addr, i;
2160 
2161 		/* Skip Storms with no FW overlays */
2162 		if (!storm_mem_desc->virt_addr)
2163 			continue;
2164 
2165 		/* Calculate overlay RAM GRC address of current PF */
2166 		ram_addr = ecore_get_overlay_addr_ram_addr(p_hwfn, storm_id) +
2167 			   sizeof(dma_addr_t) * p_hwfn->rel_pf_id;
2168 
2169 		/* Write Storm's overlay physical address to RAM */
2170 		for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32))
2171 			ecore_wr(p_hwfn, p_ptt, ram_addr,
2172 				 ((u32 *)&storm_mem_desc->phys_addr)[i]);
2173 	}
2174 }
2175 
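/* Free the per-Storm overlay buffers and the descriptor array itself */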
2176 void ecore_fw_overlay_mem_free(struct ecore_hwfn *p_hwfn,
2177 			       struct phys_mem_desc *fw_overlay_mem)
2178 {
2179 	u8 storm_id;
2180 
2181 	if (!fw_overlay_mem)
2182 		return;
2183 
2184 	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
2185 		struct phys_mem_desc *storm_mem_desc =
2186 			      (struct phys_mem_desc *)fw_overlay_mem + storm_id;
2187 
2188 		/* Free Storm's physical memory */
2189 		if (storm_mem_desc->virt_addr)
2190 			OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
2191 					       storm_mem_desc->virt_addr,
2192 					       storm_mem_desc->phys_addr,
2193 					       storm_mem_desc->size);
2194 	}
2195 
2196 	/* Free allocated virtual memory */
2197 	OSAL_FREE(p_hwfn->p_dev, fw_overlay_mem);
2198 }
2199