xref: /dpdk/drivers/net/qede/base/ecore_cxt.c (revision 2352f348c997a34549c71c99029fb3d214aad39a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6 
7 #include "bcm_osal.h"
8 #include "reg_addr.h"
9 #include "common_hsi.h"
10 #include "ecore_hsi_common.h"
11 #include "ecore_hsi_eth.h"
12 #include "ecore_rt_defs.h"
13 #include "ecore_status.h"
14 #include "ecore.h"
15 #include "ecore_init_ops.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_cxt.h"
18 #include "ecore_hw.h"
19 #include "ecore_dev_api.h"
20 #include "ecore_sriov.h"
21 #include "ecore_mcp.h"
22 
23 /* Doorbell-Queue constants */
24 #define DQ_RANGE_SHIFT	4
25 #define DQ_RANGE_ALIGN	(1 << DQ_RANGE_SHIFT)
26 
27 /* Searcher constants */
28 #define SRC_MIN_NUM_ELEMS 256
29 
30 /* GFS constants */
31 #define RGFS_MIN_NUM_ELEMS	256
32 #define TGFS_MIN_NUM_ELEMS	256
33 
34 /* Timers constants */
35 #define TM_SHIFT	7
36 #define TM_ALIGN	(1 << TM_SHIFT)
37 #define TM_ELEM_SIZE	4
38 
39 /* ILT constants */
40 #define ILT_DEFAULT_HW_P_SIZE	4
41 
42 #define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
43 #define ILT_CFG_REG(cli, reg)		PSWRQ2_REG_##cli##_##reg##_RT_OFFSET
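
/* Worked example (illustrative, assuming the default ILT_DEFAULT_HW_P_SIZE of
 * 4): ILT_PAGE_IN_BYTES(4) = 1 << (4 + 12) = 64KB, i.e. every ILT line maps a
 * 64KB page of host memory.
 */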
44 
45 /* ILT entry structure */
46 #define ILT_ENTRY_PHY_ADDR_MASK		0x000FFFFFFFFFFFULL
47 #define ILT_ENTRY_PHY_ADDR_SHIFT	0
48 #define ILT_ENTRY_VALID_MASK		0x1ULL
49 #define ILT_ENTRY_VALID_SHIFT		52
50 #define ILT_ENTRY_IN_REGS		2
51 #define ILT_REG_SIZE_IN_BYTES		4
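
/* Illustration of how an ILT entry is built (see ecore_ilt_init_pf() below):
 * the 4KB-aligned physical address, shifted right by 12, goes into the
 * ILT_ENTRY_PHY_ADDR field and the valid bit is set at position 52:
 *   u64 ilt_hw_entry = 0;
 *   SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, phys_addr >> 12);
 *   SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
 * The 64-bit entry is then stored as ILT_ENTRY_IN_REGS (2) 32-bit runtime
 * registers.
 */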
52 
53 /* connection context union */
54 union conn_context {
55 	struct core_conn_context core_ctx;
56 	struct eth_conn_context eth_ctx;
57 };
58 
59 /* TYPE-0 task context - iSCSI, FCOE */
60 union type0_task_context {
61 };
62 
63 /* TYPE-1 task context - ROCE */
64 union type1_task_context {
65 	struct regpair reserved; /* @DPDK */
66 };
67 
68 struct src_ent {
69 	u8 opaque[56];
70 	u64 next;
71 };
72 
73 #define CDUT_SEG_ALIGNMET 3	/* in 4k chunks */
74 #define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
75 
76 #define CONN_CXT_SIZE(p_hwfn) \
77 	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
78 
79 #define SRQ_CXT_SIZE (sizeof(struct regpair) * 8) /* @DPDK */
80 
81 #define TYPE0_TASK_CXT_SIZE(p_hwfn) \
82 	ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
83 
84 /* Alignment is inherent to the type1_task_context structure */
85 #define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
86 
87 static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
88 {
89 	return type == PROTOCOLID_TOE;
90 }
91 
92 static bool tm_tid_proto(enum protocol_type type)
93 {
94 	return type == PROTOCOLID_FCOE;
95 }
96 
97 /* counts the iids for the CDU/CDUC ILT client configuration */
98 struct ecore_cdu_iids {
99 	u32 pf_cids;
100 	u32 per_vf_cids;
101 };
102 
103 static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr,
104 			       struct ecore_cdu_iids *iids)
105 {
106 	u32 type;
107 
108 	for (type = 0; type < MAX_CONN_TYPES; type++) {
109 		iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
110 		iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
111 	}
112 }
113 
114 /* counts the iids for the Searcher block configuration */
115 struct ecore_src_iids {
116 	u32 pf_cids;
117 	u32 per_vf_cids;
118 };
119 
120 static void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
121 			       struct ecore_src_iids *iids)
122 {
123 	u32 i;
124 
125 	for (i = 0; i < MAX_CONN_TYPES; i++) {
126 		iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
127 		iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
128 	}
129 
130 	/* Add the L2 filtering (ARFS) filters in addition */
131 	iids->pf_cids += p_mngr->arfs_count;
132 }
133 
134 /* counts the iids for the Timers block configuration */
135 struct ecore_tm_iids {
136 	u32 pf_cids;
137 	u32 pf_tids[NUM_TASK_PF_SEGMENTS];	/* per segment */
138 	u32 pf_tids_total;
139 	u32 per_vf_cids;
140 	u32 per_vf_tids;
141 };
142 
143 static void ecore_cxt_tm_iids(struct ecore_hwfn *p_hwfn,
144 			      struct ecore_cxt_mngr *p_mngr,
145 			      struct ecore_tm_iids *iids)
146 {
147 	struct ecore_conn_type_cfg *p_cfg;
148 	bool tm_vf_required = false;
149 	bool tm_required = false;
150 	u32 i, j;
151 
152 	for (i = 0; i < MAX_CONN_TYPES; i++) {
153 		p_cfg = &p_mngr->conn_cfg[i];
154 
155 		if (tm_cid_proto(i) || tm_required) {
156 			if (p_cfg->cid_count)
157 				tm_required = true;
158 
159 			iids->pf_cids += p_cfg->cid_count;
160 		}
161 
162 		if (tm_cid_proto(i) || tm_vf_required) {
163 			if (p_cfg->cids_per_vf)
164 				tm_vf_required = true;
165 
166 		}
167 
168 		if (tm_tid_proto(i)) {
169 			struct ecore_tid_seg *segs = p_cfg->tid_seg;
170 
171 			/* for each segment there is at most one
172 			 * protocol for which count is not 0.
173 			 */
174 			for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
175 				iids->pf_tids[j] += segs[j].count;
176 
177 			/* The last array element is for the VFs. As for PF
178 			 * segments there can be only one protocol for
179 			 * which this value is not 0.
180 			 */
181 			iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
182 		}
183 	}
184 
185 	iids->pf_cids = ROUNDUP(iids->pf_cids, TM_ALIGN);
186 	iids->per_vf_cids = ROUNDUP(iids->per_vf_cids, TM_ALIGN);
187 	iids->per_vf_tids = ROUNDUP(iids->per_vf_tids, TM_ALIGN);
188 
189 	for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
190 		iids->pf_tids[j] = ROUNDUP(iids->pf_tids[j], TM_ALIGN);
191 		iids->pf_tids_total += iids->pf_tids[j];
192 	}
193 }
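
/* Rounding example (illustrative numbers): with TM_ALIGN = 1 << 7 = 128, a
 * protocol contributing 1000 cids ends up as ROUNDUP(1000, 128) = 1024 timer
 * ids; the per-segment tid counts are rounded the same way before being
 * summed into pf_tids_total.
 */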
194 
195 static void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
196 			      struct ecore_qm_iids *iids)
197 {
198 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
199 	struct ecore_tid_seg *segs;
200 	u32 vf_cids = 0, type, j;
201 	u32 vf_tids = 0;
202 
203 	for (type = 0; type < MAX_CONN_TYPES; type++) {
204 		iids->cids += p_mngr->conn_cfg[type].cid_count;
205 		vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
206 
207 		segs = p_mngr->conn_cfg[type].tid_seg;
208 		/* for each segment there is at most one
209 		 * protocol for which count is not 0.
210 		 */
211 		for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
212 			iids->tids += segs[j].count;
213 
214 		/* The last array element is for the VFs. As for PF
215 		 * segments there can be only one protocol for
216 		 * which this value is not 0.
217 		 */
218 		vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
219 	}
220 
221 	iids->vf_cids += vf_cids * p_mngr->vf_count;
222 	iids->tids += vf_tids * p_mngr->vf_count;
223 
224 	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
225 		   "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
226 		   iids->cids, iids->vf_cids, iids->tids, vf_tids);
227 }
228 
229 static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
230 						    u32 seg)
231 {
232 	struct ecore_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
233 	u32 i;
234 
235 	/* Find the protocol with tid count > 0 for this segment.
236 	 * Note: there can only be one and this is already validated.
237 	 */
238 	for (i = 0; i < MAX_CONN_TYPES; i++) {
239 		if (p_cfg->conn_cfg[i].tid_seg[seg].count)
240 			return &p_cfg->conn_cfg[i].tid_seg[seg];
241 	}
242 	return OSAL_NULL;
243 }
244 
245 static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
246 {
247 	struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
248 
249 	p_mgr->srq_count = num_srqs;
250 }
251 
252 u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
253 {
254 	struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
255 
256 	return p_mgr->srq_count;
257 }
258 
259 /* set the iids (cid/tid) count per protocol */
260 static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
261 				   enum protocol_type type,
262 				   u32 cid_count, u32 vf_cid_cnt)
263 {
264 	struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
265 	struct ecore_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
266 
267 	p_conn->cid_count = ROUNDUP(cid_count, DQ_RANGE_ALIGN);
268 	p_conn->cids_per_vf = ROUNDUP(vf_cid_cnt, DQ_RANGE_ALIGN);
269 }
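
/* Rounding example (illustrative numbers): with DQ_RANGE_ALIGN = 1 << 4 = 16,
 * a request for 100 cids is stored as ROUNDUP(100, 16) = 112, so the doorbell
 * queue ranges always start and end on 16-cid boundaries.
 */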
270 
271 u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
272 				  enum protocol_type type, u32 *vf_cid)
273 {
274 	if (vf_cid)
275 		*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
276 
277 	return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
278 }
279 
280 u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
281 				  enum protocol_type type)
282 {
283 	return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
284 }
285 
286 u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
287 					 enum protocol_type type)
288 {
289 	u32 cnt = 0;
290 	int i;
291 
292 	for (i = 0; i < TASK_SEGMENTS; i++)
293 		cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
294 
295 	return cnt;
296 }
297 
298 static OSAL_INLINE void
299 ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn,
300 			      enum protocol_type proto,
301 			      u8 seg, u8 seg_type, u32 count, bool has_fl)
302 {
303 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
304 	struct ecore_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
305 
306 	p_seg->count = count;
307 	p_seg->has_fl_mem = has_fl;
308 	p_seg->type = seg_type;
309 }
310 
311 /* the *p_line parameter must be either 0 for the first invocation or the
312  * value returned in the previous invocation.
313  */
314 static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
315 				   struct ecore_ilt_cli_blk *p_blk,
316 				   u32 start_line,
317 				   u32 total_size, u32 elem_size)
318 {
319 	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
320 
321 	/* verify that it's called once for each block */
322 	if (p_blk->total_size)
323 		return;
324 
325 	p_blk->total_size = total_size;
326 	p_blk->real_size_in_page = 0;
327 	if (elem_size)
328 		p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
329 	p_blk->start_line = start_line;
330 }
331 
332 static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
333 				   struct ecore_ilt_client_cfg *p_cli,
334 				   struct ecore_ilt_cli_blk *p_blk,
335 				   u32 *p_line, enum ilt_clients client_id)
336 {
337 	if (!p_blk->total_size)
338 		return;
339 
340 	if (!p_cli->active)
341 		p_cli->first.val = *p_line;
342 
343 	p_cli->active = true;
344 	*p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
345 	p_cli->last.val = *p_line - 1;
346 
347 	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
348 		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x"
349 		   " [Real %08x] Start line %d\n",
350 		   client_id, p_cli->first.val, p_cli->last.val,
351 		   p_blk->total_size, p_blk->real_size_in_page,
352 		   p_blk->start_line);
353 }
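
/* Worked example for the two helpers above (illustrative element size): with
 * the default 64KB ILT page and a 320-byte element,
 *   real_size_in_page = (65536 / 320) * 320 = 65280 bytes,
 * and a block whose total_size is 1MB advances the line counter by
 *   DIV_ROUND_UP(1048576, 65280) = 17 ILT lines.
 */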
354 
355 static void ecore_ilt_get_dynamic_line_range(struct ecore_hwfn *p_hwfn,
356 					     enum ilt_clients ilt_client,
357 					     u32 *dynamic_line_offset,
358 					     u32 *dynamic_line_cnt)
359 {
360 	struct ecore_ilt_client_cfg *p_cli;
361 	struct ecore_conn_type_cfg *p_cfg;
362 	u32 cxts_per_p;
363 
364 	/* TBD MK: ILT code should be simplified once PROTO enum is changed */
365 
366 	*dynamic_line_offset = 0;
367 	*dynamic_line_cnt = 0;
368 
369 	if (ilt_client == ILT_CLI_CDUC) {
370 		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
371 		p_cfg = &p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE];
372 
373 		cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
374 		    (u32)CONN_CXT_SIZE(p_hwfn);
375 
376 		*dynamic_line_cnt = p_cfg->cid_count / cxts_per_p;
377 	}
378 }
379 
380 static struct ecore_ilt_client_cfg *
381 ecore_cxt_set_cli(struct ecore_ilt_client_cfg *p_cli)
382 {
383 	p_cli->active = false;
384 	p_cli->first.val = 0;
385 	p_cli->last.val = 0;
386 	return p_cli;
387 }
388 
389 static struct ecore_ilt_cli_blk *
390 ecore_cxt_set_blk(struct ecore_ilt_cli_blk *p_blk)
391 {
392 	p_blk->total_size = 0;
393 	return p_blk;
394 }
395 
396 static u32
397 ecore_cxt_src_elements(struct ecore_cxt_mngr *p_mngr)
398 {
399 	struct ecore_src_iids src_iids;
400 	u32 elem_num = 0;
401 
402 	OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
403 	ecore_cxt_src_iids(p_mngr, &src_iids);
404 
405 	/* Both the PF and VFs searcher connections are stored in the per PF
406 	 * database. Thus sum the PF searcher cids and all the VFs searcher
407 	 * cids.
408 	 */
409 	elem_num = src_iids.pf_cids +
410 		   src_iids.per_vf_cids * p_mngr->vf_count;
411 	if (elem_num == 0)
412 		return elem_num;
413 
414 	elem_num = OSAL_MAX_T(u32, elem_num, SRC_MIN_NUM_ELEMS);
415 	elem_num = OSAL_ROUNDUP_POW_OF_TWO(elem_num);
416 
417 	return elem_num;
418 }
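
/* Sizing example (illustrative numbers): with pf_cids = 4096 and 8 VFs of 32
 * searcher cids each, elem_num = 4096 + 8 * 32 = 4352; this already exceeds
 * SRC_MIN_NUM_ELEMS (256) and is rounded up to the next power of two,
 * 8192 elements.
 */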
419 
420 enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
421 {
422 	u32 curr_line, total, i, task_size, line, total_size, elem_size;
423 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
424 	struct ecore_ilt_client_cfg *p_cli;
425 	struct ecore_ilt_cli_blk *p_blk;
426 	struct ecore_cdu_iids cdu_iids;
427 	struct ecore_qm_iids qm_iids;
428 	struct ecore_tm_iids tm_iids;
429 	struct ecore_tid_seg *p_seg;
430 
431 	OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids));
432 	OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids));
433 	OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
434 
435 	p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);
436 
437 	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
438 		   "hwfn [%d] - Set context mngr starting line to be 0x%08x\n",
439 		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
440 
441 	/* CDUC */
442 	p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);
443 
444 	curr_line = p_mngr->pf_start_line;
445 
446 	/* CDUC PF */
447 	p_cli->pf_total_lines = 0;
448 
449 	/* get the counters for the CDUC and QM clients */
450 	ecore_cxt_cdu_iids(p_mngr, &cdu_iids);
451 
452 	p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);
453 
454 	total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
455 
456 	ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
457 			       total, CONN_CXT_SIZE(p_hwfn));
458 
459 	ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
460 	p_cli->pf_total_lines = curr_line - p_blk->start_line;
461 
462 	ecore_ilt_get_dynamic_line_range(p_hwfn, ILT_CLI_CDUC,
463 					 &p_blk->dynamic_line_offset,
464 					 &p_blk->dynamic_line_cnt);
465 
466 	/* CDUC VF */
467 	p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
468 	total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
469 
470 	ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
471 			       total, CONN_CXT_SIZE(p_hwfn));
472 
473 	ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
474 	p_cli->vf_total_lines = curr_line - p_blk->start_line;
475 
476 	for (i = 1; i < p_mngr->vf_count; i++)
477 		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
478 				       ILT_CLI_CDUC);
479 
480 	/* CDUT PF */
481 	p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
482 	p_cli->first.val = curr_line;
483 
484 	/* first the 'working' task memory */
485 	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
486 		p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
487 		if (!p_seg || p_seg->count == 0)
488 			continue;
489 
490 		p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
491 		total = p_seg->count * p_mngr->task_type_size[p_seg->type];
492 		ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
493 				       p_mngr->task_type_size[p_seg->type]);
494 
495 		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
496 				       ILT_CLI_CDUT);
497 	}
498 
499 	/* next the 'init' task memory (forced load memory) */
500 	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
501 		p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
502 		if (!p_seg || p_seg->count == 0)
503 			continue;
504 
505 		p_blk =
506 		     ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
507 
508 		if (!p_seg->has_fl_mem) {
509 			/* The segment is active (total size of 'working'
510 			 * memory is > 0) but has no FL (forced-load, Init)
511 			 * memory. Thus:
512 			 *
513 			 * 1.   The total-size in the corresponding FL block of
514 			 *      the ILT client is set to 0 - no ILT lines are
515 			 *      provisioned and no ILT memory is allocated.
516 			 *
517 			 * 2.   The start-line of said block is set to the
518 			 *      start line of the matching working memory
519 			 *      block in the ILT client. This is later used to
520 			 *      configure the CDU segment offset registers and
521 			 *      results in FL commands for TIDs of this
522 			 *      segment behaving as regular load commands
523 			 *      (loading TIDs from the working memory).
524 			 */
525 			line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
526 
527 			ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
528 			continue;
529 		}
530 		total = p_seg->count * p_mngr->task_type_size[p_seg->type];
531 
532 		ecore_ilt_cli_blk_fill(p_cli, p_blk,
533 				       curr_line, total,
534 				       p_mngr->task_type_size[p_seg->type]);
535 
536 		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
537 				       ILT_CLI_CDUT);
538 	}
539 	p_cli->pf_total_lines = curr_line - p_cli->first.val;
540 
541 	/* CDUT VF */
542 	p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
543 	if (p_seg && p_seg->count) {
544 		/* Strictly speaking we need to iterate over all VF
545 		 * task segment types, but a VF has only 1 segment
546 		 */
547 
548 		/* 'working' memory */
549 		total = p_seg->count * p_mngr->task_type_size[p_seg->type];
550 
551 		p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
552 		ecore_ilt_cli_blk_fill(p_cli, p_blk,
553 				       curr_line, total,
554 				       p_mngr->task_type_size[p_seg->type]);
555 
556 		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
557 				       ILT_CLI_CDUT);
558 
559 		/* 'init' memory */
560 		p_blk =
561 		     ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
562 		if (!p_seg->has_fl_mem) {
563 			/* see comment above */
564 			line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
565 			ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
566 		} else {
567 			task_size = p_mngr->task_type_size[p_seg->type];
568 			ecore_ilt_cli_blk_fill(p_cli, p_blk,
569 					       curr_line, total, task_size);
570 			ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
571 					       ILT_CLI_CDUT);
572 		}
573 		p_cli->vf_total_lines = curr_line - (p_cli->first.val +
574 						     p_cli->pf_total_lines);
575 
576 		/* Now for the rest of the VFs */
577 		for (i = 1; i < p_mngr->vf_count; i++) {
578 			/* don't set p_blk i.e. don't clear total_size */
579 			p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
580 			ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
581 					       ILT_CLI_CDUT);
582 
583 			/* don't set p_blk i.e. don't clear total_size */
584 			p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
585 			ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
586 					       ILT_CLI_CDUT);
587 		}
588 	}
589 
590 	/* QM */
591 	p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
592 	p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
593 
594 	/* At this stage, after the first QM configuration, the PF PQs amount
595 	 * is the highest possible. Save this value at qm_info->ilt_pf_pqs to
596 	 * detect overflows in the future.
597 	 * Even though VF PQs amount can be larger than VF count, use vf_count
598 	 * because each VF requires only the full amount of CIDs.
599 	 */
600 	ecore_cxt_qm_iids(p_hwfn, &qm_iids);
601 	total = ecore_qm_pf_mem_size(p_hwfn, qm_iids.cids,
602 				     qm_iids.vf_cids, qm_iids.tids,
603 				     p_hwfn->qm_info.num_pqs + OFLD_GRP_SIZE,
604 				     p_hwfn->qm_info.num_vf_pqs);
605 
606 	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
607 		   "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d,"
608 		   " num_vf_pqs=%d, memory_size=%d)\n",
609 		   qm_iids.cids, qm_iids.vf_cids, qm_iids.tids,
610 		   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
611 
612 	ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000,
613 			       QM_PQ_ELEMENT_SIZE);
614 
615 	ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
616 	p_cli->pf_total_lines = curr_line - p_blk->start_line;
617 
618 	/* TM PF */
619 	p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
620 	ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
621 	total = tm_iids.pf_cids + tm_iids.pf_tids_total;
622 	if (total) {
623 		p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
624 		ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
625 				       total * TM_ELEM_SIZE,
626 				       TM_ELEM_SIZE);
627 
628 		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
629 				       ILT_CLI_TM);
630 		p_cli->pf_total_lines = curr_line - p_blk->start_line;
631 	}
632 
633 	/* TM VF */
634 	total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
635 	if (total) {
636 		p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[0]);
637 		ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
638 				       total * TM_ELEM_SIZE, TM_ELEM_SIZE);
639 
640 		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
641 				       ILT_CLI_TM);
642 
643 		p_cli->vf_total_lines = curr_line - p_blk->start_line;
644 		for (i = 1; i < p_mngr->vf_count; i++) {
645 			ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
646 					       ILT_CLI_TM);
647 		}
648 	}
649 
650 	/* SRC */
651 	p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
652 	total = ecore_cxt_src_elements(p_mngr);
653 
654 	if (total) {
655 		total_size = total * sizeof(struct src_ent);
656 		elem_size = sizeof(struct src_ent);
657 
658 		p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
659 		ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
660 				       total_size, elem_size);
661 		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
662 				       ILT_CLI_SRC);
663 		p_cli->pf_total_lines = curr_line - p_blk->start_line;
664 	}
665 
666 	/* TSDM (SRQ CONTEXT) */
667 	total = ecore_cxt_get_srq_count(p_hwfn);
668 
669 	if (total) {
670 		p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
671 		p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
672 		ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
673 				       total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
674 
675 		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
676 				       ILT_CLI_TSDM);
677 		p_cli->pf_total_lines = curr_line - p_blk->start_line;
678 	}
679 
680 	if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
681 	    RESC_NUM(p_hwfn, ECORE_ILT)) {
682 		DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
683 		       curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
684 		return ECORE_INVAL;
685 	}
686 
687 	return ECORE_SUCCESS;
688 }
689 
690 static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
691 {
692 	struct ecore_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
693 	u32 i;
694 
695 	if (!p_t2 || !p_t2->dma_mem)
696 		return;
697 
698 	for (i = 0; i < p_t2->num_pages; i++)
699 		if (p_t2->dma_mem[i].virt_addr)
700 			OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
701 					       p_t2->dma_mem[i].virt_addr,
702 					       p_t2->dma_mem[i].phys_addr,
703 					       p_t2->dma_mem[i].size);
704 
705 	OSAL_FREE(p_hwfn->p_dev, p_t2->dma_mem);
706 	p_t2->dma_mem = OSAL_NULL;
707 }
708 
709 static enum _ecore_status_t
710 ecore_cxt_t2_alloc_pages(struct ecore_hwfn *p_hwfn,
711 			 struct ecore_src_t2 *p_t2,
712 			 u32 total_size, u32 page_size)
713 {
714 	void **p_virt;
715 	u32 size, i;
716 
717 	if (!p_t2 || !p_t2->dma_mem)
718 		return ECORE_INVAL;
719 
720 	for (i = 0; i < p_t2->num_pages; i++) {
721 		size = OSAL_MIN_T(u32, total_size, page_size);
722 		p_virt = &p_t2->dma_mem[i].virt_addr;
723 
724 		*p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
725 						  &p_t2->dma_mem[i].phys_addr,
726 						  size);
727 		if (!p_t2->dma_mem[i].virt_addr)
728 			return ECORE_NOMEM;
729 
730 		OSAL_MEM_ZERO(*p_virt, size);
731 		p_t2->dma_mem[i].size = size;
732 		total_size -= size;
733 	}
734 
735 	return ECORE_SUCCESS;
736 }
737 
738 static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
739 {
740 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
741 	u32 conn_num, total_size, ent_per_page, psz, i;
742 	struct phys_mem_desc *p_t2_last_page;
743 	struct ecore_ilt_client_cfg *p_src;
744 	struct ecore_src_iids src_iids;
745 	struct ecore_src_t2 *p_t2;
746 	enum _ecore_status_t rc;
747 
748 	OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
749 
750 	/* if the SRC ILT client is inactive - there are no connections
751 	 * requiring the searcher, leave.
752 	 */
753 	p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
754 	if (!p_src->active)
755 		return ECORE_SUCCESS;
756 
757 	ecore_cxt_src_iids(p_mngr, &src_iids);
758 	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
759 	total_size = conn_num * sizeof(struct src_ent);
760 
761 	/* use the same page size as the SRC ILT client */
762 	psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
763 	p_t2 = &p_mngr->src_t2;
764 	p_t2->num_pages = DIV_ROUND_UP(total_size, psz);
765 
766 	/* allocate t2 */
767 	p_t2->dma_mem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
768 				    p_t2->num_pages *
769 				    sizeof(struct phys_mem_desc));
770 	if (!p_t2->dma_mem) {
771 		DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n");
772 		rc = ECORE_NOMEM;
773 		goto t2_fail;
774 	}
775 
776 	rc = ecore_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
777 	if (rc)
778 		goto t2_fail;
779 
780 	/* Set the t2 pointers */
781 
782 	/* entries per page - must be a power of two */
783 	ent_per_page = psz / sizeof(struct src_ent);
784 
785 	p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;
786 
787 	p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
788 	p_t2->last_free = (u64)p_t2_last_page->phys_addr +
789 			  ((conn_num - 1) & (ent_per_page - 1)) *
790 			  sizeof(struct src_ent);
791 
792 	for (i = 0; i < p_t2->num_pages; i++) {
793 		u32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num);
794 		struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
795 		u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
796 		u32 j;
797 
798 		for (j = 0; j < ent_num - 1; j++) {
799 			val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
800 			entries[j].next = OSAL_CPU_TO_BE64(val);
801 		}
802 
803 		if (i < p_t2->num_pages - 1)
804 			val = (u64)p_t2->dma_mem[i + 1].phys_addr;
805 		else
806 			val = 0;
807 		entries[j].next = OSAL_CPU_TO_BE64(val);
808 
809 		conn_num -= ent_num;
810 	}
811 
812 	return ECORE_SUCCESS;
813 
814 t2_fail:
815 	ecore_cxt_src_t2_free(p_hwfn);
816 	return rc;
817 }
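
/* T2 layout sketch (assuming the default 64KB SRC ILT page): each struct
 * src_ent is 56 + 8 = 64 bytes, so ent_per_page = 65536 / 64 = 1024. Within a
 * page the 'next' field of entry j holds the physical address of entry j + 1,
 * and the last entry of a page points to the first entry of the following
 * page (or 0 for the final page). first_free/last_free record the two ends of
 * this chain and are later programmed into SRC_REG_FIRSTFREE/LASTFREE in
 * ecore_src_init_pf().
 */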
818 
819 #define for_each_ilt_valid_client(pos, clients)		\
820 	for (pos = 0; pos < MAX_ILT_CLIENTS; pos++)		\
821 		if (!clients[pos].active) {		\
822 			continue;			\
823 		} else					\
824 
825 
826 /* Total number of ILT lines used by this PF */
827 static u32 ecore_cxt_ilt_shadow_size(struct ecore_ilt_client_cfg *ilt_clients)
828 {
829 	u32 size = 0;
830 	u32 i;
831 
832 	for_each_ilt_valid_client(i, ilt_clients)
833 		size += (ilt_clients[i].last.val -
834 			 ilt_clients[i].first.val + 1);
835 
836 	return size;
837 }
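
/* Example: an active client whose lines span 0x100..0x11f contributes
 * 0x11f - 0x100 + 1 = 32 lines; the sum over all active clients sizes the
 * ilt_shadow array of struct phys_mem_desc allocated in
 * ecore_ilt_shadow_alloc().
 */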
838 
839 static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
840 {
841 	struct ecore_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
842 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
843 	u32 ilt_size, i;
844 
845 	if (p_mngr->ilt_shadow == OSAL_NULL)
846 		return;
847 
848 	ilt_size = ecore_cxt_ilt_shadow_size(p_cli);
849 
850 	for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
851 		struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];
852 
853 		if (p_dma->virt_addr)
854 			OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
855 					       p_dma->virt_addr,
856 					       p_dma->phys_addr, p_dma->size);
857 		p_dma->virt_addr = OSAL_NULL;
858 	}
859 	OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
860 	p_mngr->ilt_shadow = OSAL_NULL;
861 }
862 
863 static enum _ecore_status_t
864 ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
865 		    struct ecore_ilt_cli_blk *p_blk,
866 		    enum ilt_clients ilt_client, u32 start_line_offset)
867 {
868 	struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
869 	u32 lines, line, sz_left, lines_to_skip, first_skipped_line;
870 
871 	/* Special handling for RoCE that supports dynamic allocation */
872 	if (ilt_client == ILT_CLI_CDUT || ilt_client == ILT_CLI_TSDM)
873 		return ECORE_SUCCESS;
874 
875 	if (!p_blk->total_size)
876 		return ECORE_SUCCESS;
877 
878 	sz_left = p_blk->total_size;
879 	lines_to_skip = p_blk->dynamic_line_cnt;
880 	lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
881 	line = p_blk->start_line + start_line_offset -
882 	       p_hwfn->p_cxt_mngr->pf_start_line;
883 	first_skipped_line = line + p_blk->dynamic_line_offset;
884 
885 	while (lines) {
886 		dma_addr_t p_phys;
887 		void *p_virt;
888 		u32 size;
889 
890 		if (lines_to_skip && (line == first_skipped_line)) {
891 			line += lines_to_skip;
892 			continue;
893 		}
894 
895 		size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page);
896 
897 /* @DPDK */
898 #define ILT_BLOCK_ALIGN_SIZE 0x1000
899 		p_virt = OSAL_DMA_ALLOC_COHERENT_ALIGNED(p_hwfn->p_dev,
900 							 &p_phys, size,
901 							 ILT_BLOCK_ALIGN_SIZE);
902 		if (!p_virt)
903 			return ECORE_NOMEM;
904 		OSAL_MEM_ZERO(p_virt, size);
905 
906 		ilt_shadow[line].phys_addr = p_phys;
907 		ilt_shadow[line].virt_addr = p_virt;
908 		ilt_shadow[line].size = size;
909 
910 		DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
911 			   "ILT shadow: Line [%d] Physical 0x%lx"
912 			   " Virtual %p Size %d\n",
913 			   line, (unsigned long)p_phys, p_virt, size);
914 
915 		sz_left -= size;
916 		line++;
917 		lines--;
918 	}
919 
920 	return ECORE_SUCCESS;
921 }
922 
923 static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
924 {
925 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
926 	struct ecore_ilt_client_cfg *clients = p_mngr->clients;
927 	struct ecore_ilt_cli_blk *p_blk;
928 	u32 size, i, j, k;
929 	enum _ecore_status_t rc;
930 
931 	size = ecore_cxt_ilt_shadow_size(clients);
932 	p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
933 					 size * sizeof(struct phys_mem_desc));
934 
935 	if (!p_mngr->ilt_shadow) {
936 		DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n");
937 		rc = ECORE_NOMEM;
938 		goto ilt_shadow_fail;
939 	}
940 
941 	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
942 		   "Allocated 0x%x bytes for ilt shadow\n",
943 		   (u32)(size * sizeof(struct phys_mem_desc)));
944 
945 	for_each_ilt_valid_client(i, clients) {
946 		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
947 			p_blk = &clients[i].pf_blks[j];
948 			rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
949 			if (rc != ECORE_SUCCESS)
950 				goto ilt_shadow_fail;
951 		}
952 		for (k = 0; k < p_mngr->vf_count; k++) {
953 			for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
954 				u32 lines = clients[i].vf_total_lines * k;
955 
956 				p_blk = &clients[i].vf_blks[j];
957 				rc = ecore_ilt_blk_alloc(p_hwfn, p_blk,
958 							 i, lines);
959 				if (rc != ECORE_SUCCESS)
960 					goto ilt_shadow_fail;
961 			}
962 		}
963 	}
964 
965 	return ECORE_SUCCESS;
966 
967 ilt_shadow_fail:
968 	ecore_ilt_shadow_free(p_hwfn);
969 	return rc;
970 }
971 
972 static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
973 {
974 	u32 type, vf, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
975 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
976 
977 	for (type = 0; type < MAX_CONN_TYPES; type++) {
978 		OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
979 		p_mngr->acquired[type].cid_map = OSAL_NULL;
980 		p_mngr->acquired[type].max_count = 0;
981 		p_mngr->acquired[type].start_cid = 0;
982 
983 		for (vf = 0; vf < max_num_vfs; vf++) {
984 			OSAL_FREE(p_hwfn->p_dev,
985 				  p_mngr->acquired_vf[type][vf].cid_map);
986 			p_mngr->acquired_vf[type][vf].cid_map = OSAL_NULL;
987 			p_mngr->acquired_vf[type][vf].max_count = 0;
988 			p_mngr->acquired_vf[type][vf].start_cid = 0;
989 		}
990 	}
991 }
992 
993 static enum _ecore_status_t
994 __ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
995 			   u32 cid_start, u32 cid_count,
996 			   struct ecore_cid_acquired_map *p_map)
997 {
998 	u32 size;
999 
1000 	if (!cid_count)
1001 		return ECORE_SUCCESS;
1002 
1003 	size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_count, BITS_PER_MAP_WORD);
1004 	p_map->cid_map = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
1005 	if (p_map->cid_map == OSAL_NULL)
1006 		return ECORE_NOMEM;
1007 
1008 	p_map->max_count = cid_count;
1009 	p_map->start_cid = cid_start;
1010 
1011 	DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
1012 		   "Type %08x start: %08x count %08x\n",
1013 		   type, p_map->start_cid, p_map->max_count);
1014 
1015 	return ECORE_SUCCESS;
1016 }
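
/* Sizing example (assuming 32-bit map words, i.e. BITS_PER_MAP_WORD = 32 and
 * MAP_WORD_SIZE = sizeof(u32)): a protocol with 1000 cids needs
 * DIV_ROUND_UP(1000, 32) = 32 words, i.e. a 128-byte bitmap; bit n of the map
 * tracks whether cid (start_cid + n) is currently acquired.
 */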
1017 
1018 static enum _ecore_status_t
1019 ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type, u32 start_cid,
1020 			   u32 vf_start_cid)
1021 {
1022 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1023 	u32 vf, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
1024 	struct ecore_cid_acquired_map *p_map;
1025 	struct ecore_conn_type_cfg *p_cfg;
1026 	enum _ecore_status_t rc;
1027 
1028 	p_cfg = &p_mngr->conn_cfg[type];
1029 
1030 	/* Handle PF maps */
1031 	p_map = &p_mngr->acquired[type];
1032 	rc = __ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
1033 					  p_cfg->cid_count, p_map);
1034 	if (rc != ECORE_SUCCESS)
1035 		return rc;
1036 
1037 	/* Handle VF maps */
1038 	for (vf = 0; vf < max_num_vfs; vf++) {
1039 		p_map = &p_mngr->acquired_vf[type][vf];
1040 		rc = __ecore_cid_map_alloc_single(p_hwfn, type, vf_start_cid,
1041 						  p_cfg->cids_per_vf, p_map);
1042 		if (rc != ECORE_SUCCESS)
1043 			return rc;
1044 	}
1045 
1046 	return ECORE_SUCCESS;
1047 }
1048 
1049 static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
1050 {
1051 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1052 	u32 start_cid = 0, vf_start_cid = 0;
1053 	u32 type;
1054 	enum _ecore_status_t rc;
1055 
1056 	for (type = 0; type < MAX_CONN_TYPES; type++) {
1057 		rc = ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
1058 						vf_start_cid);
1059 		if (rc != ECORE_SUCCESS)
1060 			goto cid_map_fail;
1061 
1062 		start_cid += p_mngr->conn_cfg[type].cid_count;
1063 		vf_start_cid += p_mngr->conn_cfg[type].cids_per_vf;
1064 	}
1065 
1066 	return ECORE_SUCCESS;
1067 
1068 cid_map_fail:
1069 	ecore_cid_map_free(p_hwfn);
1070 	return rc;
1071 }
1072 
1073 enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
1074 {
1075 	struct ecore_cid_acquired_map *acquired_vf;
1076 	struct ecore_ilt_client_cfg *clients;
1077 	struct ecore_cxt_mngr *p_mngr;
1078 	u32 i, max_num_vfs;
1079 
1080 	p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
1081 	if (!p_mngr) {
1082 		DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_cxt_mngr'\n");
1083 		return ECORE_NOMEM;
1084 	}
1085 
1086 	/* Initialize ILT client registers */
1087 	clients = p_mngr->clients;
1088 	clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
1089 	clients[ILT_CLI_CDUC].last.reg  = ILT_CFG_REG(CDUC, LAST_ILT);
1090 	clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
1091 
1092 	clients[ILT_CLI_QM].first.reg   = ILT_CFG_REG(QM, FIRST_ILT);
1093 	clients[ILT_CLI_QM].last.reg    = ILT_CFG_REG(QM, LAST_ILT);
1094 	clients[ILT_CLI_QM].p_size.reg  = ILT_CFG_REG(QM, P_SIZE);
1095 
1096 	clients[ILT_CLI_TM].first.reg   = ILT_CFG_REG(TM, FIRST_ILT);
1097 	clients[ILT_CLI_TM].last.reg    = ILT_CFG_REG(TM, LAST_ILT);
1098 	clients[ILT_CLI_TM].p_size.reg  = ILT_CFG_REG(TM, P_SIZE);
1099 
1100 	clients[ILT_CLI_SRC].first.reg  = ILT_CFG_REG(SRC, FIRST_ILT);
1101 	clients[ILT_CLI_SRC].last.reg   = ILT_CFG_REG(SRC, LAST_ILT);
1102 	clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
1103 
1104 	clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
1105 	clients[ILT_CLI_CDUT].last.reg  = ILT_CFG_REG(CDUT, LAST_ILT);
1106 	clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
1107 
1108 	clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
1109 	clients[ILT_CLI_TSDM].last.reg  = ILT_CFG_REG(TSDM, LAST_ILT);
1110 	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
1111 
1112 	/* default ILT page size for all clients is 64K */
1113 	for (i = 0; i < MAX_ILT_CLIENTS; i++)
1114 		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
1115 
1116 	/* Due to the removal of the iSCSI/FCoE files, union type0_task_context
1117 	 * is empty and task_type_size would be 0, so it is hardcoded for now.
1118 	 */
1119 	p_mngr->task_type_size[0] = 512; /* @DPDK */
1120 	p_mngr->task_type_size[1] = 128; /* @DPDK */
1121 
1122 	if (p_hwfn->p_dev->p_iov_info)
1123 		p_mngr->vf_count = p_hwfn->p_dev->p_iov_info->total_vfs;
1124 
1125 	/* Initialize the dynamic ILT allocation mutex */
1126 #ifdef CONFIG_ECORE_LOCK_ALLOC
1127 	if (OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex)) {
1128 		DP_NOTICE(p_hwfn, false, "Failed to alloc p_mngr->mutex\n");
1129 		return ECORE_NOMEM;
1130 	}
1131 #endif
1132 	OSAL_MUTEX_INIT(&p_mngr->mutex);
1133 
1134 	/* Set the cxt manager pointer prior to further allocations */
1135 	p_hwfn->p_cxt_mngr = p_mngr;
1136 
1137 	max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
1138 	for (i = 0; i < MAX_CONN_TYPES; i++) {
1139 		acquired_vf = OSAL_CALLOC(p_hwfn->p_dev, GFP_KERNEL,
1140 					  max_num_vfs, sizeof(*acquired_vf));
1141 		if (!acquired_vf) {
1142 			DP_NOTICE(p_hwfn, false,
1143 				  "Failed to allocate an array of `struct ecore_cid_acquired_map'\n");
1144 			return ECORE_NOMEM;
1145 		}
1146 
1147 		p_mngr->acquired_vf[i] = acquired_vf;
1148 	}
1149 
1150 	return ECORE_SUCCESS;
1151 }
1152 
1153 enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
1154 {
1155 	enum _ecore_status_t rc;
1156 
1157 	/* Allocate the ILT shadow table */
1158 	rc = ecore_ilt_shadow_alloc(p_hwfn);
1159 	if (rc) {
1160 		DP_NOTICE(p_hwfn, false, "Failed to allocate ilt memory\n");
1161 		goto tables_alloc_fail;
1162 	}
1163 
1164 	/* Allocate the T2  table */
1165 	rc = ecore_cxt_src_t2_alloc(p_hwfn);
1166 	if (rc) {
1167 		DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n");
1168 		goto tables_alloc_fail;
1169 	}
1170 
1171 	/* Allocate and initialize the acquired cids bitmaps */
1172 	rc = ecore_cid_map_alloc(p_hwfn);
1173 	if (rc) {
1174 		DP_NOTICE(p_hwfn, false, "Failed to allocate cid maps\n");
1175 		goto tables_alloc_fail;
1176 	}
1177 
1178 	return ECORE_SUCCESS;
1179 
1180 tables_alloc_fail:
1181 	ecore_cxt_mngr_free(p_hwfn);
1182 	return rc;
1183 }
1184 
1185 void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
1186 {
1187 	u32 i;
1188 
1189 	if (!p_hwfn->p_cxt_mngr)
1190 		return;
1191 
1192 	ecore_cid_map_free(p_hwfn);
1193 	ecore_cxt_src_t2_free(p_hwfn);
1194 	ecore_ilt_shadow_free(p_hwfn);
1195 #ifdef CONFIG_ECORE_LOCK_ALLOC
1196 	OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);
1197 #endif
1198 	for (i = 0; i < MAX_CONN_TYPES; i++)
1199 		OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr->acquired_vf[i]);
1200 	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
1201 
1202 	p_hwfn->p_cxt_mngr = OSAL_NULL;
1203 }
1204 
1205 void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
1206 {
1207 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1208 	u32 len, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
1209 	struct ecore_cid_acquired_map *p_map;
1210 	struct ecore_conn_type_cfg *p_cfg;
1211 	int type;
1212 
1213 	/* Reset acquired cids */
1214 	for (type = 0; type < MAX_CONN_TYPES; type++) {
1215 		u32 vf;
1216 
1217 		p_cfg = &p_mngr->conn_cfg[type];
1218 		if (p_cfg->cid_count) {
1219 			p_map = &p_mngr->acquired[type];
1220 			len = DIV_ROUND_UP(p_map->max_count,
1221 					   BITS_PER_MAP_WORD) *
1222 			      MAP_WORD_SIZE;
1223 			OSAL_MEM_ZERO(p_map->cid_map, len);
1224 		}
1225 
1226 		if (!p_cfg->cids_per_vf)
1227 			continue;
1228 
1229 		for (vf = 0; vf < max_num_vfs; vf++) {
1230 			p_map = &p_mngr->acquired_vf[type][vf];
1231 			len = DIV_ROUND_UP(p_map->max_count,
1232 					   BITS_PER_MAP_WORD) *
1233 			      MAP_WORD_SIZE;
1234 			OSAL_MEM_ZERO(p_map->cid_map, len);
1235 		}
1236 	}
1237 }
1238 
1239 /* HW initialization helper (per Block, per phase) */
1240 
1241 /* CDU Common */
1242 #define CDUC_CXT_SIZE_SHIFT						\
1243 	CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
1244 
1245 #define CDUC_CXT_SIZE_MASK						\
1246 	(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
1247 
1248 #define CDUC_BLOCK_WASTE_SHIFT						\
1249 	CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
1250 
1251 #define CDUC_BLOCK_WASTE_MASK						\
1252 	(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
1253 
1254 #define CDUC_NCIB_SHIFT							\
1255 	CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
1256 
1257 #define CDUC_NCIB_MASK							\
1258 	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
1259 
1260 #define CDUT_TYPE0_CXT_SIZE_SHIFT					\
1261 	CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
1262 
1263 #define CDUT_TYPE0_CXT_SIZE_MASK					\
1264 	(CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >>				\
1265 	CDUT_TYPE0_CXT_SIZE_SHIFT)
1266 
1267 #define CDUT_TYPE0_BLOCK_WASTE_SHIFT					\
1268 	CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
1269 
1270 #define CDUT_TYPE0_BLOCK_WASTE_MASK					\
1271 	(CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >>			\
1272 	CDUT_TYPE0_BLOCK_WASTE_SHIFT)
1273 
1274 #define CDUT_TYPE0_NCIB_SHIFT						\
1275 	CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
1276 
1277 #define CDUT_TYPE0_NCIB_MASK						\
1278 	(CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >>		\
1279 	CDUT_TYPE0_NCIB_SHIFT)
1280 
1281 #define CDUT_TYPE1_CXT_SIZE_SHIFT					\
1282 	CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
1283 
1284 #define CDUT_TYPE1_CXT_SIZE_MASK					\
1285 	(CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >>				\
1286 	CDUT_TYPE1_CXT_SIZE_SHIFT)
1287 
1288 #define CDUT_TYPE1_BLOCK_WASTE_SHIFT					\
1289 	CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
1290 
1291 #define CDUT_TYPE1_BLOCK_WASTE_MASK					\
1292 	(CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >>			\
1293 	CDUT_TYPE1_BLOCK_WASTE_SHIFT)
1294 
1295 #define CDUT_TYPE1_NCIB_SHIFT						\
1296 	CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
1297 
1298 #define CDUT_TYPE1_NCIB_MASK						\
1299 	(CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >>		\
1300 	CDUT_TYPE1_NCIB_SHIFT)
1301 
1302 static void ecore_cdu_init_common(struct ecore_hwfn *p_hwfn)
1303 {
1304 	u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
1305 
1306 	/* CDUC - connection configuration */
1307 	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1308 	cxt_size = CONN_CXT_SIZE(p_hwfn);
1309 	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1310 	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1311 
1312 	SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
1313 	SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
1314 	SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
1315 	STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
1316 
1317 	/* CDUT - type-0 tasks configuration */
1318 	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
1319 	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
1320 	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1321 	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1322 
1323 	/* cxt size and block-waste are multiples of 8 */
1324 	cdu_params = 0;
1325 	SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
1326 	SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
1327 	SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
1328 	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
1329 
1330 	/* CDUT - type-1 tasks configuration */
1331 	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
1332 	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1333 	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1334 
1335 	/* cxt size and block-waste are multiples of 8 */
1336 	cdu_params = 0;
1337 	SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
1338 	SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
1339 	SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
1340 	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
1341 }
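
/* Worked example (using the defaults set in ecore_cxt_mngr_alloc(): a 64KB
 * CDUT ILT page and task_type_size[0] = 512): elems_per_page = 65536 / 512 =
 * 128 and block_waste = 65536 - 128 * 512 = 0, so SEGMENT0_PARAMS is written
 * with TID_SIZE = 512 >> 3 = 64, TID_BLOCK_WASTE = 0 and
 * NUM_TIDS_IN_BLOCK = 128.
 */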
1342 
1343 /* CDU PF */
1344 #define CDU_SEG_REG_TYPE_SHIFT		CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
1345 #define CDU_SEG_REG_TYPE_MASK		0x1
1346 #define CDU_SEG_REG_OFFSET_SHIFT	0
1347 #define CDU_SEG_REG_OFFSET_MASK		CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
1348 
1349 static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
1350 {
1351 	struct ecore_ilt_client_cfg *p_cli;
1352 	struct ecore_tid_seg *p_seg;
1353 	u32 cdu_seg_params, offset;
1354 	int i;
1355 
1356 	static const u32 rt_type_offset_arr[] = {
1357 		CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
1358 		CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
1359 		CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
1360 		CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
1361 	};
1362 
1363 	static const u32 rt_type_offset_fl_arr[] = {
1364 		CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
1365 		CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
1366 		CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
1367 		CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
1368 	};
1369 
1370 	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1371 
1372 	/* There are initializations only for CDUT during the PF phase */
1373 	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1374 		/* Segment 0 */
1375 		p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
1376 		if (!p_seg)
1377 			continue;
1378 
1379 		/* Note: start_line is already adjusted for the CDU
1380 		 * segment register granularity, so we just need to
1381 		 * divide. Adjustment is implicit as we assume ILT
1382 		 * Page size is larger than 32K!
1383 		 */
1384 		offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1385 			  (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
1386 			   p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1387 
1388 		cdu_seg_params = 0;
1389 		SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1390 		SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1391 		STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
1392 
1393 		offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1394 			  (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
1395 			   p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1396 
1397 		cdu_seg_params = 0;
1398 		SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1399 		SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1400 		STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
1401 	}
1402 }
1403 
1404 void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1405 		      bool is_pf_loading)
1406 {
1407 	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
1408 	struct ecore_qm_iids iids;
1409 
1410 	OSAL_MEM_ZERO(&iids, sizeof(iids));
1411 	ecore_cxt_qm_iids(p_hwfn, &iids);
1412 	ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
1413 			    qm_info->max_phys_tcs_per_port,
1414 			    is_pf_loading,
1415 			    iids.cids, iids.vf_cids, iids.tids,
1416 			    qm_info->start_pq,
1417 			    qm_info->num_pqs - qm_info->num_vf_pqs,
1418 			    qm_info->num_vf_pqs,
1419 			    qm_info->start_vport,
1420 			    qm_info->num_vports, qm_info->pf_wfq,
1421 			    qm_info->pf_rl,
1422 			    p_hwfn->qm_info.qm_pq_params,
1423 			    p_hwfn->qm_info.qm_vport_params);
1424 }
1425 
1426 /* CM PF */
1427 static void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
1428 {
1429 	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
1430 		     ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
1431 }
1432 
1433 /* DQ PF */
1434 static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
1435 {
1436 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1437 	u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
1438 
1439 	dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
1440 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
1441 
1442 	dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
1443 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
1444 
1445 	dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
1446 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
1447 
1448 	dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
1449 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
1450 
1451 	dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
1452 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
1453 
1454 	dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
1455 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
1456 
1457 	dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
1458 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
1459 
1460 	dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
1461 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
1462 
1463 	dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
1464 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
1465 
1466 	dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
1467 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
1468 
1469 	dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
1470 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
1471 
1472 	dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
1473 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
1474 
1475 	/* Connection types 6 & 7 are not in use, yet they must be configured
1476 	 * with the highest possible connection ID. Not configuring them means
1477 	 * the defaults will be used, and with a large number of cids a bug may
1478 	 * occur if the defaults are smaller than dq_pf_max_cid /
1479 	 * dq_vf_max_cid.
1480 	 */
1481 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
1482 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
1483 
1484 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
1485 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
1486 }
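
/* Example (illustrative cid count): if connection type 0 provides 4096 PF
 * cids, DORQ_REG_PF_MAX_ICID_0 is written with 4096 >> DQ_RANGE_SHIFT = 256;
 * every subsequent register then receives the running (cumulative) total
 * across connection types, so the last configured value is the overall
 * maximum ICID for the PF/VF.
 */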
1487 
1488 static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn)
1489 {
1490 	struct ecore_ilt_client_cfg *ilt_clients;
1491 	int i;
1492 
1493 	ilt_clients = p_hwfn->p_cxt_mngr->clients;
1494 	for_each_ilt_valid_client(i, ilt_clients) {
1495 		STORE_RT_REG(p_hwfn,
1496 			     ilt_clients[i].first.reg,
1497 			     ilt_clients[i].first.val);
1498 		STORE_RT_REG(p_hwfn,
1499 			     ilt_clients[i].last.reg, ilt_clients[i].last.val);
1500 		STORE_RT_REG(p_hwfn,
1501 			     ilt_clients[i].p_size.reg,
1502 			     ilt_clients[i].p_size.val);
1503 	}
1504 }
1505 
1506 static void ecore_ilt_vf_bounds_init(struct ecore_hwfn *p_hwfn)
1507 {
1508 	struct ecore_ilt_client_cfg *p_cli;
1509 	u32 blk_factor;
1510 
1511 	/* For simplicity we set the 'block' to be an ILT page */
1512 	if (p_hwfn->p_dev->p_iov_info) {
1513 		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
1514 
1515 		STORE_RT_REG(p_hwfn,
1516 			     PSWRQ2_REG_VF_BASE_RT_OFFSET,
1517 			     p_iov->first_vf_in_pf);
1518 		STORE_RT_REG(p_hwfn,
1519 			     PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
1520 			     p_iov->first_vf_in_pf + p_iov->total_vfs);
1521 	}
1522 
1523 	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
1524 	blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1525 	if (p_cli->active) {
1526 		STORE_RT_REG(p_hwfn,
1527 			     PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
1528 			     blk_factor);
1529 		STORE_RT_REG(p_hwfn,
1530 			     PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1531 			     p_cli->pf_total_lines);
1532 		STORE_RT_REG(p_hwfn,
1533 			     PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
1534 			     p_cli->vf_total_lines);
1535 	}
1536 
1537 	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1538 	blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1539 	if (p_cli->active) {
1540 		STORE_RT_REG(p_hwfn,
1541 			     PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
1542 			     blk_factor);
1543 		STORE_RT_REG(p_hwfn,
1544 			     PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1545 			     p_cli->pf_total_lines);
1546 		STORE_RT_REG(p_hwfn,
1547 			     PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
1548 			     p_cli->vf_total_lines);
1549 	}
1550 
1551 	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
1552 	blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1553 	if (p_cli->active) {
1554 		STORE_RT_REG(p_hwfn,
1555 			     PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
1556 		STORE_RT_REG(p_hwfn,
1557 			     PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1558 			     p_cli->pf_total_lines);
1559 		STORE_RT_REG(p_hwfn,
1560 			     PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
1561 			     p_cli->vf_total_lines);
1562 	}
1563 }
1564 
1565 /* ILT (PSWRQ2) PF */
1566 static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
1567 {
1568 	struct ecore_ilt_client_cfg *clients;
1569 	struct ecore_cxt_mngr *p_mngr;
1570 	struct phys_mem_desc *p_shdw;
1571 	u32 line, rt_offst, i;
1572 
1573 	ecore_ilt_bounds_init(p_hwfn);
1574 	ecore_ilt_vf_bounds_init(p_hwfn);
1575 
1576 	p_mngr = p_hwfn->p_cxt_mngr;
1577 	p_shdw = p_mngr->ilt_shadow;
1578 	clients = p_hwfn->p_cxt_mngr->clients;
1579 
1580 	for_each_ilt_valid_client(i, clients) {
1581 		/* Client's first val and the RT array are absolute, while ILT
1582 		 * shadow lines are relative.
1583 		 */
1584 		line = clients[i].first.val - p_mngr->pf_start_line;
1585 		rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
1586 		    clients[i].first.val * ILT_ENTRY_IN_REGS;
1587 
1588 		for (; line <= clients[i].last.val - p_mngr->pf_start_line;
1589 		     line++, rt_offst += ILT_ENTRY_IN_REGS) {
1590 			u64 ilt_hw_entry = 0;
1591 
1592 			/* p_virt could be OSAL_NULL in case of dynamic
1593 			 * allocation
1594 			 */
1595 			if (p_shdw[line].virt_addr != OSAL_NULL) {
1596 				SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
1597 				SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
1598 					  (p_shdw[line].phys_addr >> 12));
1599 
1600 				DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
1601 					"Setting RT[0x%08x] from"
1602 					" ILT[0x%08x] [Client is %d] to"
1603 					" Physical addr: 0x%lx\n",
1604 					rt_offst, line, i,
1605 					(unsigned long)(p_shdw[line].
1606 							phys_addr >> 12));
1607 			}
1608 
1609 			STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
1610 		}
1611 	}
1612 }
1613 
1614 /* SRC (Searcher) PF */
1615 static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
1616 {
1617 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1618 	u32 rounded_conn_num, conn_num, conn_max;
1619 	struct ecore_src_iids src_iids;
1620 
1621 	OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
1622 	ecore_cxt_src_iids(p_mngr, &src_iids);
1623 	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
1624 	if (!conn_num)
1625 		return;
1626 
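	/* The searcher hash is sized to the connection count rounded up to a
	 * power of two, with a floor of SRC_MIN_NUM_ELEMS; the number of hash
	 * bits programmed below is log2 of that rounded value.
	 */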
1627 	conn_max = OSAL_MAX_T(u32, conn_num, SRC_MIN_NUM_ELEMS);
1628 	rounded_conn_num = OSAL_ROUNDUP_POW_OF_TWO(conn_max);
1629 
1630 	STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
1631 	STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
1632 		     OSAL_LOG2(rounded_conn_num));
1633 
1634 	STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
1635 			 p_hwfn->p_cxt_mngr->src_t2.first_free);
1636 	STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
1637 			 p_hwfn->p_cxt_mngr->src_t2.last_free);
1638 	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
1639 		   "Configured SEARCHER for 0x%08x connections\n",
1640 		   conn_num);
1641 }
1642 
1643 /* Timers PF */
1644 #define TM_CFG_NUM_IDS_SHIFT		0
1645 #define TM_CFG_NUM_IDS_MASK		0xFFFFULL
1646 #define TM_CFG_PRE_SCAN_OFFSET_SHIFT	16
1647 #define TM_CFG_PRE_SCAN_OFFSET_MASK	0x1FFULL
1648 #define TM_CFG_PARENT_PF_SHIFT		25
1649 #define TM_CFG_PARENT_PF_MASK		0x7ULL
1650 
1651 #define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT	30
1652 #define TM_CFG_CID_PRE_SCAN_ROWS_MASK	0x1FFULL
1653 
1654 #define TM_CFG_TID_OFFSET_SHIFT		30
1655 #define TM_CFG_TID_OFFSET_MASK		0x7FFFFULL
1656 #define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT	49
1657 #define TM_CFG_TID_PRE_SCAN_ROWS_MASK	0x1FFULL
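/* Connection config words use NUM_IDS[15:0], PRE_SCAN_OFFSET[24:16],
 * PARENT_PF[27:25] and CID_PRE_SCAN_ROWS[38:30]; task config words reuse the
 * upper bits for TID_OFFSET[48:30] and TID_PRE_SCAN_ROWS[57:49].
 */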
1658 
1659 static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
1660 {
1661 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1662 	u32 active_seg_mask = 0, tm_offset, rt_reg;
1663 	struct ecore_tm_iids tm_iids;
1664 	u64 cfg_word;
1665 	u8 i;
1666 
1667 	OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
1668 	ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
1669 
1670 	/* @@@TBD No pre-scan for now */
1671 
1672 	cfg_word = 0;
1673 	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
1674 	SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1675 	SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1676 	SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
1677 
1678 	/* Note: We assume consecutive VFs for a PF */
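	/* Each config word occupies sizeof(cfg_word) / sizeof(u32) = 2
	 * consecutive 32-bit runtime registers, indexed by the absolute VF id.
	 */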
1679 	for (i = 0; i < p_mngr->vf_count; i++) {
1680 		rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1681 		    (sizeof(cfg_word) / sizeof(u32)) *
1682 		    (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
1683 		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1684 	}
1685 
1686 	cfg_word = 0;
1687 	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
1688 	SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1689 	SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);	/* n/a for PF */
1690 	SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all   */
1691 
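	/* The PF connection entry follows all VF entries in the CONFIG_CONN
	 * memory.
	 */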
1692 	rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1693 	    (sizeof(cfg_word) / sizeof(u32)) *
1694 	    (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
1695 	STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1696 
1697 	/* enable scan */
1698 	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
1699 		     tm_iids.pf_cids ? 0x1 : 0x0);
1700 
1701 	/* @@@TBD how to enable the scan for the VFs */
1702 
1703 	tm_offset = tm_iids.per_vf_cids;
1704 
1705 	/* Note: We assume consecutive VFs for a PF */
1706 	for (i = 0; i < p_mngr->vf_count; i++) {
1707 		cfg_word = 0;
1708 		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
1709 		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1710 		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1711 		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1712 		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
1713 
1714 		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1715 		    (sizeof(cfg_word) / sizeof(u32)) *
1716 		    (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
1717 
1718 		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1719 	}
1720 
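	/* PF task segments follow the VF task entries; tm_offset accumulates
	 * the per-segment TID offset and active_seg_mask enables scanning only
	 * for segments that actually have TIDs.
	 */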
1721 	tm_offset = tm_iids.pf_cids;
1722 	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1723 		cfg_word = 0;
1724 		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
1725 		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1726 		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
1727 		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1728 		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
1729 
1730 		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1731 		    (sizeof(cfg_word) / sizeof(u32)) *
1732 		    (NUM_OF_VFS(p_hwfn->p_dev) +
1733 		     p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
1734 
1735 		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1736 		active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
1737 
1738 		tm_offset += tm_iids.pf_tids[i];
1739 	}
1740 
1741 	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
1742 
1743 	/* @@@TBD how to enable the scan for the VFs */
1744 }
1745 
1746 static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
1747 {
1748 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1749 	struct ecore_conn_type_cfg *p_fcoe;
1750 	struct ecore_tid_seg *p_tid;
1751 
1752 	p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
1753 
1754 	/* If FCoE is active set the MAX OX_ID (tid) in the Parser */
1755 	if (!p_fcoe->cid_count)
1756 		return;
1757 
1758 	p_tid = &p_fcoe->tid_seg[ECORE_CXT_FCOE_TID_SEG];
1759 	STORE_RT_REG_AGG(p_hwfn,
1760 			PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
1761 			p_tid->count);
1762 }
1763 
1764 void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
1765 {
1766 	/* CDU configuration */
1767 	ecore_cdu_init_common(p_hwfn);
1768 }
1769 
1770 void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1771 {
1772 	ecore_qm_init_pf(p_hwfn, p_ptt, true);
1773 	ecore_cm_init_pf(p_hwfn);
1774 	ecore_dq_init_pf(p_hwfn);
1775 	ecore_cdu_init_pf(p_hwfn);
1776 	ecore_ilt_init_pf(p_hwfn);
1777 	ecore_src_init_pf(p_hwfn);
1778 	ecore_tm_init_pf(p_hwfn);
1779 	ecore_prs_init_pf(p_hwfn);
1780 }
1781 
1782 enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
1783 					    enum protocol_type type,
1784 					    u32 *p_cid, u8 vfid)
1785 {
1786 	u32 rel_cid, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
1787 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1788 	struct ecore_cid_acquired_map *p_map;
1789 
1790 	if (type >= MAX_CONN_TYPES) {
1791 		DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
1792 		return ECORE_INVAL;
1793 	}
1794 
1795 	if (vfid >= max_num_vfs && vfid != ECORE_CXT_PF_CID) {
1796 		DP_NOTICE(p_hwfn, true, "VF [%02x] is out of range\n", vfid);
1797 		return ECORE_INVAL;
1798 	}
1799 
1800 	/* Determine the right map to take this CID from */
1801 	if (vfid == ECORE_CXT_PF_CID)
1802 		p_map = &p_mngr->acquired[type];
1803 	else
1804 		p_map = &p_mngr->acquired_vf[type][vfid];
1805 
1806 	if (p_map->cid_map == OSAL_NULL) {
1807 		DP_NOTICE(p_hwfn, true, "CID map for type %d not allocated", type);
1808 		return ECORE_INVAL;
1809 	}
1810 
1811 	rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_map->cid_map,
1812 					   p_map->max_count);
1813 
1814 	if (rel_cid >= p_map->max_count) {
1815 		DP_NOTICE(p_hwfn, false, "no CID available for protocol %d\n",
1816 			  type);
1817 		return ECORE_NORESOURCES;
1818 	}
1819 
1820 	OSAL_SET_BIT(rel_cid, p_map->cid_map);
1821 
1822 	*p_cid = rel_cid + p_map->start_cid;
1823 
1824 	DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
1825 		   "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
1826 		   *p_cid, rel_cid, vfid, type);
1827 
1828 	return ECORE_SUCCESS;
1829 }
1830 
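/* PF-only wrapper over _ecore_cxt_acquire_cid(). Minimal usage sketch,
 * illustrative only (callers and error handling simplified):
 *
 *	u32 cid;
 *
 *	if (ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid) !=
 *	    ECORE_SUCCESS)
 *		return ECORE_NORESOURCES;
 *	... program the connection context for 'cid' ...
 *	ecore_cxt_release_cid(p_hwfn, cid);
 */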
1831 enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
1832 					   enum protocol_type type,
1833 					   u32 *p_cid)
1834 {
1835 	return _ecore_cxt_acquire_cid(p_hwfn, type, p_cid, ECORE_CXT_PF_CID);
1836 }
1837 
1838 static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
1839 					u32 cid, u8 vfid,
1840 					enum protocol_type *p_type,
1841 					struct ecore_cid_acquired_map **pp_map)
1842 {
1843 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1844 	u32 rel_cid;
1845 
1846 	/* Iterate over protocols and find matching cid range */
1847 	for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
1848 		if (vfid == ECORE_CXT_PF_CID)
1849 			*pp_map = &p_mngr->acquired[*p_type];
1850 		else
1851 			*pp_map = &p_mngr->acquired_vf[*p_type][vfid];
1852 
1853 		if (!((*pp_map)->cid_map))
1854 			continue;
1855 		if (cid >= (*pp_map)->start_cid &&
1856 		    cid < (*pp_map)->start_cid + (*pp_map)->max_count) {
1857 			break;
1858 		}
1859 	}
1860 	if (*p_type == MAX_CONN_TYPES) {
1861 		DP_NOTICE(p_hwfn, true, "Invalid CID %d vfid %02x", cid, vfid);
1862 		goto fail;
1863 	}
1864 
1865 	rel_cid = cid - (*pp_map)->start_cid;
1866 	if (!OSAL_GET_BIT(rel_cid, (*pp_map)->cid_map)) {
1867 		DP_NOTICE(p_hwfn, true,
1868 			  "CID %d [vfid %02x] not acquired", cid, vfid);
1869 		goto fail;
1870 	}
1871 
1872 	return true;
1873 fail:
1874 	*p_type = MAX_CONN_TYPES;
1875 	*pp_map = OSAL_NULL;
1876 	return false;
1877 }
1878 
1879 void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid, u8 vfid)
1880 {
1881 	u32 rel_cid, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
1882 	struct ecore_cid_acquired_map *p_map = OSAL_NULL;
1883 	enum protocol_type type;
1884 	bool b_acquired;
1885 
1886 	if (vfid != ECORE_CXT_PF_CID && vfid >= max_num_vfs) {
1887 		DP_NOTICE(p_hwfn, true,
1888 			  "Trying to return incorrect CID belonging to VF %02x\n",
1889 			  vfid);
1890 		return;
1891 	}
1892 
1893 	/* Test acquired and find matching per-protocol map */
1894 	b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, vfid,
1895 						 &type, &p_map);
1896 
1897 	if (!b_acquired)
1898 		return;
1899 
1900 	rel_cid = cid - p_map->start_cid;
1901 	OSAL_CLEAR_BIT(rel_cid, p_map->cid_map);
1902 
1903 	DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
1904 		   "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
1905 		   cid, rel_cid, vfid, type);
1906 }
1907 
1908 void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
1909 {
1910 	_ecore_cxt_release_cid(p_hwfn, cid, ECORE_CXT_PF_CID);
1911 }
1912 
1913 enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
1914 					    struct ecore_cxt_info *p_info)
1915 {
1916 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1917 	struct ecore_cid_acquired_map *p_map = OSAL_NULL;
1918 	u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
1919 	enum protocol_type type;
1920 	bool b_acquired;
1921 
1922 	/* Test acquired and find matching per-protocol map */
1923 	b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid,
1924 						 ECORE_CXT_PF_CID,
1925 						 &type, &p_map);
1926 
1927 	if (!b_acquired)
1928 		return ECORE_INVAL;
1929 
1930 	/* set the protocol type */
1931 	p_info->type = type;
1932 
1933 	/* compute context virtual pointer */
1934 	hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1935 
1936 	conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
1937 	cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
1938 	line = p_info->iid / cxts_per_p;
1939 
1940 	/* Make sure context is allocated (dynamic allocation) */
1941 	if (!p_mngr->ilt_shadow[line].virt_addr)
1942 		return ECORE_INVAL;
1943 
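	/* Context pointer = base of the shadow page holding this iid plus the
	 * offset of the iid within that page.
	 */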
1944 	p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].virt_addr +
1945 	    (p_info->iid % cxts_per_p) * conn_cxt_size;
1946 
1947 	DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT),
1948 		"Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
1949 		(p_info->iid / cxts_per_p), p_info->p_cxt, p_info->iid);
1950 
1951 	return ECORE_SUCCESS;
1952 }
1953 
1954 enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
1955 {
1956 	/* Set the number of required CORE connections */
1957 	u32 core_cids = 1;	/* SPQ */
1958 
1959 	ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
1960 
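	/* Only the Ethernet personality is handled here: it registers the
	 * PF/VF L2 connection counts and, unless multi-function mode disables
	 * aRFS, the number of aRFS filter contexts.
	 */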
1961 	switch (p_hwfn->hw_info.personality) {
1962 	case ECORE_PCI_ETH:
1963 		{
1964 		u32 count = 0;
1965 
1966 		struct ecore_eth_pf_params *p_params =
1967 			    &p_hwfn->pf_params.eth_pf_params;
1968 
1969 		if (!p_params->num_vf_cons)
1970 			p_params->num_vf_cons = ETH_PF_PARAMS_VF_CONS_DEFAULT;
1971 		ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
1972 					      p_params->num_cons,
1973 					      p_params->num_vf_cons);
1974 
1975 		count = p_params->num_arfs_filters;
1976 
1977 		if (!OSAL_GET_BIT(ECORE_MF_DISABLE_ARFS,
1978 				   &p_hwfn->p_dev->mf_bits))
1979 			p_hwfn->p_cxt_mngr->arfs_count = count;
1980 
1981 		break;
1982 		}
1983 	default:
1984 		return ECORE_INVAL;
1985 	}
1986 
1987 	return ECORE_SUCCESS;
1988 }
1989 
1990 /* This function is very RoCE oriented; if another protocol needs this
1991  * feature in the future, the function will have to be made more generic.
1992  */
1993 enum _ecore_status_t
1994 ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
1995 			    enum ecore_cxt_elem_type elem_type,
1996 			    u32 iid)
1997 {
1998 	u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
1999 	struct ecore_ilt_client_cfg *p_cli;
2000 	struct ecore_ilt_cli_blk *p_blk;
2001 	struct ecore_ptt *p_ptt;
2002 	dma_addr_t p_phys;
2003 	u64 ilt_hw_entry;
2004 	void *p_virt;
2005 	enum _ecore_status_t rc = ECORE_SUCCESS;
2006 
2007 	switch (elem_type) {
2008 	case ECORE_ELEM_CXT:
2009 		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2010 		elem_size = CONN_CXT_SIZE(p_hwfn);
2011 		p_blk = &p_cli->pf_blks[CDUC_BLK];
2012 		break;
2013 	case ECORE_ELEM_SRQ:
2014 		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2015 		elem_size = SRQ_CXT_SIZE;
2016 		p_blk = &p_cli->pf_blks[SRQ_BLK];
2017 		break;
2018 	case ECORE_ELEM_TASK:
2019 		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2020 		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2021 		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
2022 		break;
2023 	default:
2024 		DP_NOTICE(p_hwfn, false,
2025 			  "ECORE_INVALID elem type = %d", elem_type);
2026 		return ECORE_INVAL;
2027 	}
2028 
2029 	/* Calculate line in ilt */
2030 	hw_p_size = p_cli->p_size.val;
2031 	elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2032 	line = p_blk->start_line + (iid / elems_per_p);
2033 	shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
2034 
2035 	/* If line is already allocated, do nothing, otherwise allocate it and
2036 	 * write it to the PSWRQ2 registers.
2037 	 * This section can be run in parallel from different contexts and thus
2038 	 * a mutex protection is needed.
2039 	 */
2040 
2041 	OSAL_MUTEX_ACQUIRE(&p_hwfn->p_cxt_mngr->mutex);
2042 
2043 	if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
2044 		goto out0;
2045 
2046 	p_ptt = ecore_ptt_acquire(p_hwfn);
2047 	if (!p_ptt) {
2048 		DP_NOTICE(p_hwfn, false,
2049 			  "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
2050 		rc = ECORE_TIMEOUT;
2051 		goto out0;
2052 	}
2053 
2054 	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
2055 					 &p_phys,
2056 					 p_blk->real_size_in_page);
2057 	if (!p_virt) {
2058 		rc = ECORE_NOMEM;
2059 		goto out1;
2060 	}
2061 	OSAL_MEM_ZERO(p_virt, p_blk->real_size_in_page);
2062 
2063 	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
2064 	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
2065 	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
2066 		p_blk->real_size_in_page;
2067 
2068 	/* compute absolute offset */
2069 	reg_offset = PSWRQ2_REG_ILT_MEMORY +
2070 		     (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
2071 
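	/* ILT entry format: valid bit plus the physical address of the new
	 * page shifted right by 12 (4KB aligned).
	 */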
2072 	ilt_hw_entry = 0;
2073 	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
2074 	SET_FIELD(ilt_hw_entry,
2075 		  ILT_ENTRY_PHY_ADDR,
2076 		 (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr >> 12));
2077 
2078 	/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is wide-bus */
2079 
2080 	ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&ilt_hw_entry,
2081 			    reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
2082 			    OSAL_NULL /* default parameters */);
2083 
2084 out1:
2085 	ecore_ptt_release(p_hwfn, p_ptt);
2086 out0:
2087 	OSAL_MUTEX_RELEASE(&p_hwfn->p_cxt_mngr->mutex);
2088 
2089 	return rc;
2090 }
2091 
2092 /* This function is very RoCE oriented; if another protocol needs this
2093  * feature in the future, the function will have to be made more generic.
2094  */
2095 static enum _ecore_status_t
2096 ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
2097 			 enum ecore_cxt_elem_type elem_type,
2098 			 u32 start_iid, u32 count)
2099 {
2100 	u32 start_line, end_line, shadow_start_line, shadow_end_line;
2101 	u32 reg_offset, elem_size, hw_p_size, elems_per_p;
2102 	struct ecore_ilt_client_cfg *p_cli;
2103 	struct ecore_ilt_cli_blk *p_blk;
2104 	u32 end_iid = start_iid + count;
2105 	struct ecore_ptt *p_ptt;
2106 	u64 ilt_hw_entry = 0;
2107 	u32 i;
2108 
2109 	switch (elem_type) {
2110 	case ECORE_ELEM_CXT:
2111 		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2112 		elem_size = CONN_CXT_SIZE(p_hwfn);
2113 		p_blk = &p_cli->pf_blks[CDUC_BLK];
2114 		break;
2115 	case ECORE_ELEM_SRQ:
2116 		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2117 		elem_size = SRQ_CXT_SIZE;
2118 		p_blk = &p_cli->pf_blks[SRQ_BLK];
2119 		break;
2120 	case ECORE_ELEM_TASK:
2121 		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2122 		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2123 		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
2124 		break;
2125 	default:
2126 		DP_NOTICE(p_hwfn, false,
2127 			  "ECORE_INVALID elem type = %d", elem_type);
2128 		return ECORE_INVAL;
2129 	}
2130 
2131 	/* Calculate line in ilt */
2132 	hw_p_size = p_cli->p_size.val;
2133 	elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2134 	start_line = p_blk->start_line + (start_iid / elems_per_p);
2135 	end_line = p_blk->start_line + (end_iid / elems_per_p);
2136 	if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
2137 		end_line--;
2138 
2139 	shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
2140 	shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
2141 
2142 	p_ptt = ecore_ptt_acquire(p_hwfn);
2143 	if (!p_ptt) {
2144 		DP_NOTICE(p_hwfn, false,
2145 			  "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
2146 		return ECORE_TIMEOUT;
2147 	}
2148 
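	/* Free every backed shadow page in the range and clear its ILT entry;
	 * ilt_hw_entry stays 0, so the DMAE write below marks the line
	 * invalid.
	 */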
2149 	for (i = shadow_start_line; i < shadow_end_line; i++) {
2150 		if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
2151 			continue;
2152 
2153 		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
2154 				    p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
2155 				    p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr,
2156 				    p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
2157 
2158 		p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = OSAL_NULL;
2159 		p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
2160 		p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
2161 
2162 		/* compute absolute offset */
2163 		reg_offset = PSWRQ2_REG_ILT_MEMORY +
2164 		    ((start_line++) * ILT_REG_SIZE_IN_BYTES *
2165 		     ILT_ENTRY_IN_REGS);
2166 
2167 		/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
2168 		 * wide-bus.
2169 		 */
2170 		ecore_dmae_host2grc(p_hwfn, p_ptt,
2171 				    (u64)(osal_uintptr_t)&ilt_hw_entry,
2172 				    reg_offset,
2173 				    sizeof(ilt_hw_entry) / sizeof(u32),
2174 				    OSAL_NULL /* default parameters */);
2175 	}
2176 
2177 	ecore_ptt_release(p_hwfn, p_ptt);
2178 
2179 	return ECORE_SUCCESS;
2180 }
2181 
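/* Number of ILT pages needed to hold the given block (0 if unused) */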
2182 static u16 ecore_blk_calculate_pages(struct ecore_ilt_cli_blk *p_blk)
2183 {
2184 	if (p_blk->real_size_in_page == 0)
2185 		return 0;
2186 
2187 	return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
2188 }
2189 
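/* The helpers below report how many CDUT ILT pages the PF/VF init (force
 * load) and working task segments occupy; callers can use these to size
 * buffers that mirror those segments (e.g. for an ILT dump).
 */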
2190 u16 ecore_get_cdut_num_pf_init_pages(struct ecore_hwfn *p_hwfn)
2191 {
2192 	struct ecore_ilt_client_cfg *p_cli;
2193 	struct ecore_ilt_cli_blk *p_blk;
2194 	u16 i, pages = 0;
2195 
2196 	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2197 	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
2198 		p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
2199 		pages += ecore_blk_calculate_pages(p_blk);
2200 	}
2201 
2202 	return pages;
2203 }
2204 
2205 u16 ecore_get_cdut_num_vf_init_pages(struct ecore_hwfn *p_hwfn)
2206 {
2207 	struct ecore_ilt_client_cfg *p_cli;
2208 	struct ecore_ilt_cli_blk *p_blk;
2209 	u16 i, pages = 0;
2210 
2211 	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2212 	for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
2213 		p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)];
2214 		pages += ecore_blk_calculate_pages(p_blk);
2215 	}
2216 
2217 	return pages;
2218 }
2219 
2220 u16 ecore_get_cdut_num_pf_work_pages(struct ecore_hwfn *p_hwfn)
2221 {
2222 	struct ecore_ilt_client_cfg *p_cli;
2223 	struct ecore_ilt_cli_blk *p_blk;
2224 	u16 i, pages = 0;
2225 
2226 	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2227 	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
2228 		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
2229 		pages += ecore_blk_calculate_pages(p_blk);
2230 	}
2231 
2232 	return pages;
2233 }
2234 
2235 u16 ecore_get_cdut_num_vf_work_pages(struct ecore_hwfn *p_hwfn)
2236 {
2237 	struct ecore_ilt_client_cfg *p_cli;
2238 	struct ecore_ilt_cli_blk *p_blk;
2239 	u16 pages = 0, i;
2240 
2241 	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2242 	for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
2243 		p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)];
2244 		pages += ecore_blk_calculate_pages(p_blk);
2245 	}
2246 
2247 	return pages;
2248 }
2249