xref: /dpdk/drivers/net/qede/base/ecore_cxt.h (revision 2352f348c997a34549c71c99029fb3d214aad39a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6 
7 #ifndef _ECORE_CID_
8 #define _ECORE_CID_
9 
10 #include "ecore_hsi_common.h"
11 #include "ecore_proto_if.h"
12 #include "ecore_cxt_api.h"
13 
14 /* Tasks segments definitions  */
15 #define ECORE_CXT_ISCSI_TID_SEG			PROTOCOLID_ISCSI	/* 0 */
16 #define ECORE_CXT_FCOE_TID_SEG			PROTOCOLID_FCOE		/* 1 */
17 #define ECORE_CXT_ROCE_TID_SEG			PROTOCOLID_ROCE		/* 2 */
18 
/* Kind of element for which a backing ILT page may be allocated
 * dynamically (see ecore_cxt_dynamic_ilt_alloc()).
 */
enum ecore_cxt_elem_type {
	ECORE_ELEM_CXT,		/* connection context */
	ECORE_ELEM_SRQ,		/* shared receive queue */
	ECORE_ELEM_TASK		/* task context */
};
24 
/* HW blocks that are clients of the ILT; each active client owns its own
 * range of ILT lines (see struct ecore_ilt_client_cfg below).
 */
enum ilt_clients {
	ILT_CLI_CDUC,	/* CDU - connection contexts */
	ILT_CLI_CDUT,	/* CDU - task contexts */
	ILT_CLI_QM,	/* queue manager */
	ILT_CLI_TM,	/* timers -- TODO confirm */
	ILT_CLI_SRC,	/* searcher (uses the T2 tables) */
	ILT_CLI_TSDM,	/* TSDM (SRQ memory) -- TODO confirm */
	ILT_CLI_RGFS,
	ILT_CLI_TGFS,
	MAX_ILT_CLIENTS	/* not a client - array-size sentinel */
};
36 
/**
 * @brief ecore_cxt_get_proto_cid_count - number of CIDs configured for
 *        the given protocol on this PF.
 *
 * @param p_hwfn
 * @param type
 * @param vf_cid - optional out parameter; presumably receives the per-VF
 *                 CID count -- confirm against ecore_cxt.c
 *
 * @return u32
 */
u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
				  enum protocol_type type,
				  u32 *vf_cid);

/**
 * @brief ecore_cxt_get_proto_tid_count - number of TIDs configured for
 *        the given protocol on this PF.
 *
 * @param p_hwfn
 * @param type
 *
 * @return u32
 */
u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
				  enum protocol_type type);

/**
 * @brief ecore_cxt_get_proto_cid_start - first CID assigned to the given
 *        protocol.
 *
 * @param p_hwfn
 * @param type
 *
 * @return u32
 */
u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
				  enum protocol_type type);

/**
 * @brief ecore_cxt_get_srq_count - total number of SRQs for this hwfn.
 *
 * @param p_hwfn
 *
 * @return u32
 */
u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn);
47 
48 /**
49  * @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
50  *
51  * @param p_hwfn
52  *
53  * @return enum _ecore_status_t
54  */
55 enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn);
56 
57 /**
58  * @brief ecore_cxt_cfg_ilt_compute - compute ILT init parameters
59  *
60  * @param p_hwfn
61  *
62  * @return enum _ecore_status_t
63  */
64 enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn);
65 
66 /**
67  * @brief ecore_cxt_mngr_alloc - Allocate and init the context manager struct
68  *
69  * @param p_hwfn
70  *
71  * @return enum _ecore_status_t
72  */
73 enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn);
74 
75 /**
76  * @brief ecore_cxt_mngr_free
77  *
78  * @param p_hwfn
79  */
80 void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn);
81 
82 /**
83  * @brief ecore_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired
84  *        map
85  *
86  * @param p_hwfn
87  *
88  * @return enum _ecore_status_t
89  */
90 enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn);
91 
92 /**
93  * @brief ecore_cxt_mngr_setup - Reset the acquired CIDs
94  *
95  * @param p_hwfn
96  */
97 void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn);
98 
99 /**
 * @brief ecore_cxt_hw_init_common - Initialize ILT and DQ, common phase, per
101  *        path.
102  *
103  * @param p_hwfn
104  */
105 void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn);
106 
107 /**
 * @brief ecore_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
109  *
110  * @param p_hwfn
111  * @param p_ptt
112  */
113 void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
114 
115 /**
 * @brief ecore_qm_init_pf - Initialize the QM PF phase, per path
117  *
118  * @param p_hwfn
119  * @param p_ptt
120  * @param is_pf_loading
121  */
122 void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
123 		      bool is_pf_loading);
124 
/**
 * @brief ecore_qm_reconf - Reconfigures QM pf on the fly
127  *
128  * @param p_hwfn
129  * @param p_ptt
130  *
131  * @return enum _ecore_status_t
132  */
133 enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
134 				     struct ecore_ptt *p_ptt);
135 
/* Value passed as 'vfid' when the CID belongs to the PF rather than a VF */
#define ECORE_CXT_PF_CID (0xff)
137 
138 /**
 * @brief ecore_cxt_release_cid - Release a cid
140  *
141  * @param p_hwfn
142  * @param cid
143  */
144 void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid);
145 
146 /**
 * @brief _ecore_cxt_release_cid - Release a cid belonging to a vf-queue
148  *
149  * @param p_hwfn
150  * @param cid
151  * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
152  */
153 void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn,
154 			    u32 cid, u8 vfid);
155 
156 /**
 * @brief ecore_cxt_acquire_cid - Acquire a new cid of a specific protocol type
158  *
159  * @param p_hwfn
160  * @param type
161  * @param p_cid
162  *
163  * @return enum _ecore_status_t
164  */
165 enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
166 					   enum protocol_type type,
167 					   u32 *p_cid);
168 
169 /**
 * @brief _ecore_cxt_acquire_cid - Acquire a new cid of a specific protocol type
171  *                             for a vf-queue
172  *
173  * @param p_hwfn
174  * @param type
175  * @param p_cid
176  * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
177  *
178  * @return enum _ecore_status_t
179  */
180 enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
181 					    enum protocol_type type,
182 					    u32 *p_cid, u8 vfid);
183 
184 /**
 * @brief ecore_cxt_dynamic_ilt_alloc - function checks if the
186  *        page containing the iid in the ilt is already
187  *        allocated, if it is not it allocates the page.
188  *
189  * @param p_hwfn
190  * @param elem_type
191  * @param iid
192  *
193  * @return enum _ecore_status_t
194  */
195 enum _ecore_status_t
196 ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
197 			    enum ecore_cxt_elem_type elem_type,
198 			    u32 iid);
199 
200 /**
201  * @brief ecore_cxt_free_proto_ilt - function frees ilt pages
202  *        associated with the protocol passed.
203  *
204  * @param p_hwfn
205  * @param proto
206  *
207  * @return enum _ecore_status_t
208  */
209 enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
210 					      enum protocol_type proto);
211 
/* Context memory flavors: regular working memory vs. force-load (FL)
 * memory (see 'has_fl_mem' in struct ecore_tid_seg).
 */
#define ECORE_CTX_WORKING_MEM 0
#define ECORE_CTX_FL_MEM 1

/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES		PROTOCOLID_COMMON
#define NUM_TASK_TYPES		2
#define NUM_TASK_PF_SEGMENTS	4
#define NUM_TASK_VF_SEGMENTS	1

/* PF per protocol configuration object */
#define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)	/* PF segments + the VF segment */
#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)	/* index of the VF segment within tid_seg[] */
224 
/* One task (TID) segment of a connection type: how many TIDs it needs,
 * which task type they carry, and whether force-load memory is also
 * required for it.
 */
struct ecore_tid_seg {
	u32 count;		/* number of TIDs in this segment */
	u8 type;		/* task type; presumably indexes task_type_size[] -- TODO confirm */
	bool has_fl_mem;	/* segment also needs force-load memory */
};
230 
/* Per connection-type (protocol) resource configuration. */
struct ecore_conn_type_cfg {
	u32 cid_count;		/* CIDs needed by the PF for this protocol */
	u32 cids_per_vf;	/* CIDs needed by each VF for this protocol */
	struct ecore_tid_seg tid_seg[TASK_SEGMENTS];	/* PF + VF task segments */
};
236 
/* ILT Client configuration,
 * Per connection type (protocol) resources (cids, tis, vf cids etc.)
 * 1 - for connection context (CDUC) and for each task context we need two
 * values, for regular task context and for force load memory
 */
#define ILT_CLI_PF_BLOCKS	(1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS	(1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK		(0)	/* connection-context block index */
#define SRQ_BLK			(0)
#define CDUT_SEG_BLK(n)		(1 + (u8)(n))	/* regular task-context block of segment n */
#define CDUT_FL_SEG_BLK(n, X)	(1 + (n) + NUM_TASK_##X##_SEGMENTS)	/* force-load block of segment n; X is PF or VF */
248 
/* Register/value pair describing one ILT boundary setting. */
struct ilt_cfg_pair {
	u32 reg;	/* register address */
	u32 val;	/* value to program into it */
};
253 
/* One contiguous block of ILT lines owned by a client. */
struct ecore_ilt_cli_blk {
	u32 total_size;		/* 0 means not active */
	u32 real_size_in_page;	/* usable bytes per ILT page -- TODO confirm */
	u32 start_line;		/* first ILT line of this block */
	u32 dynamic_line_offset;	/* start of the on-demand lines -- TODO confirm */
	u32 dynamic_line_cnt;	/* number of lines allocated on demand */
};
261 
/* Computed ILT configuration of one client (see enum ilt_clients). */
struct ecore_ilt_client_cfg {
	bool active;		/* client owns any ILT lines at all */

	/* ILT boundaries */
	struct ilt_cfg_pair first;	/* first ILT line */
	struct ilt_cfg_pair last;	/* last ILT line */
	struct ilt_cfg_pair p_size;	/* page size */

	/* ILT client blocks for PF */
	struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
	u32 pf_total_lines;

	/* ILT client blocks for VFs */
	struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
	u32 vf_total_lines;
};
278 
/* Geometry of the acquired-CID bitmap words.
 * NOTE(review): cid_map below is declared u32* while MAP_WORD_SIZE is
 * sizeof(unsigned long) - confirm the intended word type in ecore_cxt.c.
 */
#define MAP_WORD_SIZE		sizeof(unsigned long)
#define BITS_PER_MAP_WORD	(MAP_WORD_SIZE * 8)

/* Tracks which CIDs of one protocol are currently acquired. */
struct ecore_cid_acquired_map {
	u32 start_cid;	/* first CID covered by this map */
	u32 max_count;	/* number of CIDs covered */
	u32 *cid_map;	/* bitmap, one bit per CID */
};
287 
/* Searcher (SRC) T2 table memory. */
struct ecore_src_t2 {
	struct phys_mem_desc	*dma_mem;	/* array of T2 pages */
	u32			num_pages;	/* entries in dma_mem */
	u64			first_free;	/* free-entry list head -- TODO confirm */
	u64			last_free;	/* free-entry list tail -- TODO confirm */
};
294 
/* Context manager - holds the per-protocol CID/TID configuration, the
 * computed ILT layout and the runtime CID-acquisition state of a hwfn.
 */
struct ecore_cxt_mngr {
	/* Per protocol configuration */
	struct ecore_conn_type_cfg	conn_cfg[MAX_CONN_TYPES];

	/* computed ILT structure */
	struct ecore_ilt_client_cfg	clients[MAX_ILT_CLIENTS];

	/* Task type sizes */
	u32				task_type_size[NUM_TASK_TYPES];

	/* total number of VFs for this hwfn -
	 * ALL VFs are symmetric in terms of HW resources
	 */
	u32				vf_count;
	u32				first_vf_in_pf;

	/* Acquired CIDs */
	struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
	/* per-VF acquired maps; presumably allocated separately per VF --
	 * TODO confirm against ecore_cxt.c
	 */
	struct ecore_cid_acquired_map *acquired_vf[MAX_CONN_TYPES];

	/* ILT  shadow table */
	struct phys_mem_desc		*ilt_shadow;
	u32				ilt_shadow_size;	/* entries in ilt_shadow -- TODO confirm */
	u32				pf_start_line;		/* first ILT line of this PF */

	/* Mutex for a dynamic ILT allocation */
	osal_mutex_t mutex;

	/* SRC T2 */
	struct ecore_src_t2		src_t2;

	/* The infrastructure originally was very generic and context/task
	 * oriented - per connection-type we would set how many of those
	 * are needed, and later when determining how much memory we're
	 * needing for a given block we'd iterate over all the relevant
	 * connection-types.
	 * But since then we've had some additional resources, some of which
	 * require memory which is independent of the general context/task
	 * scheme. We add those here explicitly per-feature.
	 */

	/* total number of SRQ's for this hwfn */
	u32				srq_count;

	/* Maximal number of L2 steering filters */
	u32				arfs_count;

	/* TODO - VF arfs filters ? */

	/* NOTE(review): presumably the CDU task-type id and the sizes (in
	 * bytes) of a single task/connection context - confirm in ecore_cxt.c
	 */
	u8				task_type_id;
	u16				task_ctx_size;
	u16				conn_ctx_size;
};
348 
/* Number of CDUT ILT pages used for the init/work task contexts of the
 * PF / its VFs.
 */
u16 ecore_get_cdut_num_pf_init_pages(struct ecore_hwfn *p_hwfn);
u16 ecore_get_cdut_num_vf_init_pages(struct ecore_hwfn *p_hwfn);
u16 ecore_get_cdut_num_pf_work_pages(struct ecore_hwfn *p_hwfn);
u16 ecore_get_cdut_num_vf_work_pages(struct ecore_hwfn *p_hwfn);
353 #endif /* _ECORE_CID_ */
354