/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef __DLB2_HW_TYPES_NEW_H
#define __DLB2_HW_TYPES_NEW_H

#include "../../dlb2_priv.h"
#include "dlb2_user.h"

#include "dlb2_osdep_list.h"
#include "dlb2_osdep_types.h"
#include "dlb2_regs.h"

#define DLB2_BITS_SET(x, val, mask)	(x = ((x) & ~(mask))     \
				 | (((val) << (mask##_LOC)) & (mask)))
#define DLB2_BITS_CLR(x, mask)	(x &= ~(mask))
#define DLB2_BIT_SET(x, mask)	((x) |= (mask))
#define DLB2_BITS_GET(x, mask)	(((x) & (mask)) >> (mask##_LOC))

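/*
 * Illustrative usage (not from the driver): each field mask FOO used with
 * these macros is expected to have a companion FOO_LOC constant giving the
 * field's bit offset, as the mask##_LOC token pasting above requires. For a
 * hypothetical 4-bit field EXAMPLE_FIELD = 0xf00 with EXAMPLE_FIELD_LOC = 8:
 *
 *	u32 reg = 0;
 *	DLB2_BITS_SET(reg, 0x5, EXAMPLE_FIELD);	// reg is now 0x500
 *	DLB2_BITS_GET(reg, EXAMPLE_FIELD);	// evaluates to 0x5
 */
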
#define DLB2_MAX_NUM_VDEVS			16
#define DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS	2
#define DLB2_NUM_ARB_WEIGHTS			8
#define DLB2_MAX_NUM_AQED_ENTRIES		2048
#define DLB2_MAX_WEIGHT				255
#define DLB2_NUM_COS_DOMAINS			4
#define DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES	5
#define DLB2_MAX_CQ_COMP_CHECK_LOOPS		409600
#define DLB2_MAX_QID_EMPTY_CHECK_LOOPS		(4 * DLB2_MAX_NUM_LDB_CREDITS)

#define DLB2_FUNC_BAR				0
#define DLB2_CSR_BAR				2

#define PCI_DEVICE_ID_INTEL_DLB2_PF 0x2710
#define PCI_DEVICE_ID_INTEL_DLB2_VF 0x2711

#define PCI_DEVICE_ID_INTEL_DLB2_5_PF 0x2714
#define PCI_DEVICE_ID_INTEL_DLB2_5_VF 0x2715

#define DLB2_ALARM_HW_SOURCE_SYS 0
#define DLB2_ALARM_HW_SOURCE_DLB 1

#define DLB2_ALARM_HW_UNIT_CHP 4

#define DLB2_ALARM_SYS_AID_ILLEGAL_QID		3
#define DLB2_ALARM_SYS_AID_DISABLED_QID		4
#define DLB2_ALARM_SYS_AID_ILLEGAL_HCW		5
#define DLB2_ALARM_HW_CHP_AID_ILLEGAL_ENQ	1
#define DLB2_ALARM_HW_CHP_AID_EXCESS_TOKEN_POPS 2

/*
 * Hardware-defined base addresses. Those prefixed 'DLB2_DRV' are only used by
 * the PF driver.
 */
#define DLB2_DRV_LDB_PP_BASE   0x2300000
#define DLB2_DRV_LDB_PP_STRIDE 0x1000
#define DLB2_DRV_LDB_PP_BOUND  (DLB2_DRV_LDB_PP_BASE + \
				DLB2_DRV_LDB_PP_STRIDE * DLB2_MAX_NUM_LDB_PORTS)
#define DLB2_DRV_DIR_PP_BASE   0x2200000
#define DLB2_DRV_DIR_PP_STRIDE 0x1000
#define DLB2_DRV_DIR_PP_BOUND  (DLB2_DRV_DIR_PP_BASE + \
				DLB2_DRV_DIR_PP_STRIDE * DLB2_MAX_NUM_DIR_PORTS)
#define DLB2_LDB_PP_BASE       0x2100000
#define DLB2_LDB_PP_STRIDE     0x1000
#define DLB2_LDB_PP_BOUND      (DLB2_LDB_PP_BASE + \
				DLB2_LDB_PP_STRIDE * DLB2_MAX_NUM_LDB_PORTS)
#define DLB2_LDB_PP_OFFS(id)   (DLB2_LDB_PP_BASE + (id) * DLB2_PP_SIZE)
#define DLB2_DIR_PP_BASE       0x2000000
#define DLB2_DIR_PP_STRIDE     0x1000
#define DLB2_DIR_PP_BOUND      (DLB2_DIR_PP_BASE + \
				DLB2_DIR_PP_STRIDE * \
				DLB2_MAX_NUM_DIR_PORTS_V2_5)
#define DLB2_DIR_PP_OFFS(id)   (DLB2_DIR_PP_BASE + (id) * DLB2_PP_SIZE)

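/*
 * For example, DLB2_LDB_PP_OFFS(3) is 0x2100000 + 3 * DLB2_PP_SIZE; assuming
 * DLB2_PP_SIZE (defined elsewhere) matches the 0x1000 stride above, that is
 * 0x2103000, i.e. each port's producer-port window occupies its own 4 KB page.
 */
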
struct dlb2_resource_id {
	u32 phys_id;
	u32 virt_id;
	u8 vdev_owned;
	u8 vdev_id;
};

struct dlb2_freelist {
	u32 base;
	u32 bound;
	u32 offset;
};

static inline u32 dlb2_freelist_count(struct dlb2_freelist *list)
{
	return list->bound - list->base - list->offset;
}
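
/*
 * A freelist covers the contiguous range [base, bound); 'offset' is the
 * number of entries already consumed, so dlb2_freelist_count() returns how
 * many remain. E.g. base = 0, bound = 64, offset = 10 leaves 54 entries.
 */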
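
/*
 * 16-byte hardware control word (HCW): 8 bytes of payload plus the
 * scheduling metadata in words 3 and 4 (QID, scheduling type, priority,
 * lock ID, and the completion/token/valid flags) that the device interprets
 * when an HCW is written to a producer port.
 */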
struct dlb2_hcw {
	u64 data;
	/* Word 3 */
	u16 opaque;
	u8 qid;
	u8 sched_type:2;
	u8 priority:3;
	u8 msg_type:3;
	/* Word 4 */
	u16 lock_id;
	u8 ts_flag:1;
	u8 rsvd1:2;
	u8 no_dec:1;
	u8 cmp_id:4;
	u8 cq_token:1;
	u8 qe_comp:1;
	u8 qe_frag:1;
	u8 qe_valid:1;
	u8 int_arm:1;
	u8 error:1;
	u8 rsvd:2;
};

struct dlb2_ldb_queue {
	struct dlb2_list_entry domain_list;
	struct dlb2_list_entry func_list;
	struct dlb2_resource_id id;
	struct dlb2_resource_id domain_id;
	u32 num_qid_inflights;
	u32 aqed_limit;
	u32 sn_group; /* sn == sequence number */
	u32 sn_slot;
	u32 num_mappings;
	u8 sn_cfg_valid;
	u8 num_pending_additions;
	u8 owned;
	u8 configured;
};

/*
 * Directed ports and queues are paired by nature, so the driver tracks them
 * with a single data structure.
 */
struct dlb2_dir_pq_pair {
	struct dlb2_list_entry domain_list;
	struct dlb2_list_entry func_list;
	struct dlb2_resource_id id;
	struct dlb2_resource_id domain_id;
	u32 ref_cnt;
	u8 init_tkn_cnt;
	u8 queue_configured;
	u8 port_configured;
	u8 owned;
	u8 enabled;
};

enum dlb2_qid_map_state {
	/* The slot does not contain a valid queue mapping */
	DLB2_QUEUE_UNMAPPED,
	/* The slot contains a valid queue mapping */
	DLB2_QUEUE_MAPPED,
	/* The driver is mapping a queue into this slot */
	DLB2_QUEUE_MAP_IN_PROG,
	/* The driver is unmapping a queue from this slot */
	DLB2_QUEUE_UNMAP_IN_PROG,
	/*
	 * The driver is unmapping a queue from this slot, and once complete
	 * will replace it with another mapping.
	 */
	DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP,
};

struct dlb2_ldb_port_qid_map {
	enum dlb2_qid_map_state state;
	u16 qid;
	u16 pending_qid;
	u8 priority;
	u8 pending_priority;
};

struct dlb2_ldb_port {
	struct dlb2_list_entry domain_list;
	struct dlb2_list_entry func_list;
	struct dlb2_resource_id id;
	struct dlb2_resource_id domain_id;
	/* The qid_map represents the hardware QID mapping state. */
	struct dlb2_ldb_port_qid_map qid_map[DLB2_MAX_NUM_QIDS_PER_LDB_CQ];
	u32 hist_list_entry_base;
	u32 hist_list_entry_limit;
	u32 ref_cnt;
	u8 cq_depth;
	u8 init_tkn_cnt;
	u8 num_pending_removals;
	u8 num_mappings;
	u8 owned;
	u8 enabled;
	u8 configured;
};

struct dlb2_sn_group {
	u32 mode;
	u32 sequence_numbers_per_queue;
	u32 slot_use_bitmap;
	u32 id;
};

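/*
 * Each sequence number (SN) group provides 1024 sequence numbers in total;
 * 'mode' selects how they are divided among queues (mode 0: 16 slots of 64
 * SNs, mode 1: 8 slots of 128, ..., mode 4: 1 slot of 1024). slot_use_bitmap
 * records which slots are allocated, and the helpers below manage it.
 */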
static inline bool dlb2_sn_group_full(struct dlb2_sn_group *group)
{
	const u32 mask[] = {
		0x0000ffff,  /* 64 SNs per queue */
		0x000000ff,  /* 128 SNs per queue */
		0x0000000f,  /* 256 SNs per queue */
		0x00000003,  /* 512 SNs per queue */
		0x00000001}; /* 1024 SNs per queue */

	return group->slot_use_bitmap == mask[group->mode];
}

static inline int dlb2_sn_group_alloc_slot(struct dlb2_sn_group *group)
{
	const u32 bound[] = {16, 8, 4, 2, 1};
	u32 i;

	for (i = 0; i < bound[group->mode]; i++) {
		if (!(group->slot_use_bitmap & (1 << i))) {
			group->slot_use_bitmap |= 1 << i;
			return i;
		}
	}

	return -1;
}

static inline void
dlb2_sn_group_free_slot(struct dlb2_sn_group *group, int slot)
{
	group->slot_use_bitmap &= ~(1 << slot);
}

static inline int dlb2_sn_group_used_slots(struct dlb2_sn_group *group)
{
	int i, cnt = 0;

	for (i = 0; i < 32; i++)
		cnt += !!(group->slot_use_bitmap & (1 << i));

	return cnt;
}

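/*
 * Per-scheduling-domain state: the queues, ports, history list entries, and
 * credits owned by one domain. Resources sit on the avail_* lists until they
 * are configured, at which point they move to the corresponding used_* list.
 */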
struct dlb2_hw_domain {
	struct dlb2_function_resources *parent_func;
	struct dlb2_list_entry func_list;
	struct dlb2_list_head used_ldb_queues;
	struct dlb2_list_head used_ldb_ports[DLB2_NUM_COS_DOMAINS];
	struct dlb2_list_head used_dir_pq_pairs;
	struct dlb2_list_head avail_ldb_queues;
	struct dlb2_list_head avail_ldb_ports[DLB2_NUM_COS_DOMAINS];
	struct dlb2_list_head avail_dir_pq_pairs;
	struct dlb2_list_head rsvd_dir_pq_pairs;
	u32 total_hist_list_entries;
	u32 avail_hist_list_entries;
	u32 hist_list_entry_base;
	u32 hist_list_entry_offset;
	union {
		struct {
			u32 num_ldb_credits;
			u32 num_dir_credits;
		};
		struct {
			u32 num_credits;
		};
	};
	u32 num_avail_aqed_entries;
	u32 num_used_aqed_entries;
	struct dlb2_resource_id id;
	int num_pending_removals;
	int num_pending_additions;
	u8 configured;
	u8 started;
};

struct dlb2_bitmap;

struct dlb2_function_resources {
	struct dlb2_list_head avail_domains;
	struct dlb2_list_head used_domains;
	struct dlb2_list_head avail_ldb_queues;
	struct dlb2_list_head avail_ldb_ports[DLB2_NUM_COS_DOMAINS];
	struct dlb2_list_head avail_dir_pq_pairs;
	struct dlb2_bitmap *avail_hist_list_entries;
	u32 num_avail_domains;
	u32 num_avail_ldb_queues;
	u32 num_avail_ldb_ports[DLB2_NUM_COS_DOMAINS];
	u32 num_avail_dir_pq_pairs;
	union {
		struct {
			u32 num_avail_qed_entries;
			u32 num_avail_dqed_entries;
		};
		struct {
			u32 num_avail_entries;
		};
	};
	u32 num_avail_aqed_entries;
	u8 locked; /* (VDEV only) */
};

/*
 * After initialization, each resource in dlb2_hw_resources is located in one
 * of the following lists:
 * -- The PF's available resources list. These are unconfigured resources owned
 *	by the PF and not allocated to a dlb2 scheduling domain.
 * -- A VDEV's available resources list. These are VDEV-owned unconfigured
 *	resources not allocated to a dlb2 scheduling domain.
 * -- A domain's available resources list. These are domain-owned unconfigured
 *	resources.
 * -- A domain's used resources list. These are domain-owned configured
 *	resources.
 *
 * A resource moves to a new list when a VDEV or domain is created or destroyed,
 * or when the resource is configured.
 */
struct dlb2_hw_resources {
	struct dlb2_ldb_queue ldb_queues[DLB2_MAX_NUM_LDB_QUEUES];
	struct dlb2_ldb_port ldb_ports[DLB2_MAX_NUM_LDB_PORTS];
	struct dlb2_dir_pq_pair dir_pq_pairs[DLB2_MAX_NUM_DIR_PORTS_V2_5];
	struct dlb2_sn_group sn_groups[DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS];
};

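/*
 * Software mailbox state for PF <-> VDEV communication: one message buffer
 * and ISR-in-progress flag per direction, plus an injection callback
 * (pf_to_vdev_inject) and argument used to notify the VDEV side of a new
 * PF-to-VDEV message.
 */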
struct dlb2_mbox {
	u32 *mbox;
	u32 *isr_in_progress;
};

struct dlb2_sw_mbox {
	struct dlb2_mbox vdev_to_pf;
	struct dlb2_mbox pf_to_vdev;
	void (*pf_to_vdev_inject)(void *arg);
	void *pf_to_vdev_inject_arg;
};
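
/*
 * Top-level state for one DLB device: the device version, the BAR mappings,
 * and the resource accounting for the PF, each VDEV, and each scheduling
 * domain.
 */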
struct dlb2_hw {
	uint8_t ver;

	/* CSR BAR (BAR 2) address */
	void *csr_kva;
	unsigned long csr_phys_addr;
	/* Func BAR (BAR 0) address */
	void *func_kva;
	unsigned long func_phys_addr;

	/* Resource tracking */
	struct dlb2_hw_resources rsrcs;
	struct dlb2_function_resources pf;
	struct dlb2_function_resources vdev[DLB2_MAX_NUM_VDEVS];
	struct dlb2_hw_domain domains[DLB2_MAX_NUM_DOMAINS];
	u8 cos_reservation[DLB2_NUM_COS_DOMAINS];
	int prod_core_list[RTE_MAX_LCORE];
	u8 num_prod_cores;
	int dir_pp_allocations[DLB2_MAX_NUM_DIR_PORTS_V2_5];
	int ldb_pp_allocations[DLB2_MAX_NUM_LDB_PORTS + DLB2_NUM_COS_DOMAINS];

	/* Virtualization */
	int virt_mode;
	struct dlb2_sw_mbox mbox[DLB2_MAX_NUM_VDEVS];
	unsigned int pasid[DLB2_MAX_NUM_VDEVS];
};

#endif /* __DLB2_HW_TYPES_NEW_H */