xref: /dpdk/drivers/bus/dpaa/include/fsl_bman.h (revision e77506397fc8005c5129e22e9e2d15d5876790fd)
1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  *
3  * Copyright 2008-2012 Freescale Semiconductor, Inc.
4  *
5  */
6 
7 #ifndef __FSL_BMAN_H
8 #define __FSL_BMAN_H
9 
10 #include <rte_compat.h>
11 
12 #ifdef __cplusplus
13 extern "C" {
14 #endif
15 
16 /* This wrapper represents a bit-array for the depletion state of the 64 Bman
17  * buffer pools.
18  */
struct bman_depletion {
	u32 state[2];	/* 64 pools -> one bit each, across two 32-bit words */
};
22 
23 static inline void bman_depletion_init(struct bman_depletion *c)
24 {
25 	c->state[0] = c->state[1] = 0;
26 }
27 
28 static inline void bman_depletion_fill(struct bman_depletion *c)
29 {
30 	c->state[0] = c->state[1] = ~0;
31 }
32 
33 /* --- Bman data structures (and associated constants) --- */
34 
35 /* Represents s/w corenet portal mapped data structures */
36 struct bm_rcr_entry;	/* RCR (Release Command Ring) entries */
37 struct bm_mc_command;	/* MC (Management Command) command */
38 struct bm_mc_result;	/* MC result */
39 
40 /* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer
41  * pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
42  * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used.
43  */
struct __rte_aligned(8) bm_buffer {
	union {
		/* Byte-level view: pool id plus the 48-bit address split into
		 * 16-bit high and 32-bit low halves; field order flips with
		 * host endianness so the in-memory layout stays fixed.
		 */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 __reserved1;
			u8 bpid;
			u16 hi; /* High 16-bits of 48-bit address */
			u32 lo; /* Low 32-bits of 48-bit address */
#else
			u32 lo; /* Low 32-bits of 48-bit address */
			u16 hi; /* High 16-bits of 48-bit address */
			u8 bpid;
			u8 __reserved;
#endif
		};
		/* Bitfield view: the same 48-bit address as a single field. */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u64 __notaddress:16;
			u64 addr:48;
#else
			u64 addr:48;
			u64 __notaddress:16;
#endif
		};
		/* Raw 64-bit view of the whole descriptor. */
		u64 opaque;
	};
};
71 static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
72 {
73 	return buf->addr;
74 }
75 
76 static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
77 {
78 	return (dma_addr_t)buf->addr;
79 }
80 
/* Set the 48-bit address of @buf via the hi/lo fields (the bpid byte is left
 * untouched). @v is captured into a local so it is evaluated exactly once,
 * avoiding the multiple-evaluation hazard the original macro had when @v is
 * an expression with side effects.
 */
#define bm_buffer_set64(buf, v) \
	do { \
		struct bm_buffer *__buf931 = (buf); \
		u64 __v931 = (v); \
		__buf931->hi = upper_32_bits(__v931); \
		__buf931->lo = lower_32_bits(__v931); \
	} while (0)
87 
88 /* See 1.5.3.5.4: "Release Command" */
struct __rte_packed_begin bm_rcr_entry {
	union {
		/* Command header: verb byte (managed by the driver, never
		 * written directly - hence the field name) plus the pool id
		 * for single-BPID releases.
		 */
		struct {
			u8 __dont_write_directly__verb;
			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
			u8 __reserved1[62];
		};
		/* Up to 8 buffers released by one command (8 x 8 = 64 bytes). */
		struct bm_buffer bufs[8];
	};
} __rte_packed_end;
99 #define BM_RCR_VERB_VBIT		0x80
100 #define BM_RCR_VERB_CMD_MASK		0x70	/* one of two values; */
101 #define BM_RCR_VERB_CMD_BPID_SINGLE	0x20
102 #define BM_RCR_VERB_CMD_BPID_MULTI	0x30
103 #define BM_RCR_VERB_BUFCOUNT_MASK	0x0f	/* values 1..8 */
104 
105 /* See 1.5.3.1: "Acquire Command" */
106 /* See 1.5.3.2: "Query Command" */
/* "Acquire" command payload: only the pool id is meaningful. */
struct __rte_packed_begin bm_mcc_acquire {
	u8 bpid;
	u8 __reserved1[62];
} __rte_packed_end;
/* "Query" command payload: carries no parameters. */
struct __rte_packed_begin bm_mcc_query {
	u8 __reserved2[63];
} __rte_packed_end;
/* Management command: verb byte followed by the per-command payload. */
struct __rte_packed_begin bm_mc_command {
	u8 __dont_write_directly__verb;
	union {
		struct bm_mcc_acquire acquire;
		struct bm_mcc_query query;
	};
} __rte_packed_end;
121 #define BM_MCC_VERB_VBIT		0x80
122 #define BM_MCC_VERB_CMD_MASK		0x70	/* where the verb contains; */
123 #define BM_MCC_VERB_CMD_ACQUIRE		0x10
124 #define BM_MCC_VERB_CMD_QUERY		0x40
125 #define BM_MCC_VERB_ACQUIRE_BUFCOUNT	0x0f	/* values 1..8 go here */
126 
127 /* See 1.5.3.3: "Acquire Response" */
128 /* See 1.5.3.4: "Query Response" */
struct bm_pool_state {
	u8 __reserved1[32];
	/* "availability state" and "depletion state" */
	struct {
		u8 __reserved1[8];
		/* Access using bman_depletion_***() */
		struct bman_depletion state; /* one bit per buffer pool */
	} as, ds;
};
138 
struct __rte_packed_begin bm_mc_result {
	union {
		/* Generic view: response verb plus padding to 64 bytes. */
		struct {
			u8 verb;
			u8 __reserved1[63];
		};
		/* "Acquire" response: pool id and up to 8 returned buffers. */
		union {
			struct {
				u8 __reserved1;
				u8 bpid;
				u8 __reserved2[62];
			};
			struct bm_buffer bufs[8];
		} acquire;
		/* "Query" response: state of all pools (bm_pool_state). */
		struct bm_pool_state query;
	};
} __rte_packed_end;
156 #define BM_MCR_VERB_VBIT		0x80
157 #define BM_MCR_VERB_CMD_MASK		BM_MCC_VERB_CMD_MASK
158 #define BM_MCR_VERB_CMD_ACQUIRE		BM_MCC_VERB_CMD_ACQUIRE
159 #define BM_MCR_VERB_CMD_QUERY		BM_MCC_VERB_CMD_QUERY
160 #define BM_MCR_VERB_CMD_ERR_INVALID	0x60
161 #define BM_MCR_VERB_CMD_ERR_ECC		0x70
162 #define BM_MCR_VERB_ACQUIRE_BUFCOUNT	BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
163 
164 /* Portal and Buffer Pools */
165 /* Represents a managed portal */
166 struct bman_portal;
167 
168 /* This object type represents Bman buffer pools. */
169 struct bman_pool;
170 
171 /* This struct specifies parameters for a bman_pool object. */
struct bman_pool_params {
	/* index of the buffer pool to encapsulate (0-63), ignored if
	 * BMAN_POOL_FLAG_DYNAMIC_BPID is set.
	 */
	u32 bpid;
	/* bit-mask of BMAN_POOL_FLAG_*** options */
	u32 flags;
	/* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
	 * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
	 * when run in the control plane (which controls Bman CCSR). This array
	 * matches the definition of bm_pool_set().
	 * NOTE(review): bm_pool_set() is not declared in this header - confirm
	 * the expected threshold ordering against the Bman driver.
	 */
	u32 thresholds[4];
};
186 
187 /* Flags to bman_new_pool() */
188 #define BMAN_POOL_FLAG_NO_RELEASE    0x00000001 /* can't release to pool */
189 #define BMAN_POOL_FLAG_ONLY_RELEASE  0x00000002 /* can only release to pool */
190 #define BMAN_POOL_FLAG_DYNAMIC_BPID  0x00000008 /* (de)allocate bpid */
191 #define BMAN_POOL_FLAG_THRESH        0x00000010 /* set depletion thresholds */
192 
193 /* Flags to bman_release() */
194 #define BMAN_RELEASE_FLAG_NOW        0x00000008 /* issue immediate release */
195 
196 
197 /**
198  * bman_get_portal_index - get portal configuration index
199  */
200 int bman_get_portal_index(void);
201 
202 /**
203  * bman_rcr_is_empty - Determine if portal's RCR is empty
204  *
205  * For use in situations where a cpu-affine caller needs to determine when all
206  * releases for the local portal have been processed by Bman but can't use the
207  * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
208  * The function forces tracking of RCR consumption (which normally doesn't
209  * happen until release processing needs to find space to put new release
210  * commands), and returns zero if the ring still has unprocessed entries,
211  * non-zero if it is empty.
212  */
213 int bman_rcr_is_empty(void);
214 
215 /**
216  * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
217  * @result: is set by the API to the base BPID of the allocated range
218  * @count: the number of BPIDs required
219  * @align: required alignment of the allocated range
220  * @partial: non-zero if the API can return fewer than @count BPIDs
221  *
222  * Returns the number of buffer pools allocated, or a negative error code. If
223  * @partial is non zero, the allocation request may return a smaller range of
224  * BPs than requested (though alignment will be as requested). If @partial is
225  * zero, the return value will either be 'count' or negative.
226  */
227 int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
228 static inline int bman_alloc_bpid(u32 *result)
229 {
230 	int ret = bman_alloc_bpid_range(result, 1, 0, 0);
231 
232 	return (ret > 0) ? 0 : ret;
233 }
234 
235 /**
236  * bman_release_bpid_range - Release the specified range of buffer pool IDs
237  * @bpid: the base BPID of the range to deallocate
238  * @count: the number of BPIDs in the range
239  *
240  * This function can also be used to seed the allocator with ranges of BPIDs
241  * that it can subsequently allocate from.
242  */
243 void bman_release_bpid_range(u32 bpid, unsigned int count);
244 static inline void bman_release_bpid(u32 bpid)
245 {
246 	bman_release_bpid_range(bpid, 1);
247 }
248 
249 int bman_reserve_bpid_range(u32 bpid, unsigned int count);
250 static inline int bman_reserve_bpid(u32 bpid)
251 {
252 	return bman_reserve_bpid_range(bpid, 1);
253 }
254 
255 void bman_seed_bpid_range(u32 bpid, unsigned int count);
256 
257 int bman_shutdown_pool(u32 bpid);
258 
/**
 * bman_new_pool - Allocates a Buffer Pool object
 * @params: parameters specifying the buffer pool ID and behaviour
 *
 * Creates a pool object for the given @params. A portal and the depletion
 * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
 * is set. NB, the fields from @params are copied into the new pool object, so
 * the structure provided by the caller can be released or reused after the
 * function returns.
 *
 * NOTE(review): neither BMAN_POOL_FLAG_DEPLETION nor a depletion-callback
 * field exists in this header (see struct bman_pool_params and the
 * BMAN_POOL_FLAG_*** defines) - the paragraph above appears stale; confirm
 * against the Bman driver implementation.
 */
__rte_internal
struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
271 
272 /**
273  * bman_free_pool - Deallocates a Buffer Pool object
274  * @pool: the pool object to release
275  */
276 __rte_internal
277 void bman_free_pool(struct bman_pool *pool);
278 
279 /**
280  * bman_get_params - Returns a pool object's parameters.
281  * @pool: the pool object
282  *
283  * The returned pointer refers to state within the pool object so must not be
284  * modified and can no longer be read once the pool object is destroyed.
285  */
286 __rte_internal
287 const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
288 
289 /**
290  * bman_release - Release buffer(s) to the buffer pool
291  * @pool: the buffer pool object to release to
292  * @bufs: an array of buffers to release
293  * @num: the number of buffers in @bufs (1-8)
294  * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
295  *
296  */
297 __rte_internal
298 int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
299 		 u32 flags);
300 
/**
 * bman_acquire - Acquire buffer(s) from a buffer pool
 * @pool: the buffer pool object to acquire from
 * @bufs: array for storing the acquired buffers
 * @num: the number of buffers desired (@bufs is at least this big)
 * @flags: option flags; no BMAN_ACQUIRE_FLAG_*** values are defined in this
 *	header, so callers presumably pass 0 - confirm against implementation
 *
 * Issues an "Acquire" command via the portal's management command interface.
 * The return value will be the number of buffers obtained from the pool, or a
 * negative error code if a h/w error or pool starvation was encountered.
 */
__rte_internal
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
		 u32 flags);
314 
315 /**
316  * bman_query_pools - Query all buffer pool states
317  * @state: storage for the queried availability and depletion states
318  */
319 int bman_query_pools(struct bm_pool_state *state);
320 
321 /**
322  * bman_query_free_buffers - Query how many free buffers are in buffer pool
323  * @pool: the buffer pool object to query
324  *
325  * Return the number of the free buffers
326  */
327 __rte_internal
328 u32 bman_query_free_buffers(struct bman_pool *pool);
329 
330 /**
331  * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
332  * @pool: the buffer pool object to which the thresholds will be set
333  * @thresholds: the new thresholds
334  */
335 int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
336 
/**
 * bm_pool_set_hw_threshold - Change the buffer pool's thresholds
 * @bpid: Pool id
 * @low_thresh: low threshold
 * @high_thresh: high threshold
 */
int bm_pool_set_hw_threshold(u32 bpid, const u32 low_thresh,
			     const u32 high_thresh);
345 
346 #ifdef __cplusplus
347 }
348 #endif
349 
350 #endif /* __FSL_BMAN_H */
351