/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2010-2016 Freescale Semiconductor Inc.
 * Copyright 2017 NXP
 *
 */

#ifndef __BMAN_H
#define __BMAN_H

#include "bman_priv.h"

/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH	0x3000
#define BM_REG_RCR_CI_CINH	0x3100
#define BM_REG_RCR_ITR		0x3200
#define BM_REG_CFG		0x3300
#define BM_REG_SCN(n)		(0x3400 + ((n) << 6))
#define BM_REG_ISR		0x3e00
#define BM_REG_IIR		0x3ec0

/* Cache-enabled register offsets */
#define BM_CL_CR		0x0000
#define BM_CL_RR0		0x0100
#define BM_CL_RR1		0x0140
#define BM_CL_RCR		0x1000
#define BM_CL_RCR_PI_CENA	0x3000
#define BM_CL_RCR_CI_CENA	0x3100

/* Note: the drivers (and the h/w programming model) already obtain the
 * required synchronisation for portal accesses via lwsync(), hwsync() and
 * data-dependencies. Use of barrier()s or other order-preserving primitives
 * would only degrade performance. Hence the use of the __raw_*() interfaces,
 * which merely ensure that the compiler treats the portal registers as
 * volatile (i.e. non-coherent).
 */

/* Cache-inhibited register access. */
#define __bm_in(bm, o)		be32_to_cpu(__raw_readl((bm)->ci + (o)))
#define __bm_out(bm, o, val)	__raw_writel(cpu_to_be32(val), \
					     (bm)->ci + (o))
#define bm_in(reg)		__bm_in(&portal->addr, BM_REG_##reg)
#define bm_out(reg, val)	__bm_out(&portal->addr, BM_REG_##reg, val)
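
/* Expansion example: inside any function with a local 'portal' pointer,
 * bm_in(RCR_ITR) token-pastes to __bm_in(&portal->addr, BM_REG_RCR_ITR),
 * i.e. be32_to_cpu(__raw_readl(portal->addr.ci + 0x3200)); the register-name
 * suffix selects the offset and the access goes via the cache-inhibited map.
 */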

/* Cache-enabled (index) register access */
#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->ce + (o))
#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->ce + (o))
#define __bm_cl_in(bm, o)	be32_to_cpu(__raw_readl((bm)->ce + (o)))
#define __bm_cl_out(bm, o, val) \
	do { \
		u32 *__tmpclout = (bm)->ce + (o); \
		__raw_writel(cpu_to_be32(val), __tmpclout); \
		dcbf(__tmpclout); \
	} while (0)
#define __bm_cl_invalidate(bm, o) dccivac((bm)->ce + (o))
#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
#define bm_cl_in(reg)	    __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
#define bm_cl_invalidate(reg) \
	__bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)

/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
 * analysis, look at using the "extra" bit in the ring index registers to avoid
 * cyclic issues.
 */
static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
	/* 'first' is included, 'last' is excluded */
	if (first <= last)
		return last - first;
	return ringsize + last - first;
}
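
/* Worked example: with ringsize 8, first (old index) 6 and last (new index)
 * 2, the indices have wrapped, so the distance is 8 + 2 - 6 = 4 entries.
 */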

/* Portal modes.
 *   Enum types:
 *     pmode == production mode
 *     cmode == consumption mode
 *   Enum values use 3-letter codes. The first letter matches the portal mode,
 *   the remaining two letters indicate:
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 */
enum bm_rcr_pmode {		/* matches BCSP_CFG::RPM */
	bm_rcr_pci = 0,		/* PI index, cache-inhibited */
	bm_rcr_pce = 1,		/* PI index, cache-enabled */
	bm_rcr_pvb = 2		/* valid-bit */
};

enum bm_rcr_cmode {		/* s/w-only */
	bm_rcr_cci,		/* CI index, cache-inhibited */
	bm_rcr_cce		/* CI index, cache-enabled */
};

/* --- Portal structures --- */

#define BM_RCR_SIZE		8

struct bm_rcr {
	struct bm_rcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	u32 busy;
	enum bm_rcr_pmode pmode;
	enum bm_rcr_cmode cmode;
#endif
};

struct bm_mc {
	struct bm_mc_command *cr;
	struct bm_mc_result *rr;
	u8 rridx, vbit;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	enum {
		/* Can only be _mc_start()ed */
		mc_idle,
		/* Can only be _mc_commit()ed or _mc_abort()ed */
		mc_user,
		/* Can only be _mc_retry()ed */
		mc_hw
	} state;
#endif
};

struct bm_addr {
	void __iomem *ce;	/* cache-enabled */
	void __iomem *ci;	/* cache-inhibited */
};

struct bm_portal {
	struct bm_addr addr;
	struct bm_rcr rcr;
	struct bm_mc mc;
	struct bm_portal_config config;
} ____cacheline_aligned;

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
#define RCR_CARRYCLEAR(p) \
	(void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))

/* Bit-wise logic to convert a ring pointer to a ring index */
static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
{
	return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
}
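
/* Each RCR entry is one 64-byte cacheline, so the 8-entry ring spans 0x200
 * bytes. Incrementing one entry past the end sets bit 9 of the pointer
 * (BM_RCR_SIZE << 6 == 0x200); given the ring's 0x200-aligned base,
 * RCR_CARRYCLEAR() clears that bit to wrap back to the ring base, and
 * RCR_PTR2IDX() maps any entry pointer to its index 0..7.
 */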

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void RCR_INC(struct bm_rcr *rcr)
{
	/* NB: this is odd-looking, but experiments show that it generates
	 * fast code with essentially no branching overheads. We increment to
	 * the next RCR pointer and handle overflow and 'vbit'.
	 */
	struct bm_rcr_entry *partial = rcr->cursor + 1;

	rcr->cursor = RCR_CARRYCLEAR(partial);
	if (partial != rcr->cursor)
		rcr->vbit ^= BM_RCR_VERB_VBIT;
}
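
/* Trace: with the cursor on entry 7, 'partial' points one entry past the
 * ring, RCR_CARRYCLEAR() folds it back to entry 0, and since the two pointers
 * differ the software valid-bit is toggled, so entries written on the new lap
 * remain distinguishable from the previous lap's.
 */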

static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
			      __maybe_unused enum bm_rcr_cmode cmode)
{
	/* This use of 'register', as well as all other occurrences, is because
	 * it has been observed to generate much faster code with gcc than is
	 * otherwise the case.
	 */
	register struct bm_rcr *rcr = &portal->rcr;
	u32 cfg;
	u8 pi;

	rcr->ring = portal->addr.ce + BM_CL_RCR;
	rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);

	pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	rcr->cursor = rcr->ring + pi;
	rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
	rcr->available = BM_RCR_SIZE - 1
		- bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
	rcr->ithresh = bm_in(RCR_ITR);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	rcr->busy = 0;
	rcr->pmode = pmode;
	rcr->cmode = cmode;
#endif
	cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
	bm_out(CFG, cfg);
	return 0;
}
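
/* A typical bring-up (a sketch; the actual mode choice lives in the portal
 * creation code elsewhere in the driver) pairs valid-bit production with
 * cache-enabled consumption to minimise cache-inhibited accesses:
 *
 *	if (bm_rcr_init(portal, bm_rcr_pvb, bm_rcr_cce))
 *		return -EIO;
 */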

static inline void bm_rcr_finish(struct bm_portal *portal)
{
	register struct bm_rcr *rcr = &portal->rcr;
	u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(!rcr->busy);
#endif
	if (pi != RCR_PTR2IDX(rcr->cursor))
		pr_crit("losing uncommitted RCR entries\n");
	if (ci != rcr->ci)
		pr_crit("missing existing RCR completions\n");
	if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
		pr_crit("RCR destroyed unquiesced\n");
}

static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
{
	register struct bm_rcr *rcr = &portal->rcr;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(!rcr->busy);
#endif
	if (!rcr->available)
		return NULL;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	rcr->busy = 1;
#endif
	dcbz_64(rcr->cursor);
	return rcr->cursor;
}

static inline void bm_rcr_abort(struct bm_portal *portal)
{
	__maybe_unused register struct bm_rcr *rcr = &portal->rcr;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(rcr->busy);
	rcr->busy = 0;
#endif
}

static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
					struct bm_portal *portal, u8 myverb)
{
	register struct bm_rcr *rcr = &portal->rcr;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(rcr->busy);
	DPAA_ASSERT(rcr->pmode != bm_rcr_pvb);
#endif
	/* With a single slot left there is no 'next' entry to hand out; the
	 * caller must commit the pending entry instead.
	 */
	if (rcr->available == 1)
		return NULL;
	rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
	dcbf_64(rcr->cursor);
	RCR_INC(rcr);
	rcr->available--;
	dcbz_64(rcr->cursor);
	return rcr->cursor;
}

static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
{
	register struct bm_rcr *rcr = &portal->rcr;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(rcr->busy);
	DPAA_ASSERT(rcr->pmode == bm_rcr_pci);
#endif
	rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
	RCR_INC(rcr);
	rcr->available--;
	hwsync();
	bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	rcr->busy = 0;
#endif
}

static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
{
	__maybe_unused register struct bm_rcr *rcr = &portal->rcr;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
#endif
	bm_cl_invalidate(RCR_PI);
	bm_cl_touch_rw(RCR_PI);
}

static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
{
	register struct bm_rcr *rcr = &portal->rcr;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(rcr->busy);
	DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
#endif
	rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
	RCR_INC(rcr);
	rcr->available--;
	lwsync();
	bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	rcr->busy = 0;
#endif
}

static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
{
	register struct bm_rcr *rcr = &portal->rcr;
	struct bm_rcr_entry *rcursor;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(rcr->busy);
	DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
#endif
	lwsync();
	rcursor = rcr->cursor;
	rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
	dcbf_64(rcursor);
	RCR_INC(rcr);
	rcr->available--;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	rcr->busy = 0;
#endif
}
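
/* Illustrative pvb-mode producer flow (a sketch; the 'myverb' encoding and
 * the entry payload layout come from the companion fsl_bman.h definitions,
 * not from this header):
 *
 *	struct bm_rcr_entry *r = bm_rcr_start(portal);
 *	if (r) {
 *		... fill in the release payload ...
 *		bm_rcr_pvb_commit(portal, myverb);
 *	}
 */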

static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
{
	register struct bm_rcr *rcr = &portal->rcr;
	u8 diff, old_ci = rcr->ci;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(rcr->cmode == bm_rcr_cci);
#endif
	rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
	rcr->available += diff;
	return diff;
}

static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
	__maybe_unused register struct bm_rcr *rcr = &portal->rcr;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
#endif
	bm_cl_touch_ro(RCR_CI);
}

static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
{
	register struct bm_rcr *rcr = &portal->rcr;
	u8 diff, old_ci = rcr->ci;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
#endif
	rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
	bm_cl_invalidate(RCR_CI);
	diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
	rcr->available += diff;
	return diff;
}
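
/* In cce mode a consumer typically pairs the two calls: bm_rcr_cce_prefetch()
 * pulls the CI cacheline in ahead of time, then bm_rcr_cce_update() reads it,
 * invalidates the stale copy and credits 'available' with however many
 * entries the hardware has consumed since the last poll.
 */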

static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
{
	register struct bm_rcr *rcr = &portal->rcr;

	return rcr->ithresh;
}

static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
{
	register struct bm_rcr *rcr = &portal->rcr;

	rcr->ithresh = ithresh;
	bm_out(RCR_ITR, ithresh);
}

static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
{
	register struct bm_rcr *rcr = &portal->rcr;

	return rcr->available;
}

static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
{
	register struct bm_rcr *rcr = &portal->rcr;

	return BM_RCR_SIZE - 1 - rcr->available;
}

/* --- Management command API --- */

static inline int bm_mc_init(struct bm_portal *portal)
{
	register struct bm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + BM_CL_CR;
	mc->rr = portal->addr.ce + BM_CL_RR0;
	mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
			BM_MCC_VERB_VBIT) ? 0 : 1;
	mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	mc->state = mc_idle;
#endif
	return 0;
}

static inline void bm_mc_finish(struct bm_portal *portal)
{
	__maybe_unused register struct bm_mc *mc = &portal->mc;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(mc->state == mc_idle);
	if (mc->state != mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}

static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
{
	register struct bm_mc *mc = &portal->mc;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(mc->state == mc_idle);
	mc->state = mc_user;
#endif
	dcbz_64(mc->cr);
	return mc->cr;
}

static inline void bm_mc_abort(struct bm_portal *portal)
{
	__maybe_unused register struct bm_mc *mc = &portal->mc;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(mc->state == mc_user);
	mc->state = mc_idle;
#endif
}

static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
{
	register struct bm_mc *mc = &portal->mc;
	struct bm_mc_result *rr = mc->rr + mc->rridx;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(mc->state == mc_user);
#endif
	lwsync();
	mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
	dcbf(mc->cr);
	dcbit_ro(rr);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	mc->state = mc_hw;
#endif
}

static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
{
	register struct bm_mc *mc = &portal->mc;
	struct bm_mc_result *rr = mc->rr + mc->rridx;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(mc->state == mc_hw);
#endif
	/* The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering.
	 */
	if (!__raw_readb(&rr->verb)) {
		dcbit_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= BM_MCC_VERB_VBIT;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	mc->state = mc_idle;
#endif
	return rr;
}
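
/* The canonical MC sequence (bm_shutdown_pool() below is a concrete user):
 *
 *	cmd = bm_mc_start(portal);
 *	... fill in the command-specific fields ...
 *	bm_mc_commit(portal, verb);
 *	while (!(res = bm_mc_result(portal)))
 *		cpu_relax();
 */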

#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
#define SCN_BIT(bpid) (0x80000000 >> ((bpid) & 31))
static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
				    int enable)
{
	u32 val;

	DPAA_ASSERT(bpid < bman_pool_max);
	/* REG_SCN(0) covers bpid 0..31, REG_SCN(1) (offset +0x40) 32..63 */
	val = __bm_in(&portal->addr, SCN_REG(bpid));
	if (enable)
		val |= SCN_BIT(bpid);
	else
		val &= ~SCN_BIT(bpid);
	__bm_out(&portal->addr, SCN_REG(bpid), val);
}
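
/* Worked example: bpid 40 selects SCN_REG(40) == BM_REG_SCN(1) == 0x3440 and
 * SCN_BIT(40) == 0x80000000 >> 8 == 0x00800000, i.e. msb-first bit 8 of the
 * second SCN word.
 */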

static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
{
#if defined(RTE_ARCH_ARM64)
	return __bm_in(&portal->addr, BM_REG_ISR + (n << 6));
#else
	return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
#endif
}

static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
				  u32 val)
{
#if defined(RTE_ARCH_ARM64)
	__bm_out(&portal->addr, BM_REG_ISR + (n << 6), val);
#else
	__bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
#endif
}

/* Buffer Pool Cleanup */
static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
{
	struct bm_mc_command *bm_cmd;
	struct bm_mc_result *bm_res;
	bool stop = false;

	while (!stop) {
		/* Acquire buffers until empty */
		bm_cmd = bm_mc_start(p);
		bm_cmd->acquire.bpid = bpid;
		bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
		while (!(bm_res = bm_mc_result(p)))
			cpu_relax();
		if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
			/* Pool is empty */
			stop = true;
		}
	}
	return 0;
}

#endif /* __BMAN_H */