xref: /dpdk/drivers/bus/dpaa/base/qbman/qman.c (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  *
3  * Copyright 2008-2016 Freescale Semiconductor Inc.
4  * Copyright 2017,2019-2024 NXP
5  *
6  */
7 
8 #include "qman.h"
9 #include <rte_branch_prediction.h>
10 #include <bus_dpaa_driver.h>
11 #include <rte_eventdev.h>
12 #include <rte_byteorder.h>
13 
14 #include <dpaa_bits.h>
15 
16 /* Compilation constants */
17 #define DQRR_MAXFILL	15
18 #define EQCR_ITHRESH	4	/* if EQCR congests, interrupt threshold */
19 #define IRQNAME		"QMan portal %d"
20 #define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
21 /* maximum number of DQRR entries to process in qman_poll() */
22 #define FSL_QMAN_POLL_LIMIT 8
23 
24 /* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
25  * inter-processor locking only. Note, FQLOCK() is always called either under a
26  * local_irq_save() or from interrupt context - hence there's no need for irq
27  * protection (and indeed, attempting to nest irq-protection doesn't work, as
28  * the "irq en/disable" machinery isn't recursive...).
29  */
30 #define FQLOCK(fq) \
31 	do { \
32 		struct qman_fq *__fq478 = (fq); \
33 		if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
34 			spin_lock(&__fq478->fqlock); \
35 	} while (0)
36 #define FQUNLOCK(fq) \
37 	do { \
38 		struct qman_fq *__fq478 = (fq); \
39 		if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
40 			spin_unlock(&__fq478->fqlock); \
41 	} while (0)
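/* Illustrative usage sketch (not part of the driver): a caller that created
 * its FQ with QMAN_FQ_FLAG_LOCKED brackets state updates the same way the
 * driver does in fq_state_change() and qman_init_fq() below, e.g.:
 *
 *	FQLOCK(fq);
 *	fq_clear(fq, QMAN_FQ_STATE_CHANGING);
 *	fq->state = qman_fq_state_retired;
 *	FQUNLOCK(fq);
 *
 * For FQs created without QMAN_FQ_FLAG_LOCKED, both macros reduce to a flag
 * test and no spinlock is taken.
 */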
42 
43 static qman_cb_free_mbuf qman_free_mbuf_cb;
44 
45 static inline void fq_set(struct qman_fq *fq, u32 mask)
46 {
47 	dpaa_set_bits(mask, &fq->flags);
48 }
49 
50 static inline void fq_clear(struct qman_fq *fq, u32 mask)
51 {
52 	dpaa_clear_bits(mask, &fq->flags);
53 }
54 
55 static inline int fq_isset(struct qman_fq *fq, u32 mask)
56 {
57 	return fq->flags & mask;
58 }
59 
60 static inline int fq_isclear(struct qman_fq *fq, u32 mask)
61 {
62 	return !(fq->flags & mask);
63 }
64 
65 struct qman_portal {
66 	struct qm_portal p;
67 	/* PORTAL_BITS_*** - dynamic, strictly internal */
68 	unsigned long bits;
69 	/* interrupt sources processed by portal_isr(), configurable */
70 	unsigned long irq_sources;
71 	u32 use_eqcr_ci_stashing;
72 	/* only 1 volatile dequeue at a time */
73 	struct qman_fq *vdqcr_owned;
74 	u32 sdqcr;
75 	int dqrr_disable_ref;
76 	/* A portal-specific handler for DCP ERNs. If this is NULL, the global
77 	 * handler is called instead.
78 	 */
79 	qman_cb_dc_ern cb_dc_ern;
80 	/* When the cpu-affine portal is activated, this is non-NULL */
81 	const struct qm_portal_config *config;
82 	struct dpa_rbtree retire_table;
83 	char irqname[MAX_IRQNAME];
84 	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
85 	struct qman_cgrs *cgrs;
86 	/* linked-list of CSCN handlers. */
87 	struct list_head cgr_cbs;
88 	/* list lock */
89 	spinlock_t cgr_lock;
90 	/* track if memory was allocated by the driver */
91 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
92 	/* Keep a shadow copy of the DQRR on LE systems as the SW needs to
93 	 * do byte swaps of DQRR read-only memory. The first entry must be
94 	 * aligned to 2 ** 10 so that DQRR index calculations can be based on
95 	 * the shadow copy address (6 bits for address shift + 4 bits for the DQRR size).
96 	 */
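	/* Worked out: each DQRR entry is a 64-byte cacheline (2 ** 6) and the
	 * ring holds QM_DQRR_SIZE = 16 entries (2 ** 4), so the 2 ** 10 = 1024
	 * byte alignment lets DQRR_PTR2IDX() recover the ring index from the
	 * low-order bits of a shadow-entry address alone.
	 */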
97 	alignas(1024) struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE];
98 #endif
99 };
100 
101 /* Global handler for DCP ERNs. Used when the portal receiving the message does
102  * not have a portal-specific handler.
103  */
104 static qman_cb_dc_ern cb_dc_ern;
105 
106 static cpumask_t affine_mask;
107 static DEFINE_SPINLOCK(affine_mask_lock);
108 static u16 affine_channels[NR_CPUS];
109 static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);
110 
111 static inline struct qman_portal *get_affine_portal(void)
112 {
113 	return &RTE_PER_LCORE(qman_affine_portal);
114 }
115 
116 /* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
117  * retirement notifications (the fact they are sometimes h/w-consumed means that
118  * contextB isn't always a s/w demux - and as we can't know which case it is
119  * when looking at the notification, we have to use the slow lookup for all of
120  * them). NB, it's possible to have multiple FQ objects refer to the same FQID
121  * (though at most one of them should be the consumer), so this table isn't for
122  * all FQs - FQs are added when retirement commands are issued, and removed when
123  * they complete, which also massively reduces the size of this table.
124  */
125 IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid);
126 /*
127  * This is what everything can wait on, even if it migrates to a different cpu
128  * to the one whose affine portal it is waiting on.
129  */
130 static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
131 
132 static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
133 {
134 	int ret = fqtree_push(&p->retire_table, fq);
135 
136 	if (ret)
137 		pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
138 	return ret;
139 }
140 
141 static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
142 {
143 	fqtree_del(&p->retire_table, fq);
144 }
145 
146 static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
147 {
148 	return fqtree_find(&p->retire_table, fqid);
149 }
150 
151 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
152 static void **qman_fq_lookup_table;
153 static size_t qman_fq_lookup_table_size;
154 
155 int qman_setup_fq_lookup_table(size_t num_entries)
156 {
157 	/* Allocate 1 more entry since the first entry is not used */
158 	num_entries++;
159 	qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *)));
160 	if (!qman_fq_lookup_table) {
161 		pr_err("QMan: Could not allocate fq lookup table\n");
162 		return -ENOMEM;
163 	}
164 	memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *));
165 	qman_fq_lookup_table_size = num_entries;
166 	pr_debug("QMan: Allocated lookup table at %p, entry count %lu\n",
167 		qman_fq_lookup_table,
168 			(unsigned long)qman_fq_lookup_table_size);
169 	return 0;
170 }
171 
172 void qman_set_fq_lookup_table(void **fq_table)
173 {
174 	qman_fq_lookup_table = fq_table;
175 }
176 
177 /* global structure that maintains fq object mapping */
178 static DEFINE_SPINLOCK(fq_hash_table_lock);
179 
180 static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
181 {
182 	u32 i;
183 
184 	spin_lock(&fq_hash_table_lock);
185 	/* Can't use index zero because this has special meaning
186 	 * in context_b field.
187 	 */
188 	for (i = 1; i < qman_fq_lookup_table_size; i++) {
189 		if (qman_fq_lookup_table[i] == NULL) {
190 			*entry = i;
191 			qman_fq_lookup_table[i] = fq;
192 			spin_unlock(&fq_hash_table_lock);
193 			return 0;
194 		}
195 	}
196 	spin_unlock(&fq_hash_table_lock);
197 	return -ENOMEM;
198 }
199 
200 static void clear_fq_table_entry(u32 entry)
201 {
202 	spin_lock(&fq_hash_table_lock);
203 	DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
204 	qman_fq_lookup_table[entry] = NULL;
205 	spin_unlock(&fq_hash_table_lock);
206 }
207 
208 static inline struct qman_fq *get_fq_table_entry(u32 entry)
209 {
210 	DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
211 	return qman_fq_lookup_table[entry];
212 }
213 #endif
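
/* Illustrative sketch (not part of the driver) of how the lookup table is
 * wired up when CONFIG_FSL_QMAN_FQ_LOOKUP is defined: qman_create_fq() below
 * grabs a slot and records it in fq->key, qman_init_fq() programs that key
 * into the FQD's context_b, and the dequeue/MR paths map it straight back:
 *
 *	find_empty_fq_table_entry(&fq->key, fq);
 *	mcc->initfq.fqd.context_b = cpu_to_be32(fq->key);
 *	...
 *	fq = get_fq_table_entry(dq->contextB);
 *
 * Index 0 is never handed out because a zero context_b has special meaning.
 */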
214 
215 static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
216 {
217 	/* Byteswap the FQD to HW format */
218 	fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
219 	fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
220 	fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
221 	fqd->context_b = cpu_to_be32(fqd->context_b);
222 	fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
223 	fqd->opaque_td = cpu_to_be16(fqd->opaque_td);
224 }
225 
226 static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
227 {
228 	/* Byteswap the FQD to CPU format */
229 	fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
230 	fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
231 	fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
232 	fqd->context_b = be32_to_cpu(fqd->context_b);
233 	fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
234 }
235 
236 static inline void cpu_to_hw_fd(struct qm_fd *fd)
237 {
238 	fd->addr = cpu_to_be40(fd->addr);
239 	fd->status = cpu_to_be32(fd->status);
240 	fd->opaque = cpu_to_be32(fd->opaque);
241 }
242 
243 static inline void hw_fd_to_cpu(struct qm_fd *fd)
244 {
245 	fd->addr = be40_to_cpu(fd->addr);
246 	fd->status = be32_to_cpu(fd->status);
247 	fd->opaque = be32_to_cpu(fd->opaque);
248 }
249 
250 /* In the case that slow- and fast-path handling are both done by qman_poll()
251  * (ie. because there is no interrupt handling), we ought to balance how often
252  * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
253  * sources, so we call the fast poll 'n' times before calling the slow poll
254  * once. The idle decrementer constant is used when the last slow-poll detected
255  * no work to do, and the busy decrementer constant when the last slow-poll had
256  * work to do.
257  */
258 #define SLOW_POLL_IDLE   1000
259 #define SLOW_POLL_BUSY   10
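/* Illustrative sketch (not part of this file) of the decrementer pattern the
 * constants above assume; a qman_poll()-style caller would do something like:
 *
 *	if (!(poll_count--)) {
 *		u32 active = __poll_portal_slow(p, qm_isr_status_read(&p->p));
 *		poll_count = active ? SLOW_POLL_BUSY : SLOW_POLL_IDLE;
 *	}
 *	__poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
 *
 * i.e. the fast poll runs every time, the slow poll only when the decrementer
 * expires, and it is re-armed short (BUSY) or long (IDLE) depending on whether
 * the last slow poll found work to do.
 */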
260 static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
261 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
262 					      unsigned int poll_limit);
263 
264 /* Portal interrupt handler */
265 static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
266 {
267 	struct qman_portal *p = ptr;
268 	/*
269 	 * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
270 	 * it could race against a Query Congestion State command also given
271 	 * as part of the handling of this interrupt source. We mustn't
272 	 * clear it a second time in this top-level function.
273 	 */
274 	u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
275 		~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
276 	u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
277 	/* DQRR-handling if it's interrupt-driven */
278 	if (is & QM_PIRQ_DQRI)
279 		__poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
280 	/* Handling of anything else that's interrupt-driven */
281 	clear |= __poll_portal_slow(p, is);
282 	qm_isr_status_clear(&p->p, clear);
283 	return IRQ_HANDLED;
284 }
285 
286 /* This inner version is used privately by qman_create_affine_portal(), as well
287  * as by the exported qman_stop_dequeues().
288  */
289 static inline void qman_stop_dequeues_ex(struct qman_portal *p)
290 {
291 	if (!(p->dqrr_disable_ref++))
292 		qm_dqrr_set_maxfill(&p->p, 0);
293 }
294 
295 static inline void qm_mr_pvb_update(struct qm_portal *portal)
296 {
297 	register struct qm_mr *mr = &portal->mr;
298 	const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
299 
300 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
301 	DPAA_ASSERT(mr->pmode == qm_mr_pvb);
302 #endif
303 	/* when accessing 'verb', use __raw_readb() to ensure that compiler
304 	 * inlining doesn't try to optimise out "excess reads".
305 	 */
306 	if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) {
307 		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
308 		if (!mr->pi)
309 			mr->vbit ^= QM_MR_VERB_VBIT;
310 		mr->fill++;
311 		res = MR_INC(res);
312 	}
313 	dcbit_ro(res);
314 }
315 
316 static int drain_mr_fqrni(struct qm_portal *p)
317 {
318 	const struct qm_mr_entry *msg;
319 loop:
320 	qm_mr_pvb_update(p);
321 	msg = qm_mr_current(p);
322 	if (!msg) {
323 		/*
324 		 * if MR was full and h/w had other FQRNI entries to produce, we
325 		 * need to allow it time to produce those entries once the
326 		 * existing entries are consumed. A worst-case situation
327 		 * (fully-loaded system) means h/w sequencers may have to do 3-4
328 		 * other things before servicing the portal's MR pump, each of
329 		 * which (if slow) may take ~50 qman cycles (which is ~200
330 		 * processor cycles). So rounding up and then multiplying this
331 		 * worst-case estimate by a factor of 10, just to be
332 		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
333 		 * one entry at a time, so h/w has an opportunity to produce new
334 		 * entries well before the ring has been fully consumed, so
335 		 * we're being *really* paranoid here.
336 		 */
337 		u64 now, then = mfatb();
338 
339 		do {
340 			now = mfatb();
341 		} while ((then + 10000) > now);
342 		qm_mr_pvb_update(p);
343 		msg = qm_mr_current(p);
344 		if (!msg)
345 			return 0;
346 	}
347 	if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
348 		/* We aren't draining anything but FQRNIs */
349 		pr_err("Found verb 0x%x in MR\n", msg->ern.verb);
350 		return -1;
351 	}
352 	qm_mr_next(p);
353 	qm_mr_cci_consume(p, 1);
354 	goto loop;
355 }
356 
357 static inline int qm_eqcr_init(struct qm_portal *portal,
358 			       enum qm_eqcr_pmode pmode,
359 			       unsigned int eq_stash_thresh,
360 			       int eq_stash_prio)
361 {
362 	/* This use of 'register', as well as all other occurrences, is because
363 	 * it has been observed to generate much faster code with gcc than is
364 	 * otherwise the case.
365 	 */
366 	register struct qm_eqcr *eqcr = &portal->eqcr;
367 	u32 cfg;
368 	u8 pi;
369 
370 	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
371 	eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
372 	qm_cl_invalidate(EQCR_CI);
373 	pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
374 	eqcr->cursor = eqcr->ring + pi;
375 	eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
376 			QM_EQCR_VERB_VBIT : 0;
377 	eqcr->available = QM_EQCR_SIZE - 1 -
378 			qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
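	/* Note: one EQCR slot is always held back (SIZE - 1), so that a
	 * completely full ring can be told apart from an empty one when the
	 * producer and consumer indices are equal.
	 */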
379 	eqcr->ithresh = qm_in(EQCR_ITR);
380 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
381 	eqcr->busy = 0;
382 	eqcr->pmode = pmode;
383 #endif
384 	cfg = (qm_in(CFG) & 0x00ffffff) |
385 		(eq_stash_thresh << 28) | /* QCSP_CFG: EST */
386 		(eq_stash_prio << 26)	| /* QCSP_CFG: EP */
387 		((pmode & 0x3) << 24);	/* QCSP_CFG::EPM */
388 	qm_out(CFG, cfg);
389 	return 0;
390 }
391 
392 static inline void qm_eqcr_finish(struct qm_portal *portal)
393 {
394 	register struct qm_eqcr *eqcr = &portal->eqcr;
395 	u8 pi, ci;
396 	u32 cfg;
397 
398 	/*
399 	 * Disable EQCI stashing because the QMan only
400 	 * presents the value it previously stashed to
401 	 * maintain coherency.  Setting the stash threshold
402 	 * to 1 then 0 ensures that QMan has resynchronized
403 	 * its internal copy so that the portal is clean
404 	 * when it is reinitialized in the future
405 	 */
406 	cfg = (qm_in(CFG) & 0x0fffffff) |
407 		(1 << 28); /* QCSP_CFG: EST */
408 	qm_out(CFG, cfg);
409 	cfg &= 0x0fffffff; /* stash threshold = 0 */
410 	qm_out(CFG, cfg);
411 
412 	pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
413 	ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
414 
415 	/* Refresh EQCR CI cache value */
416 	qm_cl_invalidate(EQCR_CI);
417 	eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
418 
419 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
420 	DPAA_ASSERT(!eqcr->busy);
421 #endif
422 	if (pi != EQCR_PTR2IDX(eqcr->cursor))
423 		pr_crit("losing uncommitted EQCR entries\n");
424 	if (ci != eqcr->ci)
425 		pr_crit("missing existing EQCR completions\n");
426 	if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
427 		pr_crit("EQCR destroyed unquiesced\n");
428 }
429 
430 static inline int qm_dqrr_init(struct qm_portal *portal,
431 			__maybe_unused const struct qm_portal_config *config,
432 			enum qm_dqrr_dmode dmode,
433 			__maybe_unused enum qm_dqrr_pmode pmode,
434 			enum qm_dqrr_cmode cmode, u8 max_fill)
435 {
436 	register struct qm_dqrr *dqrr = &portal->dqrr;
437 	u32 cfg;
438 
439 	/* Make sure the DQRR will be idle when we enable */
440 	qm_out(DQRR_SDQCR, 0);
441 	qm_out(DQRR_VDQCR, 0);
442 	qm_out(DQRR_PDQCR, 0);
443 	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
444 	dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
445 	dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
446 	dqrr->cursor = dqrr->ring + dqrr->ci;
447 	dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
448 	dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
449 			QM_DQRR_VERB_VBIT : 0;
450 	dqrr->ithresh = qm_in(DQRR_ITR);
451 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
452 	dqrr->dmode = dmode;
453 	dqrr->pmode = pmode;
454 	dqrr->cmode = cmode;
455 #endif
456 	/* Invalidate every ring entry before beginning */
457 	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
458 		dccivac(qm_cl(dqrr->ring, cfg));
459 	cfg = (qm_in(CFG) & 0xff000f00) |
460 		((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
461 		((dmode & 1) << 18) |			/* DP */
462 		((cmode & 3) << 16) |			/* DCM */
463 		0xa0 |					/* RE+SE */
464 		(0 ? 0x40 : 0) |			/* Ignore RP */
465 		(0 ? 0x10 : 0);				/* Ignore SP */
466 	qm_out(CFG, cfg);
467 	qm_dqrr_set_maxfill(portal, max_fill);
468 	return 0;
469 }
470 
471 static inline void qm_dqrr_finish(struct qm_portal *portal)
472 {
473 	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
474 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
475 	if ((dqrr->cmode != qm_dqrr_cdc) &&
476 	    (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
477 		pr_crit("Ignoring completed DQRR entries\n");
478 #endif
479 }
480 
481 static inline int qm_mr_init(struct qm_portal *portal,
482 			     __maybe_unused enum qm_mr_pmode pmode,
483 			     enum qm_mr_cmode cmode)
484 {
485 	register struct qm_mr *mr = &portal->mr;
486 	u32 cfg;
487 
488 	mr->ring = portal->addr.ce + QM_CL_MR;
489 	mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
490 	mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
491 	mr->cursor = mr->ring + mr->ci;
492 	mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
493 	mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
494 	mr->ithresh = qm_in(MR_ITR);
495 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
496 	mr->pmode = pmode;
497 	mr->cmode = cmode;
498 #endif
499 	cfg = (qm_in(CFG) & 0xfffff0ff) |
500 		((cmode & 1) << 8);		/* QCSP_CFG:MM */
501 	qm_out(CFG, cfg);
502 	return 0;
503 }
504 
505 struct qman_portal *
506 qman_init_portal(struct qman_portal *portal,
507 		   const struct qm_portal_config *c,
508 		   const struct qman_cgrs *cgrs)
509 {
510 	struct qm_portal *p;
511 	char buf[16];
512 	int ret;
513 	u32 isdr;
514 
515 	p = &portal->p;
516 
517 	if (!c)
518 		c = portal->config;
519 
520 	if (dpaa_svr_family == SVR_LS1043A_FAMILY)
521 		portal->use_eqcr_ci_stashing = 3;
522 	else
523 		portal->use_eqcr_ci_stashing =
524 					((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
525 
526 	/*
527 	 * prep the low-level portal struct with the mapped addresses from the
528 	 * config, everything that follows depends on it and "config" is more
529 	 * for (de)reference
530 	 */
531 	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
532 	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
533 	/*
534 	 * If CI-stashing is used, the current defaults use a threshold of 3,
535 	 * and stash with higher-than-DQRR priority.
536 	 */
537 	if (qm_eqcr_init(p, qm_eqcr_pvb,
538 			 portal->use_eqcr_ci_stashing, 1)) {
539 		pr_err("Qman EQCR initialisation failed\n");
540 		goto fail_eqcr;
541 	}
542 	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
543 			 qm_dqrr_cdc, DQRR_MAXFILL)) {
544 		pr_err("Qman DQRR initialisation failed\n");
545 		goto fail_dqrr;
546 	}
547 	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
548 		pr_err("Qman MR initialisation failed\n");
549 		goto fail_mr;
550 	}
551 	if (qm_mc_init(p)) {
552 		pr_err("Qman MC initialisation failed\n");
553 		goto fail_mc;
554 	}
555 
556 	/* static interrupt-gating controls */
557 	qm_dqrr_set_ithresh(p, 0);
558 	qm_mr_set_ithresh(p, 0);
559 	qm_isr_set_iperiod(p, 0);
560 	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
561 	if (!portal->cgrs)
562 		goto fail_cgrs;
563 	/* initial snapshot is no-depletion */
564 	qman_cgrs_init(&portal->cgrs[1]);
565 	if (cgrs)
566 		portal->cgrs[0] = *cgrs;
567 	else
568 		/* if the given mask is NULL, assume all CGRs can be seen */
569 		qman_cgrs_fill(&portal->cgrs[0]);
570 	INIT_LIST_HEAD(&portal->cgr_cbs);
571 	spin_lock_init(&portal->cgr_lock);
572 	portal->bits = 0;
573 	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
574 			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
575 			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
576 	portal->dqrr_disable_ref = 0;
577 	portal->cb_dc_ern = NULL;
578 	sprintf(buf, "qportal-%d", c->channel);
579 	dpa_rbtree_init(&portal->retire_table);
580 	isdr = 0xffffffff;
581 	qm_isr_disable_write(p, isdr);
582 	portal->irq_sources = 0;
583 	qm_isr_enable_write(p, portal->irq_sources);
584 	qm_isr_status_clear(p, 0xffffffff);
585 	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
586 	if (request_irq(c->irq, portal_isr, 0, portal->irqname,
587 			portal)) {
588 		pr_err("request_irq() failed\n");
589 		goto fail_irq;
590 	}
591 
592 	/* Need EQCR to be empty before continuing */
593 	isdr &= ~QM_PIRQ_EQCI;
594 	qm_isr_disable_write(p, isdr);
595 	ret = qm_eqcr_get_fill(p);
596 	if (ret) {
597 		pr_err("Qman EQCR unclean\n");
598 		goto fail_eqcr_empty;
599 	}
600 	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
601 	qm_isr_disable_write(p, isdr);
602 	if (qm_dqrr_current(p)) {
603 		pr_err("Qman DQRR unclean\n");
604 		qm_dqrr_cdc_consume_n(p, 0xffff);
605 	}
606 	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
607 		/* special handling, drain just in case it's a few FQRNIs */
608 		if (drain_mr_fqrni(p))
609 			goto fail_dqrr_mr_empty;
610 	}
611 	/* Success */
612 	portal->config = c;
613 	qm_isr_disable_write(p, 0);
614 	qm_isr_uninhibit(p);
615 	/* Write a sane SDQCR */
616 	qm_dqrr_sdqcr_set(p, portal->sdqcr);
617 	return portal;
618 fail_dqrr_mr_empty:
619 fail_eqcr_empty:
620 	free_irq(c->irq, portal);
621 fail_irq:
622 	kfree(portal->cgrs);
623 	spin_lock_destroy(&portal->cgr_lock);
624 fail_cgrs:
625 	qm_mc_finish(p);
626 fail_mc:
627 	qm_mr_finish(p);
628 fail_mr:
629 	qm_dqrr_finish(p);
630 fail_dqrr:
631 	qm_eqcr_finish(p);
632 fail_eqcr:
633 	return NULL;
634 }
635 
636 #define MAX_GLOBAL_PORTALS 8
637 static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
638 static rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];
639 
640 struct qman_portal *
641 qman_alloc_global_portal(struct qm_portal_config *q_pcfg)
642 {
643 	unsigned int i;
644 
645 	for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
646 		if (rte_atomic16_test_and_set(&global_portals_used[i])) {
647 			global_portals[i].config = q_pcfg;
648 			return &global_portals[i];
649 		}
650 	}
651 	pr_err("No portal available (%x)\n", MAX_GLOBAL_PORTALS);
652 
653 	return NULL;
654 }
655 
656 int
657 qman_free_global_portal(struct qman_portal *portal)
658 {
659 	unsigned int i;
660 
661 	for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
662 		if (&global_portals[i] == portal) {
663 			rte_atomic16_clear(&global_portals_used[i]);
664 			return 0;
665 		}
666 	}
667 	return -1;
668 }
669 
670 void
671 qman_portal_uninhibit_isr(struct qman_portal *portal)
672 {
673 	qm_isr_uninhibit(&portal->p);
674 }
675 
676 struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
677 					      const struct qman_cgrs *cgrs)
678 {
679 	struct qman_portal *res;
680 	struct qman_portal *portal = get_affine_portal();
681 
682 	/* A precondition for calling this function (from qman_driver.c) is that
683 	 * we're already affine to the cpu and won't schedule onto another cpu.
684 	 */
685 	res = qman_init_portal(portal, c, cgrs);
686 	if (res) {
687 		spin_lock(&affine_mask_lock);
688 		CPU_SET(c->cpu, &affine_mask);
689 		affine_channels[c->cpu] =
690 			c->channel;
691 		spin_unlock(&affine_mask_lock);
692 	}
693 	return res;
694 }
695 
696 static inline
697 void qman_destroy_portal(struct qman_portal *qm)
698 {
699 	const struct qm_portal_config *pcfg;
700 
701 	/* Stop dequeues on the portal */
702 	qm_dqrr_sdqcr_set(&qm->p, 0);
703 
704 	/*
705 	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
706 	 * something related to QM_PIRQ_EQCI, this may need fixing.
707 	 * Also, due to the prefetching model used for CI updates in the enqueue
708 	 * path, this update will only invalidate the CI cacheline *after*
709 	 * working on it, so we need to call this twice to ensure a full update
710 	 * irrespective of where the enqueue processing was at when the teardown
711 	 * began.
712 	 */
713 	qm_eqcr_cce_update(&qm->p);
714 	qm_eqcr_cce_update(&qm->p);
715 	pcfg = qm->config;
716 
717 	free_irq(pcfg->irq, qm);
718 
719 	kfree(qm->cgrs);
720 	qm_mc_finish(&qm->p);
721 	qm_mr_finish(&qm->p);
722 	qm_dqrr_finish(&qm->p);
723 	qm_eqcr_finish(&qm->p);
724 
725 	qm->config = NULL;
726 
727 	spin_lock_destroy(&qm->cgr_lock);
728 }
729 
730 const struct qm_portal_config *
731 qman_destroy_affine_portal(struct qman_portal *qp)
732 {
733 	/* We don't want to redirect if we're a slave, use "raw" */
734 	struct qman_portal *qm;
735 	const struct qm_portal_config *pcfg;
736 	int cpu;
737 
738 	if (qp == NULL)
739 		qm = get_affine_portal();
740 	else
741 		qm = qp;
742 	pcfg = qm->config;
743 	cpu = pcfg->cpu;
744 
745 	qman_destroy_portal(qm);
746 
747 	spin_lock(&affine_mask_lock);
748 	CPU_CLR(cpu, &affine_mask);
749 	spin_unlock(&affine_mask_lock);
750 
751 	qman_free_global_portal(qm);
752 
753 	return pcfg;
754 }
755 
756 int qman_get_portal_index(void)
757 {
758 	struct qman_portal *p = get_affine_portal();
759 	return p->config->index;
760 }
761 
762 /* Inline helper to reduce nesting in __poll_portal_slow() */
763 static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
764 				   const struct qm_mr_entry *msg, u8 verb)
765 {
766 	FQLOCK(fq);
767 	switch (verb) {
768 	case QM_MR_VERB_FQRL:
769 		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
770 		fq_clear(fq, QMAN_FQ_STATE_ORL);
771 		table_del_fq(p, fq);
772 		break;
773 	case QM_MR_VERB_FQRN:
774 		DPAA_ASSERT((fq->state == qman_fq_state_parked) ||
775 			    (fq->state == qman_fq_state_sched));
776 		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
777 		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
778 		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
779 			fq_set(fq, QMAN_FQ_STATE_NE);
780 		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
781 			fq_set(fq, QMAN_FQ_STATE_ORL);
782 		else
783 			table_del_fq(p, fq);
784 		fq->state = qman_fq_state_retired;
785 		break;
786 	case QM_MR_VERB_FQPN:
787 		DPAA_ASSERT(fq->state == qman_fq_state_sched);
788 		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
789 		fq->state = qman_fq_state_parked;
790 	}
791 	FQUNLOCK(fq);
792 }
793 
794 void
795 qman_ern_register_cb(qman_cb_free_mbuf cb)
796 {
797 	qman_free_mbuf_cb = cb;
798 }
799 
800 
801 void
802 qman_ern_poll_free(void)
803 {
804 	struct qman_portal *p = get_affine_portal();
805 	u8 verb, num = 0;
806 	const struct qm_mr_entry *msg;
807 	const struct qm_fd *fd;
808 	struct qm_mr_entry swapped_msg;
809 
810 	qm_mr_pvb_update(&p->p);
811 	msg = qm_mr_current(&p->p);
812 
813 	while (msg != NULL) {
814 		swapped_msg = *msg;
815 		hw_fd_to_cpu(&swapped_msg.ern.fd);
816 		verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
817 		fd = &swapped_msg.ern.fd;
818 
819 		if (unlikely(verb & 0x20)) {
820 			pr_warn("HW ERN notification, Nothing to do\n");
821 		} else {
822 			if ((fd->bpid & 0xff) != 0xff)
823 				qman_free_mbuf_cb(fd);
824 		}
825 
826 		num++;
827 		qm_mr_next(&p->p);
828 		qm_mr_pvb_update(&p->p);
829 		msg = qm_mr_current(&p->p);
830 	}
831 
832 	qm_mr_cci_consume(&p->p, num);
833 }
834 
835 static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
836 {
837 	const struct qm_mr_entry *msg;
838 	struct qm_mr_entry swapped_msg;
839 
840 	if (is & QM_PIRQ_CSCI) {
841 		struct qman_cgrs rr, c;
842 		struct qm_mc_result *mcr;
843 		struct qman_cgr *cgr;
844 
845 		spin_lock(&p->cgr_lock);
846 		/*
847 		 * The CSCI bit must be cleared _before_ issuing the
848 		 * Query Congestion State command, to ensure that a long
849 		 * CGR State Change callback cannot miss an intervening
850 		 * state change.
851 		 */
852 		qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
853 		qm_mc_start(&p->p);
854 		qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
855 		while (!(mcr = qm_mc_result(&p->p)))
856 			cpu_relax();
857 		/* mask out the ones I'm not interested in */
858 		qman_cgrs_and(&rr, (const struct qman_cgrs *)
859 			&mcr->querycongestion.state, &p->cgrs[0]);
860 		/* check previous snapshot for delta, enter/exit congestion */
861 		qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
862 		/* update snapshot */
863 		qman_cgrs_cp(&p->cgrs[1], &rr);
864 		/* Invoke callback */
865 		list_for_each_entry(cgr, &p->cgr_cbs, node)
866 			if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
867 				cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
868 		spin_unlock(&p->cgr_lock);
869 	}
870 
871 	if (is & QM_PIRQ_EQRI) {
872 		qm_eqcr_cce_update(&p->p);
873 		qm_eqcr_set_ithresh(&p->p, 0);
874 		wake_up(&affine_queue);
875 	}
876 
877 	if (is & QM_PIRQ_MRI) {
878 		struct qman_fq *fq;
879 		u8 verb, num = 0;
880 mr_loop:
881 		qm_mr_pvb_update(&p->p);
882 		msg = qm_mr_current(&p->p);
883 		if (!msg)
884 			goto mr_done;
885 		swapped_msg = *msg;
886 		hw_fd_to_cpu(&swapped_msg.ern.fd);
887 		verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
888 		/* The message is a software ERN iff the 0x20 bit is clear */
889 		if (verb & 0x20) {
890 			switch (verb) {
891 			case QM_MR_VERB_FQRNI:
892 				/* nada, we drop FQRNIs on the floor */
893 				break;
894 			case QM_MR_VERB_FQRN:
895 			case QM_MR_VERB_FQRL:
896 				/* Lookup in the retirement table */
897 				fq = table_find_fq(p,
898 						   be32_to_cpu(msg->fq.fqid));
899 				DPAA_BUG_ON(fq != NULL);
900 				fq_state_change(p, fq, &swapped_msg, verb);
901 				if (fq->cb.fqs)
902 					fq->cb.fqs(p, fq, &swapped_msg);
903 				break;
904 			case QM_MR_VERB_FQPN:
905 				/* Parked */
906 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
907 				fq = get_fq_table_entry(msg->fq.contextB);
908 #else
909 				fq = (void *)(uintptr_t)msg->fq.contextB;
910 #endif
911 				DPAA_BUG_ON(fq != NULL);
912 				fq_state_change(p, fq, msg, verb);
913 				if (fq->cb.fqs)
914 					fq->cb.fqs(p, fq, &swapped_msg);
915 				break;
916 			case QM_MR_VERB_DC_ERN:
917 				/* DCP ERN */
918 				if (p->cb_dc_ern)
919 					p->cb_dc_ern(p, msg);
920 				else if (cb_dc_ern)
921 					cb_dc_ern(p, msg);
922 				else {
923 					static int warn_once;
924 
925 					if (!warn_once) {
926 						pr_crit("Leaking DCP ERNs!\n");
927 						warn_once = 1;
928 					}
929 				}
930 				break;
931 			default:
932 				pr_crit("Invalid MR verb 0x%02x\n", verb);
933 			}
934 		} else {
935 			/* It's a software ERN */
936 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
937 			fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
938 #else
939 			fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
940 #endif
941 			fq->cb.ern(p, fq, &swapped_msg);
942 		}
943 		num++;
944 		qm_mr_next(&p->p);
945 		goto mr_loop;
946 mr_done:
947 		qm_mr_cci_consume(&p->p, num);
948 	}
949 	/*
950 	 * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
951 	 * processing. If that interrupt source has meanwhile been re-asserted,
952 	 * we mustn't clear it here (or in the top-level interrupt handler).
953 	 */
954 	return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
955 }
956 
957 /*
958  * remove some slowish-path stuff from the "fast path" and make sure it isn't
959  * inlined.
960  */
961 static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
962 {
963 	p->vdqcr_owned = NULL;
964 	FQLOCK(fq);
965 	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
966 	FQUNLOCK(fq);
967 	wake_up(&affine_queue);
968 }
969 
970 /*
971  * The only states that would conflict with other things if they ran at the
972  * same time on the same cpu are:
973  *
974  *   (i) setting/clearing vdqcr_owned, and
975  *  (ii) clearing the NE (Not Empty) flag.
976  *
977  * Both are safe. Because;
978  *
979  *   (i) this clearing can only occur after qman_set_vdq() has set the
980  *	 vdqcr_owned field (which it does before setting VDQCR), and
981  *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
982  *	 done so that we can't interfere.
983  *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
984  *	 with (i) that API prevents us from interfering until it's safe.
985  *
986  * The good thing is that qman_set_vdq() and qman_retire_fq() run far
987  * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett
988  * advantage comes from this function not having to "lock" anything at all.
989  *
990  * Note also that the callbacks are invoked at points which are safe against the
991  * above potential conflicts, but that this function itself is not re-entrant
992  * (this is because the function tracks one end of each FIFO in the portal and
993  * we do *not* want to lock that). So the consequence is that it is safe for
994  * user callbacks to call into any QMan API.
995  */
996 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
997 					      unsigned int poll_limit)
998 {
999 	const struct qm_dqrr_entry *dq;
1000 	struct qman_fq *fq;
1001 	enum qman_cb_dqrr_result res;
1002 	unsigned int limit = 0;
1003 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1004 	struct qm_dqrr_entry *shadow;
1005 #endif
1006 	do {
1007 		qm_dqrr_pvb_update(&p->p);
1008 		dq = qm_dqrr_current(&p->p);
1009 		if (unlikely(!dq))
1010 			break;
1011 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1012 	/* If running on an LE system, the fields of the
1013 	 * dequeue entry must be byte-swapped. Because the
1014 	 * QMan HW will ignore writes, the DQRR entry is
1015 	 * copied and the index is stored within the copy.
1016 	 */
1017 		shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
1018 		*shadow = *dq;
1019 		dq = shadow;
1020 		shadow->fqid = be32_to_cpu(shadow->fqid);
1021 		shadow->seqnum = be16_to_cpu(shadow->seqnum);
1022 		hw_fd_to_cpu(&shadow->fd);
1023 #endif
1024 
1025 		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
1026 			/*
1027 			 * VDQCR: don't trust context_b as the FQ may have
1028 			 * been configured for h/w consumption and we're
1029 			 * draining it post-retirement.
1030 			 */
1031 			fq = p->vdqcr_owned;
1032 			/*
1033 			 * We only set QMAN_FQ_STATE_NE when retiring, so we
1034 			 * only need to check for clearing it when doing
1035 			 * volatile dequeues.  It's one less thing to check
1036 			 * in the critical path (SDQCR).
1037 			 */
1038 			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
1039 				fq_clear(fq, QMAN_FQ_STATE_NE);
1040 			/*
1041 			 * This is duplicated from the SDQCR code, but we
1042 			 * have stuff to do before *and* after this callback,
1043 			 * and we don't want multiple if()s in the critical
1044 			 * path (SDQCR).
1045 			 */
1046 			res = fq->cb.dqrr(p, fq, dq);
1047 			if (res == qman_cb_dqrr_stop)
1048 				break;
1049 			/* Check for VDQCR completion */
1050 			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1051 				clear_vdqcr(p, fq);
1052 		} else {
1053 			/* SDQCR: context_b points to the FQ */
1054 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1055 			fq = get_fq_table_entry(dq->contextB);
1056 #else
1057 			fq = (void *)(uintptr_t)dq->contextB;
1058 #endif
1059 			/* Now let the callback do its stuff */
1060 			res = fq->cb.dqrr(p, fq, dq);
1061 			/*
1062 			 * The callback can request that we exit without
1063 			 * consuming this entry or advancing.
1064 			 */
1065 			if (res == qman_cb_dqrr_stop)
1066 				break;
1067 		}
1068 		/* Interpret 'dq' from a driver perspective. */
1069 		/*
1070 		 * Parking isn't possible unless HELDACTIVE was set. NB,
1071 		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1072 		 * check for HELDACTIVE to cover both.
1073 		 */
1074 		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
1075 			    (res != qman_cb_dqrr_park));
1076 		/* just means "skip it, I'll consume it myself later on" */
1077 		if (res != qman_cb_dqrr_defer)
1078 			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1079 						 res == qman_cb_dqrr_park);
1080 		/* Move forward */
1081 		qm_dqrr_next(&p->p);
1082 		/*
1083 		 * Entry processed and consumed, increment our counter.  The
1084 		 * callback can request that we exit after consuming the
1085 		 * entry, and we also exit if we reach our processing limit,
1086 		 * so loop back only if neither of these conditions is met.
1087 		 */
1088 	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
1089 
1090 	return limit;
1091 }
1092 
1093 int qman_irqsource_add(u32 bits)
1094 {
1095 	struct qman_portal *p = get_affine_portal();
1096 
1097 	bits = bits & QM_PIRQ_VISIBLE;
1098 
1099 	/* Clear any previously remaining interrupt conditions in
1100 	 * QCSP_ISR. This prevents raising a false interrupt when
1101 	 * interrupt conditions are enabled in QCSP_IER.
1102 	 */
1103 	qm_isr_status_clear(&p->p, bits);
1104 	dpaa_set_bits(bits, &p->irq_sources);
1105 	qm_isr_enable_write(&p->p, p->irq_sources);
1106 
1107 	return 0;
1108 }
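
/* Illustrative usage (not part of this file): an interrupt-driven consumer
 * would typically enable the dequeue-available source on its affine portal,
 * e.g. qman_irqsource_add(QM_PIRQ_DQRI), and later remove it again with
 * qman_irqsource_remove(QM_PIRQ_DQRI) before switching back to polling.
 */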
1109 
1110 int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits)
1111 {
1112 	bits = bits & QM_PIRQ_VISIBLE;
1113 
1114 	/* Clear any previously remaining interrupt conditions in
1115 	 * QCSP_ISR. This prevents raising a false interrupt when
1116 	 * interrupt conditions are enabled in QCSP_IER.
1117 	 */
1118 	qm_isr_status_clear(&p->p, bits);
1119 	dpaa_set_bits(bits, &p->irq_sources);
1120 	qm_isr_enable_write(&p->p, p->irq_sources);
1121 
1122 	return 0;
1123 }
1124 
1125 int qman_irqsource_remove(u32 bits)
1126 {
1127 	struct qman_portal *p = get_affine_portal();
1128 	u32 ier;
1129 
1130 	/* Our interrupt handler only processes+clears status register bits that
1131 	 * are in p->irq_sources. As we're trimming that mask, if one of them
1132 	 * were to assert in the status register just before we remove it from
1133 	 * the enable register, there would be an interrupt-storm when we
1134 	 * release the IRQ lock. So we wait for the enable register update to
1135 	 * take effect in h/w (by reading it back) and then clear all other bits
1136 	 * in the status register. Ie. we clear them from ISR once it's certain
1137 	 * IER won't allow them to reassert.
1138 	 */
1139 
1140 	bits &= QM_PIRQ_VISIBLE;
1141 	dpaa_clear_bits(bits, &p->irq_sources);
1142 	qm_isr_enable_write(&p->p, p->irq_sources);
1143 	ier = qm_isr_enable_read(&p->p);
1144 	/* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
1145 	 * data-dependency, ie. to protect against re-ordering.
1146 	 */
1147 	qm_isr_status_clear(&p->p, ~ier);
1148 	return 0;
1149 }
1150 
1151 int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits)
1152 {
1153 	u32 ier;
1154 
1155 	/* Our interrupt handler only processes+clears status register bits that
1156 	 * are in p->irq_sources. As we're trimming that mask, if one of them
1157 	 * were to assert in the status register just before we remove it from
1158 	 * the enable register, there would be an interrupt-storm when we
1159 	 * release the IRQ lock. So we wait for the enable register update to
1160 	 * take effect in h/w (by reading it back) and then clear all other bits
1161 	 * in the status register. Ie. we clear them from ISR once it's certain
1162 	 * IER won't allow them to reassert.
1163 	 */
1164 
1165 	bits &= QM_PIRQ_VISIBLE;
1166 	dpaa_clear_bits(bits, &p->irq_sources);
1167 	qm_isr_enable_write(&p->p, p->irq_sources);
1168 	ier = qm_isr_enable_read(&p->p);
1169 	/* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
1170 	 * data-dependency, ie. to protect against re-ordering.
1171 	 */
1172 	qm_isr_status_clear(&p->p, ~ier);
1173 	return 0;
1174 }
1175 
1176 u16 qman_affine_channel(int cpu)
1177 {
1178 	if (cpu < 0) {
1179 		struct qman_portal *portal = get_affine_portal();
1180 
1181 		cpu = portal->config->cpu;
1182 	}
1183 	DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
1184 	return affine_channels[cpu];
1185 }
1186 
1187 unsigned int qman_portal_poll_rx(unsigned int poll_limit,
1188 				 void **bufs,
1189 				 struct qman_portal *p)
1190 {
1191 	struct qm_portal *portal = &p->p;
1192 	register struct qm_dqrr *dqrr = &portal->dqrr;
1193 	struct qm_dqrr_entry *dq[QM_DQRR_SIZE], *shadow[QM_DQRR_SIZE];
1194 	struct qman_fq *fq;
1195 	unsigned int limit = 0, rx_number = 0;
1196 	uint32_t consume = 0;
1197 
1198 	do {
1199 		qm_dqrr_pvb_update(&p->p);
1200 		if (!dqrr->fill)
1201 			break;
1202 
1203 		dq[rx_number] = dqrr->cursor;
1204 		dqrr->cursor = DQRR_CARRYCLEAR(dqrr->cursor + 1);
1205 		/* Prefetch the next DQRR entry */
1206 		rte_prefetch0(dqrr->cursor);
1207 
1208 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1209 		/* If running on an LE system, the fields of the
1210 		 * dequeue entry must be byte-swapped. Because the
1211 		 * QMan HW will ignore writes, the DQRR entry is
1212 		 * copied and the index is stored within the copy.
1213 		 */
1214 		shadow[rx_number] =
1215 			&p->shadow_dqrr[DQRR_PTR2IDX(dq[rx_number])];
1216 		shadow[rx_number]->fd.opaque_addr =
1217 			dq[rx_number]->fd.opaque_addr;
1218 		shadow[rx_number]->fd.addr =
1219 			be40_to_cpu(dq[rx_number]->fd.addr);
1220 		shadow[rx_number]->fd.opaque =
1221 			be32_to_cpu(dq[rx_number]->fd.opaque);
1222 #else
1223 		shadow[rx_number] = dq[rx_number];
1224 #endif
1225 
1226 		/* SDQCR: context_b points to the FQ */
1227 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1228 		fq = qman_fq_lookup_table[dq[rx_number]->contextB];
1229 #else
1230 		fq = (void *)dq[rx_number]->contextB;
1231 #endif
1232 		if (fq->cb.dqrr_prepare)
1233 			fq->cb.dqrr_prepare(shadow[rx_number],
1234 					    &bufs[rx_number]);
1235 
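		/* Record this entry's ring index in the consumption bitmask
		 * (bit 31 = index 0); all marked entries are acknowledged with
		 * a single DQRR_DCAP write after the loop.
		 */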
1236 		consume |= (1 << (31 - DQRR_PTR2IDX(shadow[rx_number])));
1237 		rx_number++;
1238 		--dqrr->fill;
1239 	} while (++limit < poll_limit);
1240 
1241 	if (rx_number)
1242 		fq->cb.dqrr_dpdk_pull_cb(&fq, shadow, bufs, rx_number);
1243 
1244 	/* Consume all the DQRR entries together */
1245 	qm_out(DQRR_DCAP, (1 << 8) | consume);
1246 
1247 	return rx_number;
1248 }
1249 
1250 void qman_clear_irq(void)
1251 {
1252 	struct qman_portal *p = get_affine_portal();
1253 	u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
1254 		~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
1255 	qm_isr_status_clear(&p->p, clear);
1256 }
1257 
1258 u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
1259 			void **bufs)
1260 {
1261 	const struct qm_dqrr_entry *dq;
1262 	struct qman_fq *fq;
1263 	enum qman_cb_dqrr_result res;
1264 	unsigned int limit = 0;
1265 	struct qman_portal *p = get_affine_portal();
1266 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1267 	struct qm_dqrr_entry *shadow;
1268 #endif
1269 	unsigned int rx_number = 0;
1270 
1271 	do {
1272 		qm_dqrr_pvb_update(&p->p);
1273 		dq = qm_dqrr_current(&p->p);
1274 		if (!dq)
1275 			break;
1276 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1277 		/*
1278 		 * If running on an LE system, the fields of the
1279 		 * dequeue entry must be byte-swapped. Because the
1280 		 * QMan HW will ignore writes, the DQRR entry is
1281 		 * copied and the index is stored within the copy.
1282 		 */
1283 		shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
1284 		*shadow = *dq;
1285 		dq = shadow;
1286 		shadow->fqid = be32_to_cpu(shadow->fqid);
1287 		shadow->seqnum = be16_to_cpu(shadow->seqnum);
1288 		hw_fd_to_cpu(&shadow->fd);
1289 #endif
1290 
1291 	       /* SDQCR: context_b points to the FQ */
1292 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1293 		fq = get_fq_table_entry(dq->contextB);
1294 #else
1295 		fq = (void *)(uintptr_t)dq->contextB;
1296 #endif
1297 		/* Now let the callback do its stuff */
1298 		res = fq->cb.dqrr_dpdk_cb(&ev[rx_number], p, fq,
1299 					 dq, &bufs[rx_number]);
1300 		rx_number++;
1301 		/* Interpret 'dq' from a driver perspective. */
1302 		/*
1303 		 * Parking isn't possible unless HELDACTIVE was set. NB,
1304 		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1305 		 * check for HELDACTIVE to cover both.
1306 		 */
1307 		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
1308 			    (res != qman_cb_dqrr_park));
1309 		if (res != qman_cb_dqrr_defer)
1310 			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1311 						 res == qman_cb_dqrr_park);
1312 		/* Move forward */
1313 		qm_dqrr_next(&p->p);
1314 		/*
1315 		 * Entry processed and consumed, increment our counter.  The
1316 		 * callback can request that we exit after consuming the
1317 		 * entry, and we also exit if we reach our processing limit,
1318 		 * so loop back only if neither of these conditions is met.
1319 		 */
1320 	} while (++limit < poll_limit);
1321 
1322 	return limit;
1323 }
1324 
1325 struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
1326 {
1327 	struct qman_portal *p = get_affine_portal();
1328 	const struct qm_dqrr_entry *dq;
1329 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1330 	struct qm_dqrr_entry *shadow;
1331 #endif
1332 
1333 	qm_dqrr_pvb_update(&p->p);
1334 	dq = qm_dqrr_current(&p->p);
1335 	if (!dq)
1336 		return NULL;
1337 
1338 	if (!(dq->stat & QM_DQRR_STAT_FD_VALID)) {
1339 		/* Invalid DQRR - put the portal and consume the DQRR.
1340 		 * Return NULL to user as no packet is seen.
1341 		 */
1342 		qman_dqrr_consume(fq, (struct qm_dqrr_entry *)dq);
1343 		return NULL;
1344 	}
1345 
1346 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1347 	shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
1348 	*shadow = *dq;
1349 	dq = shadow;
1350 	shadow->fqid = be32_to_cpu(shadow->fqid);
1351 	shadow->seqnum = be16_to_cpu(shadow->seqnum);
1352 	hw_fd_to_cpu(&shadow->fd);
1353 #endif
1354 
1355 	if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
1356 		fq_clear(fq, QMAN_FQ_STATE_NE);
1357 
1358 	return (struct qm_dqrr_entry *)dq;
1359 }
1360 
1361 void qman_dqrr_consume(struct qman_fq *fq,
1362 		       struct qm_dqrr_entry *dq)
1363 {
1364 	struct qman_portal *p = get_affine_portal();
1365 
1366 	if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1367 		clear_vdqcr(p, fq);
1368 
1369 	qm_dqrr_cdc_consume_1ptr(&p->p, dq, 0);
1370 	qm_dqrr_next(&p->p);
1371 }
1372 
1373 void qman_stop_dequeues(void)
1374 {
1375 	struct qman_portal *p = get_affine_portal();
1376 
1377 	qman_stop_dequeues_ex(p);
1378 }
1379 
1380 void qman_start_dequeues(void)
1381 {
1382 	struct qman_portal *p = get_affine_portal();
1383 
1384 	DPAA_ASSERT(p->dqrr_disable_ref > 0);
1385 	if (!(--p->dqrr_disable_ref))
1386 		qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
1387 }
1388 
1389 void qman_static_dequeue_add(u32 pools, struct qman_portal *qp)
1390 {
1391 	struct qman_portal *p = qp ? qp : get_affine_portal();
1392 
1393 	pools &= p->config->pools;
1394 	p->sdqcr |= pools;
1395 	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1396 }
1397 
1398 void qman_static_dequeue_del(u32 pools, struct qman_portal *qp)
1399 {
1400 	struct qman_portal *p = qp ? qp : get_affine_portal();
1401 
1402 	pools &= p->config->pools;
1403 	p->sdqcr &= ~pools;
1404 	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1405 }
1406 
1407 u32 qman_static_dequeue_get(struct qman_portal *qp)
1408 {
1409 	struct qman_portal *p = qp ? qp : get_affine_portal();
1410 	return p->sdqcr;
1411 }
1412 
1413 void qman_dca(const struct qm_dqrr_entry *dq, int park_request)
1414 {
1415 	struct qman_portal *p = get_affine_portal();
1416 
1417 	qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
1418 }
1419 
1420 void qman_dca_index(u8 index, int park_request)
1421 {
1422 	struct qman_portal *p = get_affine_portal();
1423 
1424 	qm_dqrr_cdc_consume_1(&p->p, index, park_request);
1425 }
1426 
1427 /* Frame queue API */
1428 static const char *mcr_result_str(u8 result)
1429 {
1430 	switch (result) {
1431 	case QM_MCR_RESULT_NULL:
1432 		return "QM_MCR_RESULT_NULL";
1433 	case QM_MCR_RESULT_OK:
1434 		return "QM_MCR_RESULT_OK";
1435 	case QM_MCR_RESULT_ERR_FQID:
1436 		return "QM_MCR_RESULT_ERR_FQID";
1437 	case QM_MCR_RESULT_ERR_FQSTATE:
1438 		return "QM_MCR_RESULT_ERR_FQSTATE";
1439 	case QM_MCR_RESULT_ERR_NOTEMPTY:
1440 		return "QM_MCR_RESULT_ERR_NOTEMPTY";
1441 	case QM_MCR_RESULT_PENDING:
1442 		return "QM_MCR_RESULT_PENDING";
1443 	case QM_MCR_RESULT_ERR_BADCOMMAND:
1444 		return "QM_MCR_RESULT_ERR_BADCOMMAND";
1445 	}
1446 	return "<unknown MCR result>";
1447 }
1448 
1449 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
1450 {
1451 	struct qm_fqd fqd;
1452 	struct qm_mcr_queryfq_np np;
1453 	struct qm_mc_command *mcc;
1454 	struct qm_mc_result *mcr;
1455 	struct qman_portal *p;
1456 
1457 	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
1458 		int ret = qman_alloc_fqid(&fqid);
1459 
1460 		if (ret)
1461 			return ret;
1462 	}
1463 	spin_lock_init(&fq->fqlock);
1464 	fq->fqid = fqid;
1465 	fq->fqid_le = cpu_to_be32(fqid);
1466 	fq->flags = flags;
1467 	fq->state = qman_fq_state_oos;
1468 	fq->cgr_groupid = 0;
1469 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1470 	if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) {
1471 		pr_info("Find empty table entry failed\n");
1472 		return -ENOMEM;
1473 	}
1474 	fq->qman_fq_lookup_table = qman_fq_lookup_table;
1475 #endif
1476 	if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
1477 		return 0;
1478 	/* Everything else is AS_IS support */
1479 	p = get_affine_portal();
1480 	mcc = qm_mc_start(&p->p);
1481 	mcc->queryfq.fqid = cpu_to_be32(fqid);
1482 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
1483 	while (!(mcr = qm_mc_result(&p->p)))
1484 		cpu_relax();
1485 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
1486 	if (mcr->result != QM_MCR_RESULT_OK) {
1487 		pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
1488 		goto err;
1489 	}
1490 	fqd = mcr->queryfq.fqd;
1491 	hw_fqd_to_cpu(&fqd);
1492 	mcc = qm_mc_start(&p->p);
1493 	mcc->queryfq_np.fqid = cpu_to_be32(fqid);
1494 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1495 	while (!(mcr = qm_mc_result(&p->p)))
1496 		cpu_relax();
1497 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
1498 	if (mcr->result != QM_MCR_RESULT_OK) {
1499 		pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
1500 		goto err;
1501 	}
1502 	np = mcr->queryfq_np;
1503 	/* Phew, have queryfq and queryfq_np results, stitch together
1504 	 * the FQ object from those.
1505 	 */
1506 	fq->cgr_groupid = fqd.cgid;
1507 	switch (np.state & QM_MCR_NP_STATE_MASK) {
1508 	case QM_MCR_NP_STATE_OOS:
1509 		break;
1510 	case QM_MCR_NP_STATE_RETIRED:
1511 		fq->state = qman_fq_state_retired;
1512 		if (np.frm_cnt)
1513 			fq_set(fq, QMAN_FQ_STATE_NE);
1514 		break;
1515 	case QM_MCR_NP_STATE_TEN_SCHED:
1516 	case QM_MCR_NP_STATE_TRU_SCHED:
1517 	case QM_MCR_NP_STATE_ACTIVE:
1518 		fq->state = qman_fq_state_sched;
1519 		if (np.state & QM_MCR_NP_STATE_R)
1520 			fq_set(fq, QMAN_FQ_STATE_CHANGING);
1521 		break;
1522 	case QM_MCR_NP_STATE_PARKED:
1523 		fq->state = qman_fq_state_parked;
1524 		break;
1525 	default:
1526 		DPAA_ASSERT(NULL == "invalid FQ state");
1527 	}
1528 	if (fqd.fq_ctrl & QM_FQCTRL_CGE)
1529 		fq->state |= QMAN_FQ_STATE_CGR_EN;
1530 	return 0;
1531 err:
1532 	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
1533 		qman_release_fqid(fqid);
1534 	return -EIO;
1535 }
1536 
1537 void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
1538 {
1539 	/*
1540 	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
1541 	 * quiesced. Instead, run some checks.
1542 	 */
1543 	switch (fq->state) {
1544 	case qman_fq_state_parked:
1545 		DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
1546 		/* Fallthrough */
1547 	case qman_fq_state_oos:
1548 		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
1549 			qman_release_fqid(fq->fqid);
1550 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1551 		clear_fq_table_entry(fq->key);
1552 #endif
1553 		return;
1554 	default:
1555 		break;
1556 	}
1557 	DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
1558 }
1559 
1560 u32 qman_fq_fqid(struct qman_fq *fq)
1561 {
1562 	return fq->fqid;
1563 }
1564 
1565 void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
1566 {
1567 	if (state)
1568 		*state = fq->state;
1569 	if (flags)
1570 		*flags = fq->flags;
1571 }
1572 
1573 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
1574 {
1575 	struct qm_mc_command *mcc;
1576 	struct qm_mc_result *mcr;
1577 	struct qman_portal *p;
1578 
1579 	u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1580 		QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
1581 
1582 	if ((fq->state != qman_fq_state_oos) &&
1583 	    (fq->state != qman_fq_state_parked))
1584 		return -EINVAL;
1585 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1586 	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1587 		return -EINVAL;
1588 #endif
1589 	if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
1590 		/* And can't be set at the same time as TDTHRESH */
1591 		if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
1592 			return -EINVAL;
1593 	}
1594 	/* Issue an INITFQ_[PARKED|SCHED] management command */
1595 	p = get_affine_portal();
1596 	FQLOCK(fq);
1597 	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1598 		     ((fq->state != qman_fq_state_oos) &&
1599 				(fq->state != qman_fq_state_parked)))) {
1600 		FQUNLOCK(fq);
1601 		return -EBUSY;
1602 	}
1603 	mcc = qm_mc_start(&p->p);
1604 	if (opts)
1605 		mcc->initfq = *opts;
1606 	mcc->initfq.fqid = cpu_to_be32(fq->fqid);
1607 	mcc->initfq.count = 0;
1608 	/*
1609 	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
1610 	 * demux pointer. Otherwise, the caller-provided value is allowed to
1611 	 * stand, don't overwrite it.
1612 	 */
1613 	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
1614 		dma_addr_t phys_fq;
1615 
1616 		mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
1617 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1618 		mcc->initfq.fqd.context_b = cpu_to_be32(fq->key);
1619 #else
1620 		mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
1621 #endif
1622 		/*
1623 		 *  and the physical address - NB, if the user wasn't trying to
1624 		 * set CONTEXTA, clear the stashing settings.
1625 		 */
1626 		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
1627 			mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
1628 			memset(&mcc->initfq.fqd.context_a, 0,
1629 			       sizeof(mcc->initfq.fqd.context_a));
1630 		} else {
1631 			phys_fq = rte_mem_virt2iova(fq);
1632 			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
1633 		}
1634 	}
1635 	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
1636 		mcc->initfq.fqd.dest.channel = p->config->channel;
1637 		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
1638 			mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
1639 			mcc->initfq.fqd.dest.wq = 4;
1640 		}
1641 	}
1642 	mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
1643 	cpu_to_hw_fqd(&mcc->initfq.fqd);
1644 	qm_mc_commit(&p->p, myverb);
1645 	while (!(mcr = qm_mc_result(&p->p)))
1646 		cpu_relax();
1647 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1648 	res = mcr->result;
1649 	if (res != QM_MCR_RESULT_OK) {
1650 		FQUNLOCK(fq);
1651 		return -EIO;
1652 	}
1653 	if (opts) {
1654 		if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
1655 			if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
1656 				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
1657 			else
1658 				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
1659 		}
1660 		if (opts->we_mask & QM_INITFQ_WE_CGID)
1661 			fq->cgr_groupid = opts->fqd.cgid;
1662 	}
1663 	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1664 		qman_fq_state_sched : qman_fq_state_parked;
1665 	FQUNLOCK(fq);
1666 	return 0;
1667 }
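
/* Illustrative sketch (not part of this file) of the FQ lifecycle these APIs
 * implement. Error handling is omitted, and my_dqrr_cb and the opts settings
 * are hypothetical examples:
 *
 *	struct qman_fq fq = { .cb.dqrr = my_dqrr_cb };
 *	struct qm_mcc_initfq opts = { 0 };
 *
 *	qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &fq);
 *	opts.we_mask = QM_INITFQ_WE_DESTWQ;
 *	opts.fqd.dest.wq = 4;
 *	qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 *	...
 *	qman_retire_fq(&fq, NULL);
 *	qman_oos_fq(&fq);
 *	qman_destroy_fq(&fq, 0);
 */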
1668 
1669 int qman_schedule_fq(struct qman_fq *fq)
1670 {
1671 	struct qm_mc_command *mcc;
1672 	struct qm_mc_result *mcr;
1673 	struct qman_portal *p;
1674 
1675 	int ret = 0;
1676 	u8 res;
1677 
1678 	if (fq->state != qman_fq_state_parked)
1679 		return -EINVAL;
1680 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1681 	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1682 		return -EINVAL;
1683 #endif
1684 	/* Issue a ALTERFQ_SCHED management command */
1685 	p = get_affine_portal();
1686 
1687 	FQLOCK(fq);
1688 	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1689 		     (fq->state != qman_fq_state_parked))) {
1690 		ret = -EBUSY;
1691 		goto out;
1692 	}
1693 	mcc = qm_mc_start(&p->p);
1694 	mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1695 	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
1696 	while (!(mcr = qm_mc_result(&p->p)))
1697 		cpu_relax();
1698 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
1699 	res = mcr->result;
1700 	if (res != QM_MCR_RESULT_OK) {
1701 		ret = -EIO;
1702 		goto out;
1703 	}
1704 	fq->state = qman_fq_state_sched;
1705 out:
1706 	FQUNLOCK(fq);
1707 
1708 	return ret;
1709 }
1710 
1711 int qman_retire_fq(struct qman_fq *fq, u32 *flags)
1712 {
1713 	struct qm_mc_command *mcc;
1714 	struct qm_mc_result *mcr;
1715 	struct qman_portal *p;
1716 
1717 	int rval;
1718 	u8 res;
1719 
1720 	/* Queue is already in retire or oos state */
1721 	if ((fq->state != qman_fq_state_parked) &&
1722 	    (fq->state != qman_fq_state_sched))
1723 		return 0;
1724 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1725 	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1726 		return -EINVAL;
1727 #endif
1728 	p = get_affine_portal();
1729 
1730 	FQLOCK(fq);
1731 	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1732 		     (fq->state == qman_fq_state_retired) ||
1733 				(fq->state == qman_fq_state_oos))) {
1734 		rval = -EBUSY;
1735 		goto out;
1736 	}
1737 	rval = table_push_fq(p, fq);
1738 	if (rval)
1739 		goto out;
1740 	mcc = qm_mc_start(&p->p);
1741 	mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1742 	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
1743 	while (!(mcr = qm_mc_result(&p->p)))
1744 		cpu_relax();
1745 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
1746 	res = mcr->result;
1747 	/*
1748 	 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
1749 	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
1750 	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
1751 	 * friendly, otherwise the caller doesn't necessarily have a fully
1752 	 * "retired" FQ on return even if the retirement was immediate. However
1753 	 * this does mean some code duplication between here and
1754 	 * fq_state_change().
1755 	 */
1756 	if (likely(res == QM_MCR_RESULT_OK)) {
1757 		rval = 0;
1758 		/* Process 'fq' right away, we'll ignore FQRNI */
1759 		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
1760 			fq_set(fq, QMAN_FQ_STATE_NE);
1761 		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
1762 			fq_set(fq, QMAN_FQ_STATE_ORL);
1763 		else
1764 			table_del_fq(p, fq);
1765 		if (flags)
1766 			*flags = fq->flags;
1767 		fq->state = qman_fq_state_retired;
1768 		if (fq->cb.fqs) {
1769 			/*
1770 			 * Another issue with supporting "immediate" retirement
1771 			 * is that we're forced to drop FQRNIs, because by the
1772 			 * time they're seen it may already be "too late" (the
1773 			 * fq may have been OOS'd and free()'d already). But if
1774 			 * the upper layer wants a callback whether it's
1775 			 * immediate or not, we have to fake a "MR" entry to
1776 			 * look like an FQRNI...
1777 			 */
1778 			struct qm_mr_entry msg;
1779 
1780 			msg.ern.verb = QM_MR_VERB_FQRNI;
1781 			msg.fq.fqs = mcr->alterfq.fqs;
1782 			msg.fq.fqid = fq->fqid;
1783 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1784 			msg.fq.contextB = fq->key;
1785 #else
1786 			msg.fq.contextB = (u32)(uintptr_t)fq;
1787 #endif
1788 			fq->cb.fqs(p, fq, &msg);
1789 		}
1790 	} else if (res == QM_MCR_RESULT_PENDING) {
1791 		rval = 1;
1792 		fq_set(fq, QMAN_FQ_STATE_CHANGING);
1793 	} else {
1794 		rval = -EIO;
1795 		table_del_fq(p, fq);
1796 	}
1797 out:
1798 	FQUNLOCK(fq);
1799 	/* Draining FQRNIs, if any */
1800 	drain_mr_fqrni(&p->p);
1801 	return rval;
1802 }
1803 
1804 int qman_oos_fq(struct qman_fq *fq)
1805 {
1806 	struct qm_mc_command *mcc;
1807 	struct qm_mc_result *mcr;
1808 	struct qman_portal *p;
1809 
1810 	int ret = 0;
1811 	u8 res;
1812 
1813 	if (fq->state != qman_fq_state_retired)
1814 		return -EINVAL;
1815 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1816 	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1817 		return -EINVAL;
1818 #endif
1819 	p = get_affine_portal();
1820 	FQLOCK(fq);
1821 	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
1822 		     (fq->state != qman_fq_state_retired))) {
1823 		ret = -EBUSY;
1824 		goto out;
1825 	}
1826 	mcc = qm_mc_start(&p->p);
1827 	mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1828 	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
1829 	while (!(mcr = qm_mc_result(&p->p)))
1830 		cpu_relax();
1831 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
1832 	res = mcr->result;
1833 	if (res != QM_MCR_RESULT_OK) {
1834 		ret = -EIO;
1835 		goto out;
1836 	}
1837 	fq->state = qman_fq_state_oos;
1838 out:
1839 	FQUNLOCK(fq);
1840 	return ret;
1841 }
1842 
1843 int qman_fq_flow_control(struct qman_fq *fq, int xon)
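/*
 * Illustrative sketch only (not part of the driver, hypothetical helper
 * name): the usual teardown sequence built from the two calls above. A
 * return of 1 from qman_retire_fq() means retirement is still pending and
 * completes via the FQRN message-ring callback, so the queue is only moved
 * out-of-service here when retirement happened immediately.
 */
static inline int example_fq_teardown(struct qman_fq *fq)
{
	u32 flags;
	int ret = qman_retire_fq(fq, &flags);

	if (ret < 0)
		return ret;	/* retire command failed */
	if (ret == 1)
		return ret;	/* retirement pending, OOS must wait for FQRN */
	return qman_oos_fq(fq);
}
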
1844 {
1845 	struct qm_mc_command *mcc;
1846 	struct qm_mc_result *mcr;
1847 	struct qman_portal *p;
1848 
1849 	int ret = 0;
1850 	u8 res;
1851 	u8 myverb;
1852 
1853 	if ((fq->state == qman_fq_state_oos) ||
1854 	    (fq->state == qman_fq_state_retired) ||
1855 		(fq->state == qman_fq_state_parked))
1856 		return -EINVAL;
1857 
1858 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1859 	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1860 		return -EINVAL;
1861 #endif
1862 	/* Issue an ALTER_FQXON or ALTER_FQXOFF management command */
1863 	p = get_affine_portal();
1864 	FQLOCK(fq);
1865 	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1866 		     (fq->state == qman_fq_state_parked) ||
1867 			(fq->state == qman_fq_state_oos) ||
1868 			(fq->state == qman_fq_state_retired))) {
1869 		ret = -EBUSY;
1870 		goto out;
1871 	}
1872 	mcc = qm_mc_start(&p->p);
1873 	mcc->alterfq.fqid = fq->fqid;
1874 	mcc->alterfq.count = 0;
1875 	myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
1876 
1877 	qm_mc_commit(&p->p, myverb);
1878 	while (!(mcr = qm_mc_result(&p->p)))
1879 		cpu_relax();
1880 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1881 
1882 	res = mcr->result;
1883 	if (res != QM_MCR_RESULT_OK) {
1884 		ret = -EIO;
1885 		goto out;
1886 	}
1887 out:
1888 	FQUNLOCK(fq);
1889 	return ret;
1890 }
1891 
1892 int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
1893 {
1894 	struct qm_mc_command *mcc;
1895 	struct qm_mc_result *mcr;
1896 	struct qman_portal *p = get_affine_portal();
1897 
1898 	u8 res;
1899 
1900 	mcc = qm_mc_start(&p->p);
1901 	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1902 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
1903 	while (!(mcr = qm_mc_result(&p->p)))
1904 		cpu_relax();
1905 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
1906 	res = mcr->result;
1907 	if (res != QM_MCR_RESULT_OK)
1908 		return -EIO;
1909 	*fqd = mcr->queryfq.fqd;
1910 	/* Convert the descriptor fields to CPU byte order */
1911 	hw_fqd_to_cpu(fqd);
1912 	return 0;
1913 }
1914 
1915 int qman_query_fq_has_pkts(struct qman_fq *fq)
1916 {
1917 	struct qm_mc_command *mcc;
1918 	struct qm_mc_result *mcr;
1919 	struct qman_portal *p = get_affine_portal();
1920 
1921 	int ret = 0;
1922 	u8 res;
1923 
1924 	mcc = qm_mc_start(&p->p);
1925 	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1926 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1927 	while (!(mcr = qm_mc_result(&p->p)))
1928 		cpu_relax();
1929 	res = mcr->result;
1930 	if (res == QM_MCR_RESULT_OK)
1931 		ret = !!mcr->queryfq_np.frm_cnt;
1932 	return ret;
1933 }
1934 
1935 int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
1936 {
1937 	struct qm_mc_command *mcc;
1938 	struct qm_mc_result *mcr;
1939 	struct qman_portal *p = get_affine_portal();
1940 
1941 	u8 res;
1942 
1943 	mcc = qm_mc_start(&p->p);
1944 	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1945 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1946 	while (!(mcr = qm_mc_result(&p->p)))
1947 		cpu_relax();
1948 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
1949 	res = mcr->result;
1950 	if (res == QM_MCR_RESULT_OK) {
1951 		*np = mcr->queryfq_np;
1952 		np->fqd_link = be24_to_cpu(np->fqd_link);
1953 		np->odp_seq = be16_to_cpu(np->odp_seq);
1954 		np->orp_nesn = be16_to_cpu(np->orp_nesn);
1955 		np->orp_ea_hseq  = be16_to_cpu(np->orp_ea_hseq);
1956 		np->orp_ea_tseq  = be16_to_cpu(np->orp_ea_tseq);
1957 		np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
1958 		np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
1959 		np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
1960 		np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
1961 		np->ics_surp = be16_to_cpu(np->ics_surp);
1962 		np->byte_cnt = be32_to_cpu(np->byte_cnt);
1963 		np->frm_cnt = be24_to_cpu(np->frm_cnt);
1964 		np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
1965 		np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
1966 		np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
1967 		np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
1968 		np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
1969 	}
1970 	if (res == QM_MCR_RESULT_ERR_FQID)
1971 		return -ERANGE;
1972 	else if (res != QM_MCR_RESULT_OK)
1973 		return -EIO;
1974 	return 0;
1975 }
1976 
1977 int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt)
1978 {
1979 	struct qm_mc_command *mcc;
1980 	struct qm_mc_result *mcr;
1981 	struct qman_portal *p = get_affine_portal();
1982 
1983 	mcc = qm_mc_start(&p->p);
1984 	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1985 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1986 	while (!(mcr = qm_mc_result(&p->p)))
1987 		cpu_relax();
1988 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
1989 
1990 	if (mcr->result == QM_MCR_RESULT_OK)
1991 		*frm_cnt = be24_to_cpu(mcr->queryfq_np.frm_cnt);
1992 	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
1993 		return -ERANGE;
1994 	else
1995 		return -EIO;
1996 	return 0;
1997 }
1998 
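/*
 * Illustrative sketch only (not part of the driver, hypothetical helper
 * name): busy-wait until the frame count reported by QUERYFQ_NP drops to
 * zero, purely as a usage example of qman_query_fq_frm_cnt(). A real
 * caller would bound the loop or sleep between polls.
 */
static inline void example_wait_fq_empty(struct qman_fq *fq)
{
	u32 frm_cnt;

	do {
		if (qman_query_fq_frm_cnt(fq, &frm_cnt))
			break;	/* query failed, give up */
		cpu_relax();
	} while (frm_cnt);
}
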
1999 int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
2000 {
2001 	struct qm_mc_command *mcc;
2002 	struct qm_mc_result *mcr;
2003 	struct qman_portal *p = get_affine_portal();
2004 
2005 	u8 res, myverb;
2006 
2007 	myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
2008 				 QM_MCR_VERB_QUERYWQ;
2009 	mcc = qm_mc_start(&p->p);
2010 	mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
2011 	qm_mc_commit(&p->p, myverb);
2012 	while (!(mcr = qm_mc_result(&p->p)))
2013 		cpu_relax();
2014 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
2015 	res = mcr->result;
2016 	if (res == QM_MCR_RESULT_OK) {
2017 		int i, array_len;
2018 
2019 		wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
2020 		array_len = ARRAY_SIZE(mcr->querywq.wq_len);
2021 		for (i = 0; i < array_len; i++)
2022 			wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
2023 	}
2024 	if (res != QM_MCR_RESULT_OK) {
2025 		pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
2026 		return -EIO;
2027 	}
2028 	return 0;
2029 }
2030 
2031 int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
2032 		       struct qm_mcr_cgrtestwrite *result)
2033 {
2034 	struct qm_mc_command *mcc;
2035 	struct qm_mc_result *mcr;
2036 	struct qman_portal *p = get_affine_portal();
2037 
2038 	u8 res;
2039 
2040 	mcc = qm_mc_start(&p->p);
2041 	mcc->cgrtestwrite.cgid = cgr->cgrid;
2042 	mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
2043 	mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
2044 	qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
2045 	while (!(mcr = qm_mc_result(&p->p)))
2046 		cpu_relax();
2047 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
2048 	res = mcr->result;
2049 	if (res == QM_MCR_RESULT_OK)
2050 		*result = mcr->cgrtestwrite;
2051 	if (res != QM_MCR_RESULT_OK) {
2052 		pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
2053 		return -EIO;
2054 	}
2055 	return 0;
2056 }
2057 
2058 int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
2059 {
2060 	struct qm_mc_command *mcc;
2061 	struct qm_mc_result *mcr;
2062 	struct qman_portal *p = get_affine_portal();
2063 	u8 res;
2064 	unsigned int i;
2065 
2066 	mcc = qm_mc_start(&p->p);
2067 	mcc->querycgr.cgid = cgr->cgrid;
2068 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
2069 	while (!(mcr = qm_mc_result(&p->p)))
2070 		cpu_relax();
2071 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
2072 	res = mcr->result;
2073 	if (res == QM_MCR_RESULT_OK)
2074 		*cgrd = mcr->querycgr;
2075 	if (res != QM_MCR_RESULT_OK) {
2076 		pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
2077 		return -EIO;
2078 	}
2079 	cgrd->cgr.wr_parm_g.word =
2080 		be32_to_cpu(cgrd->cgr.wr_parm_g.word);
2081 	cgrd->cgr.wr_parm_y.word =
2082 		be32_to_cpu(cgrd->cgr.wr_parm_y.word);
2083 	cgrd->cgr.wr_parm_r.word =
2084 		be32_to_cpu(cgrd->cgr.wr_parm_r.word);
2085 	cgrd->cgr.cscn_targ =  be32_to_cpu(cgrd->cgr.cscn_targ);
2086 	cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
2087 	for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
2088 		cgrd->cscn_targ_swp[i] =
2089 			be32_to_cpu(cgrd->cscn_targ_swp[i]);
2090 	return 0;
2091 }
2092 
2093 int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
2094 {
2095 	struct qm_mc_result *mcr;
2096 	struct qman_portal *p = get_affine_portal();
2097 	u8 res;
2098 	unsigned int i;
2099 
2100 	qm_mc_start(&p->p);
2101 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
2102 	while (!(mcr = qm_mc_result(&p->p)))
2103 		cpu_relax();
2104 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2105 			QM_MCC_VERB_QUERYCONGESTION);
2106 	res = mcr->result;
2107 	if (res == QM_MCR_RESULT_OK)
2108 		*congestion = mcr->querycongestion;
2109 	if (res != QM_MCR_RESULT_OK) {
2110 		pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
2111 		return -EIO;
2112 	}
2113 	for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
2114 		congestion->state.state[i] =
2115 			be32_to_cpu(congestion->state.state[i]);
2116 	return 0;
2117 }
2118 
2119 int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags)
2120 {
2121 	struct qman_portal *p = get_affine_portal();
2122 	uint32_t vdqcr;
2123 	int ret = -EBUSY;
2124 
2125 	vdqcr = vdqcr_flags;
2126 	vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);
2127 
2128 	if ((fq->state != qman_fq_state_parked) &&
2129 	    (fq->state != qman_fq_state_retired)) {
2130 		ret = -EINVAL;
2131 		goto out;
2132 	}
2133 	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
2134 		ret = -EBUSY;
2135 		goto out;
2136 	}
2137 	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
2138 
2139 	if (!p->vdqcr_owned) {
2140 		FQLOCK(fq);
2141 		if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2142 			goto escape;
2143 		fq_set(fq, QMAN_FQ_STATE_VDQCR);
2144 		FQUNLOCK(fq);
2145 		p->vdqcr_owned = fq;
2146 		ret = 0;
2147 	}
2148 escape:
2149 	if (!ret)
2150 		qm_dqrr_vdqcr_set(&p->p, vdqcr);
2151 
2152 out:
2153 	return ret;
2154 }
2155 
2156 int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
2157 			  u32 vdqcr)
2158 {
2159 	struct qman_portal *p;
2160 	int ret = -EBUSY;
2161 
2162 	if ((fq->state != qman_fq_state_parked) &&
2163 	    (fq->state != qman_fq_state_retired))
2164 		return -EINVAL;
2165 	if (vdqcr & QM_VDQCR_FQID_MASK)
2166 		return -EINVAL;
2167 	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2168 		return -EBUSY;
2169 	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
2170 
2171 	p = get_affine_portal();
2172 
2173 	if (!p->vdqcr_owned) {
2174 		FQLOCK(fq);
2175 		if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2176 			goto escape;
2177 		fq_set(fq, QMAN_FQ_STATE_VDQCR);
2178 		FQUNLOCK(fq);
2179 		p->vdqcr_owned = fq;
2180 		ret = 0;
2181 	}
2182 escape:
2183 	if (ret)
2184 		return ret;
2185 
2186 	/* VDQCR is set */
2187 	qm_dqrr_vdqcr_set(&p->p, vdqcr);
2188 	return 0;
2189 }
2190 
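/*
 * Illustrative sketch only (not part of the driver, hypothetical helper
 * name): request a volatile dequeue of up to three frames from a parked or
 * retired FQ. -EBUSY means another volatile dequeue still owns the portal;
 * the caller should service the DQRR and retry rather than spin here. The
 * dequeued frames are delivered through the normal DQRR processing path.
 */
static inline int example_volatile_drain(struct qman_fq *fq)
{
	return qman_volatile_dequeue(fq, 0, QM_VDQCR_NUMFRAMES_SET(3));
}
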
2191 static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
2192 {
2193 	if (avail)
2194 		qm_eqcr_cce_prefetch(&p->p);
2195 	else
2196 		qm_eqcr_cce_update(&p->p);
2197 }
2198 
2199 int qman_eqcr_is_empty(void)
2200 {
2201 	struct qman_portal *p = get_affine_portal();
2202 	u8 avail;
2203 
2204 	update_eqcr_ci(p, 0);
2205 	avail = qm_eqcr_get_fill(&p->p);
2206 	return (avail == 0);
2207 }
2208 
2209 void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
2210 {
2211 	if (affine) {
2212 		struct qman_portal *p = get_affine_portal();
2213 
2214 		p->cb_dc_ern = handler;
2215 	} else
2216 		cb_dc_ern = handler;
2217 }
2218 
2219 static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
2220 					struct qman_fq *fq,
2221 					const struct qm_fd *fd,
2222 					u32 flags)
2223 {
2224 	struct qm_eqcr_entry *eq;
2225 	u8 avail;
2226 
2227 	if (p->use_eqcr_ci_stashing) {
2228 		/*
2229 		 * The stashing case is easy: only update if we need to, in
2230 		 * order to try to liberate ring entries.
2231 		 */
2232 		eq = qm_eqcr_start_stash(&p->p);
2233 	} else {
2234 		/*
2235 		 * The non-stashing case is harder: we need to prefetch ahead
2236 		 * of time.
2237 		 */
2238 		avail = qm_eqcr_get_avail(&p->p);
2239 		if (avail < 2)
2240 			update_eqcr_ci(p, avail);
2241 		eq = qm_eqcr_start_no_stash(&p->p);
2242 	}
2243 
2244 	if (unlikely(!eq))
2245 		return NULL;
2246 
2247 	if (flags & QMAN_ENQUEUE_FLAG_DCA)
2248 		eq->dca = QM_EQCR_DCA_ENABLE |
2249 			((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
2250 					QM_EQCR_DCA_PARK : 0) |
2251 			((flags >> 8) & QM_EQCR_DCA_IDXMASK);
2252 	eq->fqid = cpu_to_be32(fq->fqid);
2253 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
2254 	eq->tag = cpu_to_be32(fq->key);
2255 #else
2256 	eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
2257 #endif
2258 	eq->fd = *fd;
2259 	cpu_to_hw_fd(&eq->fd);
2260 	return eq;
2261 }
2262 
2263 int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
2264 {
2265 	struct qman_portal *p = get_affine_portal();
2266 	struct qm_eqcr_entry *eq;
2267 
2268 	eq = try_p_eq_start(p, fq, fd, flags);
2269 	if (!eq)
2270 		return -EBUSY;
2271 	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
2272 	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
2273 		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
2274 	/* Factor the below out, it's used from qman_enqueue_orp() too */
2275 	return 0;
2276 }
2277 
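/*
 * Illustrative sketch only (not part of the driver, hypothetical helper
 * name): qman_enqueue() returns -EBUSY when no EQCR entry is currently
 * available, so a simple caller spins (or backs off) until the hardware
 * consumes enough of the ring for the enqueue to be accepted.
 */
static inline int example_enqueue_retry(struct qman_fq *fq,
					const struct qm_fd *fd)
{
	int ret;

	do {
		ret = qman_enqueue(fq, fd, 0);
	} while (ret == -EBUSY);
	return ret;
}
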
2278 int qman_enqueue_multi(struct qman_fq *fq,
2279 		       const struct qm_fd *fd, u32 *flags,
2280 		int frames_to_send)
2281 {
2282 	struct qman_portal *p = get_affine_portal();
2283 	struct qm_portal *portal = &p->p;
2284 
2285 	register struct qm_eqcr *eqcr = &portal->eqcr;
2286 	struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
2287 
2288 	u8 i = 0, diff, old_ci, sent = 0;
2289 
2290 	/* Update the available entries if no entry is free */
2291 	if (!eqcr->available) {
2292 		old_ci = eqcr->ci;
2293 		eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
2294 		diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
2295 		eqcr->available += diff;
2296 		if (!diff)
2297 			return 0;
2298 	}
2299 
2300 	/* try to send as many frames as possible */
2301 	while (eqcr->available && frames_to_send--) {
2302 		eq->fqid = fq->fqid_le;
2303 		eq->fd.opaque_addr = fd->opaque_addr;
2304 		eq->fd.addr = cpu_to_be40(fd->addr);
2305 		eq->fd.status = cpu_to_be32(fd->status);
2306 		eq->fd.opaque = cpu_to_be32(fd->opaque);
2307 		if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
2308 			eq->dca = QM_EQCR_DCA_ENABLE |
2309 				((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
2310 		}
2311 		i++;
2312 		eq = (void *)((unsigned long)(eq + 1) &
2313 			(~(unsigned long)(QM_EQCR_SIZE << 6)));
2314 		eqcr->available--;
2315 		sent++;
2316 		fd++;
2317 	}
2318 	lwsync();
2319 
2320 	/* Set the verb on all recorded entries first; the cache-line flushes
2321 	 * below can then be issued back-to-back so they complete faster.
2322 	 */
2323 	eq = eqcr->cursor;
2324 	for (i = 0; i < sent; i++) {
2325 		eq->__dont_write_directly__verb =
2326 			QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
2327 		prev_eq = eq;
2328 		eq = (void *)((unsigned long)(eq + 1) &
2329 			(~(unsigned long)(QM_EQCR_SIZE << 6)));
2330 		if (unlikely((prev_eq + 1) != eq))
2331 			eqcr->vbit ^= QM_EQCR_VERB_VBIT;
2332 	}
2333 
2334 	/* Flush all the lines, with no load/store operations between the
2335 	 * flushes.
2336 	 */
2337 	eq = eqcr->cursor;
2338 	for (i = 0; i < sent; i++) {
2339 		dcbf(eq);
2340 		eq = (void *)((unsigned long)(eq + 1) &
2341 			(~(unsigned long)(QM_EQCR_SIZE << 6)));
2342 	}
2343 	/* Update cursor for the next call */
2344 	eqcr->cursor = eq;
2345 	return sent;
2346 }
2347 
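/*
 * Illustrative sketch only (not part of the driver, hypothetical helper
 * name): qman_enqueue_multi() returns how many frames it actually placed
 * in the EQCR, which may be fewer than requested when the ring is short on
 * space, so burst callers typically loop over the remainder.
 */
static inline int example_enqueue_burst(struct qman_fq *fq,
					const struct qm_fd *fd, int n)
{
	int sent = 0;

	while (sent < n) {
		int ret = qman_enqueue_multi(fq, &fd[sent], NULL, n - sent);

		if (!ret)
			break;	/* EQCR full, no entries freed yet */
		sent += ret;
	}
	return sent;
}
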
2348 int
2349 qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
2350 		      u32 *flags, int frames_to_send)
2351 {
2352 	struct qman_portal *p = get_affine_portal();
2353 	struct qm_portal *portal = &p->p;
2354 
2355 	register struct qm_eqcr *eqcr = &portal->eqcr;
2356 	struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
2357 
2358 	u8 i = 0, diff, old_ci, sent = 0;
2359 
2360 	/* Update the available entries if no entry is free */
2361 	if (!eqcr->available) {
2362 		old_ci = eqcr->ci;
2363 		eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
2364 		diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
2365 		eqcr->available += diff;
2366 		if (!diff)
2367 			return 0;
2368 	}
2369 
2370 	/* try to send as many frames as possible */
2371 	while (eqcr->available && frames_to_send--) {
2372 		eq->fqid = fq[sent]->fqid_le;
2373 		eq->fd.opaque_addr = fd->opaque_addr;
2374 		eq->fd.addr = cpu_to_be40(fd->addr);
2375 		eq->fd.status = cpu_to_be32(fd->status);
2376 		eq->fd.opaque = cpu_to_be32(fd->opaque);
2377 		if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
2378 			eq->dca = QM_EQCR_DCA_ENABLE |
2379 				((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
2380 		}
2381 		i++;
2382 
2383 		eq = (void *)((unsigned long)(eq + 1) &
2384 			(~(unsigned long)(QM_EQCR_SIZE << 6)));
2385 		eqcr->available--;
2386 		sent++;
2387 		fd++;
2388 	}
2389 	lwsync();
2390 
2391 	/* Set the verb on all recorded entries first; the cache-line flushes
2392 	 * below can then be issued back-to-back so they complete faster.
2393 	 */
2394 	eq = eqcr->cursor;
2395 	for (i = 0; i < sent; i++) {
2396 		eq->__dont_write_directly__verb =
2397 			QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
2398 		prev_eq = eq;
2399 		eq = (void *)((unsigned long)(eq + 1) &
2400 			(~(unsigned long)(QM_EQCR_SIZE << 6)));
2401 		if (unlikely((prev_eq + 1) != eq))
2402 			eqcr->vbit ^= QM_EQCR_VERB_VBIT;
2403 	}
2404 
2405 	/* Flush all the lines, with no load/store operations between the
2406 	 * flushes.
2407 	 */
2408 	eq = eqcr->cursor;
2409 	for (i = 0; i < sent; i++) {
2410 		dcbf(eq);
2411 		eq = (void *)((unsigned long)(eq + 1) &
2412 			(~(unsigned long)(QM_EQCR_SIZE << 6)));
2413 	}
2414 	/* Update cursor for the next call */
2415 	eqcr->cursor = eq;
2416 	return sent;
2417 }
2418 
2419 int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
2420 		     struct qman_fq *orp, u16 orp_seqnum)
2421 {
2422 	struct qman_portal *p  = get_affine_portal();
2423 	struct qm_eqcr_entry *eq;
2424 
2425 	eq = try_p_eq_start(p, fq, fd, flags);
2426 	if (!eq)
2427 		return -EBUSY;
2428 	/* Process ORP-specifics here */
2429 	if (flags & QMAN_ENQUEUE_FLAG_NLIS)
2430 		orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
2431 	else {
2432 		orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
2433 		if (flags & QMAN_ENQUEUE_FLAG_NESN)
2434 			orp_seqnum |= QM_EQCR_SEQNUM_NESN;
2435 		else
2436 			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
2437 			orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
2438 	}
2439 	eq->seqnum = cpu_to_be16(orp_seqnum);
2440 	eq->orp = cpu_to_be32(orp->fqid);
2441 	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
2442 	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
2443 		((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
2444 				0 : QM_EQCR_VERB_CMD_ENQUEUE) |
2445 		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
2446 
2447 	return 0;
2448 }
2449 
2450 int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
2451 		    struct qm_mcc_initcgr *opts)
2452 {
2453 	struct qm_mc_command *mcc;
2454 	struct qm_mc_result *mcr;
2455 	struct qman_portal *p = get_affine_portal();
2456 
2457 	u8 res;
2458 	u8 verb = QM_MCC_VERB_MODIFYCGR;
2459 
2460 	mcc = qm_mc_start(&p->p);
2461 	if (opts)
2462 		mcc->initcgr = *opts;
2463 	mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
2464 	mcc->initcgr.cgr.wr_parm_g.word =
2465 		cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
2466 	mcc->initcgr.cgr.wr_parm_y.word =
2467 		cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
2468 	mcc->initcgr.cgr.wr_parm_r.word =
2469 		cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
2470 	mcc->initcgr.cgr.cscn_targ =  cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
2471 	mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
2472 
2473 	mcc->initcgr.cgid = cgr->cgrid;
2474 	if (flags & QMAN_CGR_FLAG_USE_INIT)
2475 		verb = QM_MCC_VERB_INITCGR;
2476 	qm_mc_commit(&p->p, verb);
2477 	while (!(mcr = qm_mc_result(&p->p)))
2478 		cpu_relax();
2479 
2480 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
2481 	res = mcr->result;
2482 	return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
2483 }
2484 
2485 #define TARG_MASK(n) (0x80000000 >> (n->config->channel - \
2486 					QM_CHANNEL_SWPORTAL0))
2487 #define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
2488 #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
2489 
2490 int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
2491 		    struct qm_mcc_initcgr *opts)
2492 {
2493 	struct qm_mcr_querycgr cgr_state;
2494 	struct qm_mcc_initcgr local_opts;
2495 	int ret;
2496 	struct qman_portal *p;
2497 
2498 	/* We have to check that the provided CGRID is within the limits of the
2499 	 * data-structures, for obvious reasons. However we'll let h/w take
2500 	 * care of determining whether it's within the limits of what exists on
2501 	 * the SoC.
2502 	 */
2503 	if (cgr->cgrid >= __CGR_NUM)
2504 		return -EINVAL;
2505 
2506 	p = get_affine_portal();
2507 
2508 	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2509 	cgr->chan = p->config->channel;
2510 	spin_lock(&p->cgr_lock);
2511 
2512 	/* if no opts specified, just add it to the list */
2513 	if (!opts)
2514 		goto add_list;
2515 
2516 	ret = qman_query_cgr(cgr, &cgr_state);
2517 	if (ret)
2518 		goto release_lock;
2519 	if (opts)
2520 		local_opts = *opts;
2521 	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2522 		local_opts.cgr.cscn_targ_upd_ctrl =
2523 			QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
2524 	else
2525 		/* Overwrite TARG */
2526 		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
2527 							TARG_MASK(p);
2528 	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
2529 
2530 	/* send init if flags indicate so */
2531 	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
2532 		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
2533 	else
2534 		ret = qman_modify_cgr(cgr, 0, &local_opts);
2535 	if (ret)
2536 		goto release_lock;
2537 add_list:
2538 	list_add(&cgr->node, &p->cgr_cbs);
2539 
2540 	/* Determine if the newly added object needs its callback called */
2541 	ret = qman_query_cgr(cgr, &cgr_state);
2542 	if (ret) {
2543 		/* we can't go back, so proceed and return success, but scream
2544 		 * and wail to the log file.
2545 		 */
2546 		pr_crit("CGR HW state partially modified\n");
2547 		ret = 0;
2548 		goto release_lock;
2549 	}
2550 	if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
2551 							      cgr->cgrid))
2552 		cgr->cb(p, cgr, 1);
2553 release_lock:
2554 	spin_unlock(&p->cgr_lock);
2555 	return ret;
2556 }
2557 
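/*
 * Illustrative sketch only (not part of the driver, hypothetical helper
 * name): initialise a CGR with a byte-count tail-drop threshold, assuming
 * the usual fsl_qman.h helpers and constants (QM_CGR_WE_*, QM_CGR_EN,
 * QMAN_CGR_MODE_BYTE, qm_cgr_cs_thres_set64()). The caller is expected to
 * have set cgr->cgrid (and optionally cgr->cb) beforehand; the threshold
 * value is whatever the application chooses.
 */
static inline int example_cgr_setup(struct qman_cgr *cgr, u64 td_threshold)
{
	struct qm_mcc_initcgr opts = {
		.we_mask = QM_CGR_WE_CS_THRES | QM_CGR_WE_CSTD_EN |
			   QM_CGR_WE_MODE,
		.cgr = {
			.cstd_en = QM_CGR_EN,
			.mode = QMAN_CGR_MODE_BYTE,
		},
	};

	/* Encode the 64-bit threshold into the mantissa/exponent format */
	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, td_threshold, 0);
	return qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
}
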
2558 int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
2559 			   struct qm_mcc_initcgr *opts)
2560 {
2561 	struct qm_mcc_initcgr local_opts;
2562 	struct qm_mcr_querycgr cgr_state;
2563 	int ret;
2564 
2565 	if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
2566 		pr_warn("QMan version doesn't support CSCN => DCP portal\n");
2567 		return -EINVAL;
2568 	}
2569 	/* We have to check that the provided CGRID is within the limits of the
2570 	 * data-structures, for obvious reasons. However we'll let h/w take
2571 	 * care of determining whether it's within the limits of what exists on
2572 	 * the SoC.
2573 	 */
2574 	if (cgr->cgrid >= __CGR_NUM)
2575 		return -EINVAL;
2576 
2577 	ret = qman_query_cgr(cgr, &cgr_state);
2578 	if (ret)
2579 		return ret;
2580 
2581 	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2582 	if (opts)
2583 		local_opts = *opts;
2584 
2585 	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2586 		local_opts.cgr.cscn_targ_upd_ctrl =
2587 				QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
2588 				QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
2589 	else
2590 		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
2591 					TARG_DCP_MASK(dcp_portal);
2592 	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
2593 
2594 	/* send init if flags indicate so */
2595 	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
2596 		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
2597 				      &local_opts);
2598 	else
2599 		ret = qman_modify_cgr(cgr, 0, &local_opts);
2600 
2601 	return ret;
2602 }
2603 
2604 int qman_delete_cgr(struct qman_cgr *cgr)
2605 {
2606 	struct qm_mcr_querycgr cgr_state;
2607 	struct qm_mcc_initcgr local_opts;
2608 	int ret = 0;
2609 	struct qman_cgr *i;
2610 	struct qman_portal *p = get_affine_portal();
2611 
2612 	if (cgr->chan != p->config->channel) {
2613 		pr_crit("Attempting to delete cgr from a different portal than"
2614 			" the one it was created on: created 0x%x, deleting 0x%x\n",
2615 			cgr->chan, p->config->channel);
2616 		ret = -EINVAL;
2617 		goto put_portal;
2618 	}
2619 	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2620 	spin_lock(&p->cgr_lock);
2621 	list_del(&cgr->node);
2622 	/*
2623 	 * If there are no other CGR objects for this CGRID in the list,
2624 	 * update CSCN_TARG accordingly
2625 	 */
2626 	list_for_each_entry(i, &p->cgr_cbs, node)
2627 		if ((i->cgrid == cgr->cgrid) && i->cb)
2628 			goto release_lock;
2629 	ret = qman_query_cgr(cgr, &cgr_state);
2630 	if (ret)  {
2631 		/* add back to the list */
2632 		list_add(&cgr->node, &p->cgr_cbs);
2633 		goto release_lock;
2634 	}
2635 	/* Overwrite TARG */
2636 	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
2637 	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2638 		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
2639 	else
2640 		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
2641 							 ~(TARG_MASK(p));
2642 	ret = qman_modify_cgr(cgr, 0, &local_opts);
2643 	if (ret)
2644 		/* add back to the list */
2645 		list_add(&cgr->node, &p->cgr_cbs);
2646 release_lock:
2647 	spin_unlock(&p->cgr_lock);
2648 put_portal:
2649 	return ret;
2650 }
2651 
2652 int qman_shutdown_fq(u32 fqid)
2653 {
2654 	struct qman_portal *p;
2655 	struct qm_portal *low_p;
2656 	struct qm_mc_command *mcc;
2657 	struct qm_mc_result *mcr;
2658 	u8 state;
2659 	int orl_empty, fq_empty, drain = 0;
2660 	u32 result;
2661 	u32 channel, wq;
2662 	u16 dest_wq;
2663 
2664 	p = get_affine_portal();
2665 	low_p = &p->p;
2666 
2667 	/* Determine the state of the FQID */
2668 	mcc = qm_mc_start(low_p);
2669 	mcc->queryfq_np.fqid = cpu_to_be32(fqid);
2670 	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
2671 	while (!(mcr = qm_mc_result(low_p)))
2672 		cpu_relax();
2673 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2674 	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
2675 	if (state == QM_MCR_NP_STATE_OOS)
2676 		return 0; /* Already OOS, no need to do any more checks */
2677 
2678 	/* Query which channel the FQ is using */
2679 	mcc = qm_mc_start(low_p);
2680 	mcc->queryfq.fqid = cpu_to_be32(fqid);
2681 	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
2682 	while (!(mcr = qm_mc_result(low_p)))
2683 		cpu_relax();
2684 	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
2685 
2686 	/* Need to store these since the MCR gets reused */
2687 	dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
2688 	channel = dest_wq & 0x7;
2689 	wq = dest_wq >> 3;
2690 
2691 	switch (state) {
2692 	case QM_MCR_NP_STATE_TEN_SCHED:
2693 	case QM_MCR_NP_STATE_TRU_SCHED:
2694 	case QM_MCR_NP_STATE_ACTIVE:
2695 	case QM_MCR_NP_STATE_PARKED:
2696 		orl_empty = 0;
2697 		mcc = qm_mc_start(low_p);
2698 		mcc->alterfq.fqid = cpu_to_be32(fqid);
2699 		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
2700 		while (!(mcr = qm_mc_result(low_p)))
2701 			cpu_relax();
2702 		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2703 			   QM_MCR_VERB_ALTER_RETIRE);
2704 		result = mcr->result; /* Make a copy as we reuse MCR below */
2705 
2706 		if (result == QM_MCR_RESULT_PENDING) {
2707 			/* Need to wait for the FQRN in the message ring, which
2708 			 * will only occur once the FQ has been drained. In
2709 			 * order for the FQ to drain, the portal needs to be set
2710 			 * to dequeue from the channel the FQ is scheduled on.
2711 			 */
2712 			const struct qm_mr_entry *msg;
2713 			const struct qm_dqrr_entry *dqrr = NULL;
2714 			int found_fqrn = 0;
2715 			__maybe_unused u16 dequeue_wq = 0;
2716 
2717 			/* Flag that we need to drain FQ */
2718 			drain = 1;
2719 
2720 			if (channel >= qm_channel_pool1 &&
2721 			    channel < (u16)(qm_channel_pool1 + 15)) {
2722 				/* Pool channel, enable the bit in the portal */
2723 				dequeue_wq = (channel -
2724 					      qm_channel_pool1 + 1) << 4 | wq;
2725 			} else if (channel < qm_channel_pool1) {
2726 				/* Dedicated channel */
2727 				dequeue_wq = wq;
2728 			} else {
2729 				pr_info("Cannot recover FQ 0x%x,"
2730 					" it is scheduled on channel 0x%x\n",
2731 					fqid, channel);
2732 				return -EBUSY;
2733 			}
2734 			/* Set the sdqcr to drain this channel */
2735 			if (channel < qm_channel_pool1)
2736 				qm_dqrr_sdqcr_set(low_p,
2737 						  QM_SDQCR_TYPE_ACTIVE |
2738 					  QM_SDQCR_CHANNELS_DEDICATED);
2739 			else
2740 				qm_dqrr_sdqcr_set(low_p,
2741 						  QM_SDQCR_TYPE_ACTIVE |
2742 						  QM_SDQCR_CHANNELS_POOL_CONV
2743 						  (channel));
2744 			while (!found_fqrn) {
2745 				/* Keep draining DQRR while checking the MR */
2746 				qm_dqrr_pvb_update(low_p);
2747 				dqrr = qm_dqrr_current(low_p);
2748 				while (dqrr) {
2749 					qm_dqrr_cdc_consume_1ptr(
2750 						low_p, dqrr, 0);
2751 					qm_dqrr_pvb_update(low_p);
2752 					qm_dqrr_next(low_p);
2753 					dqrr = qm_dqrr_current(low_p);
2754 				}
2755 				/* Process message ring too */
2756 				qm_mr_pvb_update(low_p);
2757 				msg = qm_mr_current(low_p);
2758 				while (msg) {
2759 					if ((msg->ern.verb &
2760 					     QM_MR_VERB_TYPE_MASK)
2761 					    == QM_MR_VERB_FQRN)
2762 						found_fqrn = 1;
2763 					qm_mr_next(low_p);
2764 					qm_mr_cci_consume_to_current(low_p);
2765 					qm_mr_pvb_update(low_p);
2766 					msg = qm_mr_current(low_p);
2767 				}
2768 				cpu_relax();
2769 			}
2770 		}
2771 		if (result != QM_MCR_RESULT_OK &&
2772 		    result !=  QM_MCR_RESULT_PENDING) {
2773 			/* error */
2774 			pr_err("qman_retire_fq failed on FQ 0x%x,"
2775 			       " result=0x%x\n", fqid, result);
2776 			return -1;
2777 		}
2778 		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
2779 			/* ORL had no entries, no need to wait until the
2780 			 * ERNs come in.
2781 			 */
2782 			orl_empty = 1;
2783 		}
2784 		/* Retirement succeeded, check to see if FQ needs
2785 		 * to be drained.
2786 		 */
2787 		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
2788 			/* FQ is Not Empty, drain using volatile DQ commands */
2789 			fq_empty = 0;
2790 			do {
2791 				const struct qm_dqrr_entry *dqrr = NULL;
2792 				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
2793 
2794 				qm_dqrr_vdqcr_set(low_p, vdqcr);
2795 
2796 				/* Wait for a dequeue to occur */
2797 				while (dqrr == NULL) {
2798 					qm_dqrr_pvb_update(low_p);
2799 					dqrr = qm_dqrr_current(low_p);
2800 					if (!dqrr)
2801 						cpu_relax();
2802 				}
2803 				/* Process the dequeues, making sure to
2804 				 * empty the ring completely.
2805 				 */
2806 				while (dqrr) {
2807 					if (dqrr->fqid == fqid &&
2808 					    dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
2809 						fq_empty = 1;
2810 					qm_dqrr_cdc_consume_1ptr(low_p,
2811 								 dqrr, 0);
2812 					qm_dqrr_pvb_update(low_p);
2813 					qm_dqrr_next(low_p);
2814 					dqrr = qm_dqrr_current(low_p);
2815 				}
2816 			} while (fq_empty == 0);
2817 		}
2818 		qm_dqrr_sdqcr_set(low_p, 0);
2819 
2820 		/* Wait for the ORL to have been completely drained */
2821 		while (orl_empty == 0) {
2822 			const struct qm_mr_entry *msg;
2823 
2824 			qm_mr_pvb_update(low_p);
2825 			msg = qm_mr_current(low_p);
2826 			while (msg) {
2827 				if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
2828 				    QM_MR_VERB_FQRL)
2829 					orl_empty = 1;
2830 				qm_mr_next(low_p);
2831 				qm_mr_cci_consume_to_current(low_p);
2832 				qm_mr_pvb_update(low_p);
2833 				msg = qm_mr_current(low_p);
2834 			}
2835 			cpu_relax();
2836 		}
2837 		mcc = qm_mc_start(low_p);
2838 		mcc->alterfq.fqid = cpu_to_be32(fqid);
2839 		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
2840 		while (!(mcr = qm_mc_result(low_p)))
2841 			cpu_relax();
2842 		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2843 			   QM_MCR_VERB_ALTER_OOS);
2844 		if (mcr->result != QM_MCR_RESULT_OK) {
2845 			pr_err(
2846 			"OOS after drain failed on FQID 0x%x, result 0x%x\n",
2847 			       fqid, mcr->result);
2848 			return -1;
2849 		}
2850 		return 0;
2851 
2852 	case QM_MCR_NP_STATE_RETIRED:
2853 		/* Send OOS Command */
2854 		mcc = qm_mc_start(low_p);
2855 		mcc->alterfq.fqid = cpu_to_be32(fqid);
2856 		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
2857 		while (!(mcr = qm_mc_result(low_p)))
2858 			cpu_relax();
2859 		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2860 			   QM_MCR_VERB_ALTER_OOS);
2861 		if (mcr->result) {
2862 			pr_err("OOS failed on FQID 0x%x\n", fqid);
2863 			return -1;
2864 		}
2865 		return 0;
2866 
2867 	}
2868 	return -1;
2869 }
2870