/*-
 *   BSD LICENSE
 *
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qbman_portal.h"

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
#define QBMAN_CINH_SWP_EQCR_CI 0x840
#define QBMAN_CINH_SWP_EQAR    0x8c0
#define QBMAN_CINH_SWP_DQPI    0xa00
#define QBMAN_CINH_SWP_DCAP    0xac0
#define QBMAN_CINH_SWP_SDQCR   0xb00
#define QBMAN_CINH_SWP_RAR     0xcc0
#define QBMAN_CINH_SWP_ISR     0xe00
#define QBMAN_CINH_SWP_IER     0xe40
#define QBMAN_CINH_SWP_ISDR    0xe80
#define QBMAN_CINH_SWP_IIR     0xec0

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((uint32_t)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE	0x48
#define QBMAN_FQ_FORCE		0x49
#define QBMAN_FQ_XON		0x4d
#define QBMAN_FQ_XOFF		0x4e

/*******************************/
/* Pre-defined attribute codes */
/*******************************/

struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);

/*************************/
/* SDQCR attribute codes */
/*************************/

/* We put these here because at least some of them are required by
 * qbman_swp_init().
 */
struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2);
struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1);
struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8);
/* The width of this code depends on the QMan revision, so it is filled in at
 * run time by qbman_swp_init().
 */
static struct qb_attr_code code_eq_dca_idx;
#define CODE_SDQCR_DQSRC(n) QB_CODE(0, (n), 1)
enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};

struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16);

/* We need to keep track of which SWP triggered a pull command, so we keep an
 * array of portal pointers indexed by portal ID and use the token field to
 * find the proper portal.
 */
#define MAX_QBMAN_PORTALS  35
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];

uint32_t qman_version;

/*********************************/
/* Portal constructor/destructor */
/*********************************/

/* Software portals should always be in the power-on state when we initialise,
 * due to the CCSR-based portal reset functionality that MC has.
 *
 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
 * valid-bits, so we need to support a workaround where we don't trust
 * valid-bits when detecting new entries until any stale ring entries have been
 * overwritten at least once. The idea is that we read PI for the first few
 * entries, then switch to valid-bit after that. The trick is to clear the
 * bug-workaround boolean once the PI wraps around the ring for the first time.
 *
 * Note: this still carries a slight additional cost once the decrementer hits
 * zero.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	int ret;
	uint32_t eqcr_pi;
	struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;
	p->desc = *d;
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
#endif
	p->mc.valid_bit = QB_VALID_BIT;
	p->sdq = 0;
	qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics);
	qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3);
	qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb);
	atomic_set(&p->vdq.busy, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = QB_VALID_BIT;
	qman_version = p->desc.qman_version;
	if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
		/* With a DQRR size of 4, the DCA index fits in 2 bits */
		code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 2);
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
		/* With a DQRR size of 8, the DCA index needs 3 bits */
		code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 3);
	}

	ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
	if (ret) {
		kfree(p);
		pr_err("qbman_swp_sys_init() failed %d\n", ret);
		return NULL;
	}
	/* SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from, or else the QMan HW will indicate an
	 * error. The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled.
	 */
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
	eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
	p->eqcr.pi = eqcr_pi & 0xF;
	p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
	p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
	p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
						p->eqcr.ci, p->eqcr.pi);

	portal_idx_map[p->desc.idx] = p;
	return p;
}

void qbman_swp_finish(struct qbman_swp *p)
{
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
	qbman_swp_sys_finish(&p->sys);
	portal_idx_map[p->desc.idx] = NULL;
	kfree(p);
}

const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
{
	return &p->desc;
}

/**************/
/* Interrupts */
/**************/

uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
}

void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
}

uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
}

void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}

uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
}

void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
}

int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
}

void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}

240 
241 /***********************/
242 /* Management commands */
243 /***********************/
244 
245 /*
246  * Internal code common to all types of management commands.
247  */
248 
249 void *qbman_swp_mc_start(struct qbman_swp *p)
250 {
251 	void *ret;
252 #ifdef QBMAN_CHECKING
253 	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
254 #endif
255 	ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
256 #ifdef QBMAN_CHECKING
257 	if (!ret)
258 		p->mc.check = swp_mc_can_submit;
259 #endif
260 	return ret;
261 }
262 
263 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb)
264 {
265 	uint32_t *v = cmd;
266 #ifdef QBMAN_CHECKING
267 	QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
268 #endif
269 	/* TBD: "|=" is going to hurt performance. Need to move as many fields
270 	 * out of word zero, and for those that remain, the "OR" needs to occur
271 	 * at the caller side. This debug check helps to catch cases where the
272 	 * caller wants to OR but has forgotten to do so.
273 	 */
274 	QBMAN_BUG_ON((*v & cmd_verb) != *v);
275 	*v = cmd_verb | p->mc.valid_bit;
276 	qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
277 #ifdef QBMAN_CHECKING
278 	p->mc.check = swp_mc_can_poll;
279 #endif
280 }
281 
282 void *qbman_swp_mc_result(struct qbman_swp *p)
283 {
284 	uint32_t *ret, verb;
285 #ifdef QBMAN_CHECKING
286 	QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
287 #endif
288 	qbman_cena_invalidate_prefetch(&p->sys,
289 				       QBMAN_CENA_SWP_RR(p->mc.valid_bit));
290 	ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
291 	/* Remove the valid-bit - command completed if the rest is non-zero */
292 	verb = ret[0] & ~QB_VALID_BIT;
293 	if (!verb)
294 		return NULL;
295 #ifdef QBMAN_CHECKING
296 	p->mc.check = swp_mc_can_start;
297 #endif
298 	p->mc.valid_bit ^= QB_VALID_BIT;
299 	return ret;
300 }
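
/* Illustrative sketch (not part of this file's API): the three-phase
 * protocol above is normally composed as start -> submit -> poll-for-result.
 * The real qbman_swp_mc_complete() helper lives in the header; something
 * along these lines is assumed:
 */
static __attribute__((unused)) void *qbman_swp_mc_complete_sketch(
		struct qbman_swp *p, void *cmd, uint32_t cmd_verb)
{
	void *ret;

	qbman_swp_mc_submit(p, cmd, cmd_verb);
	do {
		/* NULL until the response has been DMA'd back to us */
		ret = qbman_swp_mc_result(p);
	} while (!ret);
	return ret;
}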

/***********/
/* Enqueue */
/***********/

/* These should be const, eventually */
static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2);
static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1);
static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1);
static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1);
/* The width of code_eq_dca_idx depends on the QMan version, so it is set at
 * run time in qbman_swp_init().
 */
static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1);
static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1);
static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1);
static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14);
static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16);
static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24);
/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */
static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1);
static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16);
static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4);
static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1);
static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8);
static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32);

enum qbman_eq_cmd_e {
	/* No enqueue, primarily for plugging ORP gaps for dropped frames */
	qbman_eq_cmd_empty,
	/* DMA an enqueue response once complete */
	qbman_eq_cmd_respond,
	/* DMA an enqueue response only if the enqueue fails */
	qbman_eq_cmd_respond_reject
};

void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}

void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_orp_en, cl, 0);
	qb_attr_code_encode(&code_eq_cmd, cl,
			    respond_success ? qbman_eq_cmd_respond :
					      qbman_eq_cmd_respond_reject);
}

void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
			   uint32_t opr_id, uint32_t seqnum, int incomplete)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_orp_en, cl, 1);
	qb_attr_code_encode(&code_eq_cmd, cl,
			    respond_success ? qbman_eq_cmd_respond :
					      qbman_eq_cmd_respond_reject);
	qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
	qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
	qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete);
}

void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
				uint32_t seqnum)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_orp_en, cl, 1);
	qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
	qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
	qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
	qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
	qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0);
}

void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
				uint32_t seqnum)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_orp_en, cl, 1);
	qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
	qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
	qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
	qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
	qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1);
}

void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
				dma_addr_t storage_phys,
				int stash)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys);
	qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash);
}

void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token);
}

void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_qd_en, cl, 0);
	qb_attr_code_encode(&code_eq_tgt_id, cl, fqid);
}

void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
			  uint32_t qd_bin, uint32_t qd_prio)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_qd_en, cl, 1);
	qb_attr_code_encode(&code_eq_tgt_id, cl, qdid);
	qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin);
	qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio);
}

void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_eqdi, cl, !!enable);
}

void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
			   uint32_t dqrr_idx, int park)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_dca_en, cl, !!enable);
	if (enable) {
		qb_attr_code_encode(&code_eq_dca_pk, cl, !!park);
		qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx);
	}
}

#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

	pr_debug("EQAR=%08x\n", eqar);
	if (!EQAR_SUCCESS(eqar))
		return -EBUSY;
	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	word_copy(&p[1], &cl[1], 7);
	word_copy(&p[8], fd, sizeof(*fd) >> 2);
	/* Set the verb byte, have to substitute in the valid-bit */
	lwsync();
	p[0] = cl[0] | EQAR_VB(eqar);
	qbman_cena_write_complete_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	return 0;
}

static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci;
	uint8_t diff;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & 0xF;
		diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
				   eqcr_ci, s->eqcr.ci);
		s->eqcr.available += diff;
		if (!diff)
			return -EBUSY;
	}

	p = qbman_cena_write_start_wo_shadow(&s->sys,
		QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
	word_copy(&p[1], &cl[1], 7);
	word_copy(&p[8], fd, sizeof(*fd) >> 2);
	lwsync();
	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->eqcr.pi_vb;
	qbman_cena_write_complete_wo_shadow(&s->sys,
		QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
	s->eqcr.pi++;
	s->eqcr.pi &= 0xF;
	s->eqcr.available--;
	if (!(s->eqcr.pi & 7))
		s->eqcr.pi_vb ^= QB_VALID_BIT;
	return 0;
}

int qbman_swp_fill_ring(struct qbman_swp *s,
			const struct qbman_eq_desc *d,
			const struct qbman_fd *fd,
			__attribute__((unused)) uint8_t burst_index)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci;
	uint8_t diff;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & 0xF;
		diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
				   eqcr_ci, s->eqcr.ci);
		s->eqcr.available += diff;
		if (!diff)
			return -EBUSY;
	}
	p = qbman_cena_write_start_wo_shadow(&s->sys,
		QBMAN_CENA_SWP_EQCR((s->eqcr.pi/* +burst_index */) & 7));
	/* word_copy(&p[1], &cl[1], 7); */
	memcpy(&p[1], &cl[1], 7 * 4);
	/* word_copy(&p[8], fd, sizeof(*fd) >> 2); */
	memcpy(&p[8], fd, sizeof(struct qbman_fd));

	/* No barrier here: the caller is expected to order and flush the
	 * filled entries itself (see qbman_sync() and qbman_swp_flush_ring()).
	 */
	/* lwsync(); */
	p[0] = cl[0] | s->eqcr.pi_vb;

	s->eqcr.pi++;
	s->eqcr.pi &= 0xF;
	s->eqcr.available--;
	if (!(s->eqcr.pi & 7))
		s->eqcr.pi_vb ^= QB_VALID_BIT;

	return 0;
}

int qbman_swp_flush_ring(struct qbman_swp *s)
{
	void *ptr = s->sys.addr_cena;

	/* Flush all eight EQCR cache lines (8 entries x 64 bytes) so that
	 * previously filled entries become visible to QBMan.
	 */
	dcbf((uint64_t)ptr);
	dcbf((uint64_t)ptr + 0x40);
	dcbf((uint64_t)ptr + 0x80);
	dcbf((uint64_t)ptr + 0xc0);
	dcbf((uint64_t)ptr + 0x100);
	dcbf((uint64_t)ptr + 0x140);
	dcbf((uint64_t)ptr + 0x180);
	dcbf((uint64_t)ptr + 0x1c0);

	return 0;
}

void qbman_sync(void)
{
	lwsync();
}

int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		      const struct qbman_fd *fd)
{
	if (s->sys.eqcr_mode == qman_eqcr_vb_array)
		return qbman_swp_enqueue_array_mode(s, d, fd);
	else	/* Use ring mode by default */
		return qbman_swp_enqueue_ring_mode(s, d, fd);
}
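
/* Example usage (an illustrative sketch, not part of the driver API): enqueue
 * one frame descriptor to a frame queue, spinning while the portal is busy.
 * 'fqid' and 'fd' are assumed to be prepared by the caller.
 */
static __attribute__((unused)) int example_enqueue_fd(struct qbman_swp *s,
						      uint32_t fqid,
						      const struct qbman_fd *fd)
{
	struct qbman_eq_desc d;
	int ret;

	qbman_eq_desc_clear(&d);
	qbman_eq_desc_set_no_orp(&d, 0);	/* no enqueue response wanted */
	qbman_eq_desc_set_fq(&d, fqid);
	do {
		ret = qbman_swp_enqueue(s, &d, fd);
	} while (ret == -EBUSY);
	return ret;
}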

/*************************/
/* Static (push) dequeue */
/*************************/

void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
{
	struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);

	QBMAN_BUG_ON(channel_idx > 15);
	*enabled = (int)qb_attr_code_decode(&code, &s->sdq);
}

void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
{
	uint16_t dqsrc;
	struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);

	QBMAN_BUG_ON(channel_idx > 15);
	qb_attr_code_encode(&code, &s->sdq, !!enable);
	/* Read back the complete source map. If no channels are enabled,
	 * the SDQCR must be 0, or else QMan will assert errors.
	 */
	dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq);
	if (dqsrc != 0)
		qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
	else
		qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
}
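
/* Example usage (illustrative sketch): enable push dequeuing from channel
 * index 2 of this portal and read the setting back. Channel indices are
 * limited to 0..15.
 */
static __attribute__((unused)) void example_push_dequeue_enable(
		struct qbman_swp *s)
{
	int enabled;

	qbman_swp_push_set(s, 2, 1);
	qbman_swp_push_get(s, 2, &enabled);
	QBMAN_BUG_ON(!enabled);
}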

/***************************/
/* Volatile (pull) dequeue */
/***************************/

/* These should be const, eventually */
static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2);
static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2);
static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1);
static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1);
static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4);
static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8);
static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24);
static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32);

enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};

void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}

void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct qbman_result *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	uint32_t *cl = qb_cl(d);
	/* Squiggle the pointer 'storage' into the extra 2 words of the
	 * descriptor (which aren't copied to the hw command)
	 */
	*(void **)&cl[4] = storage;
	if (!storage) {
		qb_attr_code_encode(&code_pull_rls, cl, 0);
		return;
	}
	qb_attr_code_encode(&code_pull_rls, cl, 1);
	qb_attr_code_encode(&code_pull_stash, cl, !!stash);
	qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys);
}

void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
{
	uint32_t *cl = qb_cl(d);

	QBMAN_BUG_ON(!numframes || (numframes > 16));
	qb_attr_code_encode(&code_pull_numframes, cl,
			    (uint32_t)(numframes - 1));
}

void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_pull_token, cl, token);
}

void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_pull_dct, cl, 1);
	qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue);
	qb_attr_code_encode(&code_pull_dqsource, cl, fqid);
}

void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
			    enum qbman_pull_type_e dct)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_pull_dct, cl, dct);
	qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue);
	qb_attr_code_encode(&code_pull_dqsource, cl, wqid);
}

void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
				 enum qbman_pull_type_e dct)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_pull_dct, cl, dct);
	qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel);
	qb_attr_code_encode(&code_pull_dqsource, cl, chid);
}

int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	uint32_t *p;
	uint32_t *cl = qb_cl(d);

	if (!atomic_dec_and_test(&s->vdq.busy)) {
		atomic_inc(&s->vdq.busy);
		return -EBUSY;
	}
	s->vdq.storage = *(void **)&cl[4];
	/* We use portal index +1 as token so that 0 still indicates
	 * that the result isn't valid yet.
	 */
	qb_attr_code_encode(&code_pull_token, cl, s->desc.idx + 1);
	p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
	word_copy(&p[1], &cl[1], 3);
	/* Set the verb byte, have to substitute in the valid-bit */
	lwsync();
	p[0] = cl[0] | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;
	qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
	return 0;
}
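
/* Example usage (illustrative sketch): issue a volatile dequeue of up to
 * four frames from 'fqid' into caller-provided 'storage' (with IOVA
 * 'storage_phys'), then busy-poll until the first response has landed.
 */
static __attribute__((unused)) int example_volatile_dequeue(
		struct qbman_swp *s, uint32_t fqid,
		struct qbman_result *storage, dma_addr_t storage_phys)
{
	struct qbman_pull_desc pd;
	int ret;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_numframes(&pd, 4);
	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
	qbman_pull_desc_set_fq(&pd, fqid);
	ret = qbman_swp_pull(s, &pd);
	if (ret)
		return ret;	/* -EBUSY: a previous pull is still running */
	/* The token written by QBMan signals that the result is usable */
	while (!qbman_check_command_complete(s, storage))
		;
	return 0;
}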

/****************/
/* Polling DQRR */
/****************/

static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8);
static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7);
static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8);
static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14);
static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16);
/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */
static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24);
static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32);
static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24);
static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32);

#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4);

/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	uint32_t verb;
	uint32_t response_verb;
	uint32_t flags;
	const struct qbman_result *dq;
	const uint32_t *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/* We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI);
		uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi);
		/* There are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;
		/* If next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
			pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		qbman_cena_invalidate_prefetch(&s->sys,
				QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	}
	dq = qbman_cena_read_wo_shadow(&s->sys,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	p = qb_cl(dq);
	verb = qb_attr_code_decode(&code_dqrr_verb, p);
	/* If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip this
	 * check, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
		return NULL;

	/* There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
		s->dqrr.next_idx = 0;
		s->dqrr.valid_bit ^= QB_VALID_BIT;
	}
	/* If this is the final response to a volatile dequeue command,
	 * indicate that the vdq is no longer busy.
	 */
	flags = qbman_result_DQ_flags(dq);
	response_verb = qb_attr_code_decode(&code_dqrr_response, &verb);
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & QBMAN_DQ_STAT_VOLATILE) &&
	    (flags & QBMAN_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.busy);

	return dq;
}

/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
void qbman_swp_dqrr_consume(struct qbman_swp *s,
			    const struct qbman_result *dq)
{
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
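
/* Example usage (illustrative sketch): drain whatever is currently sitting
 * in the DQRR, passing frame descriptors to 'handle_fd', a hypothetical
 * caller-supplied callback. Every entry returned by qbman_swp_dqrr_next()
 * must eventually be consumed to free its ring slot.
 */
static __attribute__((unused)) void example_poll_dqrr(struct qbman_swp *s,
			void (*handle_fd)(const struct qbman_fd *fd))
{
	const struct qbman_result *dq;

	while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
		if (qbman_result_is_DQ(dq))
			handle_fd(qbman_result_DQ_fd(dq));
		qbman_swp_dqrr_consume(s, dq);
	}
}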

/*********************************/
/* Polling user-provided storage */
/*********************************/

int qbman_result_has_new_result(__attribute__((unused)) struct qbman_swp *s,
				const struct qbman_result *dq)
{
	/* To avoid converting the little-endian DQ entry to host-endian prior
	 * to us knowing whether there is a valid entry or not (and run the
	 * risk of corrupting the incoming hardware LE write), we detect in
	 * hardware endianness rather than host. This means we need a different
	 * "code" depending on whether we are BE or LE in software, which is
	 * where DQRR_TOK_OFFSET comes in...
	 */
	static struct qb_attr_code code_dqrr_tok_detect =
					QB_CODE(0, DQRR_TOK_OFFSET, 8);
	/* The user trying to poll for a result treats "dq" as const. It is
	 * however the same address that was provided to us non-const in the
	 * first place, for directing hardware DMA to. So we can cast away the
	 * const because it is mutable from our perspective.
	 */
	uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
	uint32_t token;

	token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
	if (token == 0)
		return 0;
	/* Entry is valid - overwrite token back to 0 so
	 * a) If this memory is reused tokens will be 0
	 * b) If someone calls "has_new_result()" again on this entry it
	 *    will not appear to be new
	 */
	qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0);

	/* Only now do we convert from hardware to host endianness. Also, as we
	 * are returning success, the user has promised not to call us again, so
	 * there's no risk of us converting the endianness twice...
	 */
	make_le32_n(p, 16);
	return 1;
}

int qbman_check_command_complete(struct qbman_swp *s,
				 const struct qbman_result *dq)
{
	/* To avoid converting the little-endian DQ entry to host-endian prior
	 * to us knowing whether there is a valid entry or not (and run the
	 * risk of corrupting the incoming hardware LE write), we detect in
	 * hardware endianness rather than host. This means we need a different
	 * "code" depending on whether we are BE or LE in software, which is
	 * where DQRR_TOK_OFFSET comes in...
	 */
	static struct qb_attr_code code_dqrr_tok_detect =
					QB_CODE(0, DQRR_TOK_OFFSET, 8);
	/* The user trying to poll for a result treats "dq" as const. It is
	 * however the same address that was provided to us non-const in the
	 * first place, for directing hardware DMA to. So we can cast away the
	 * const because it is mutable from our perspective.
	 */
	uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
	uint32_t token;

	token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
	if (token == 0)
		return 0;
	/* TODO: Remove qbman_swp from parameters and make it a local
	 * once we've tested the reserve portal map change
	 */
	s = portal_idx_map[token - 1];
	/* A non-zero token means QBMan has fetched the VDQ command and is
	 * working on it. It is therefore safe for software to issue another
	 * VDQ command, so we increment the busy count.
	 */
	if (s->vdq.storage == dq) {
		s->vdq.storage = NULL;
		atomic_inc(&s->vdq.busy);
	}
	return 1;
}

/********************************/
/* Categorising qbman results   */
/********************************/

static struct qb_attr_code code_result_in_mem =
			QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7);

static inline int __qbman_result_is_x(const struct qbman_result *dq,
				      uint32_t x)
{
	const uint32_t *p = qb_cl(dq);
	uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p);

	return (response_verb == x);
}

static inline int __qbman_result_is_x_in_mem(const struct qbman_result *dq,
					     uint32_t x)
{
	const uint32_t *p = qb_cl(dq);
	uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p);

	return (response_verb == x);
}

int qbman_result_is_DQ(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
}

int qbman_result_is_FQDAN(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
}

int qbman_result_is_CDAN(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
}

int qbman_result_is_CSCN(const struct qbman_result *dq)
{
	return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) ||
		__qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
}

int qbman_result_is_BPSCN(const struct qbman_result *dq)
{
	return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN);
}

int qbman_result_is_CGCU(const struct qbman_result *dq)
{
	return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU);
}

int qbman_result_is_FQRN(const struct qbman_result *dq)
{
	return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN);
}

int qbman_result_is_FQRNI(const struct qbman_result *dq)
{
	return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI);
}

int qbman_result_is_FQPN(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
}

/*********************************/
/* Parsing frame dequeue results */
/*********************************/

/* These APIs assume qbman_result_is_DQ() is TRUE */

uint32_t qbman_result_DQ_flags(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return qb_attr_code_decode(&code_dqrr_stat, p);
}

uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p);
}

uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p);
}

uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return qb_attr_code_decode(&code_dqrr_fqid, p);
}

uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return qb_attr_code_decode(&code_dqrr_byte_count, p);
}

uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return qb_attr_code_decode(&code_dqrr_frame_count, p);
}

uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
{
	const uint64_t *p = (const uint64_t *)qb_cl(dq);

	return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p);
}

const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return (const struct qbman_fd *)&p[8];
}

/**************************************/
/* Parsing state-change notifications */
/**************************************/

static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8);
static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24);
static struct qb_attr_code code_scn_state_in_mem =
			QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8);
static struct qb_attr_code code_scn_rid_in_mem =
			QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24);
static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32);

uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
{
	const uint32_t *p = qb_cl(scn);

	return (uint8_t)qb_attr_code_decode(&code_scn_state, p);
}

uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
{
	const uint32_t *p = qb_cl(scn);

	return qb_attr_code_decode(&code_scn_rid, p);
}

uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
{
	const uint64_t *p = (const uint64_t *)qb_cl(scn);

	return qb_attr_code_decode_64(&code_scn_ctx_lo, p);
}

uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn)
{
	const uint32_t *p = qb_cl(scn);

	return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p);
}

uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn)
{
	const uint32_t *p = qb_cl(scn);
	uint32_t result_rid;

	result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p);
	return make_le24(result_rid);
}

/*****************/
/* Parsing BPSCN */
/*****************/
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
{
	return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF;
}

int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
{
	return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1);
}

int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2);
}

int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4);
}

uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
{
	uint64_t ctx;
	uint32_t ctx_hi, ctx_lo;

	ctx = qbman_result_SCN_ctx(scn);
	ctx_hi = upper32(ctx);
	ctx_lo = lower32(ctx);
	return ((uint64_t)make_le32(ctx_hi) << 32 |
		(uint64_t)make_le32(ctx_lo));
}

/*****************/
/* Parsing CGCU  */
/*****************/
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
{
	return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF;
}

uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
{
	uint64_t ctx;
	uint32_t ctx_hi, ctx_lo;

	ctx = qbman_result_SCN_ctx(scn);
	ctx_hi = upper32(ctx);
	ctx_lo = lower32(ctx);
	return ((uint64_t)(make_le32(ctx_hi) & 0xFF) << 32) |
		(uint64_t)make_le32(ctx_lo);
}

/******************/
/* Buffer release */
/******************/

/* These should be const, eventually */
/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */
static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1);
static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1);
static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16);

void qbman_release_desc_clear(struct qbman_release_desc *d)
{
	uint32_t *cl;

	memset(d, 0, sizeof(*d));
	cl = qb_cl(d);
	qb_attr_code_encode(&code_release_set_me, cl, 1);
}

void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_release_bpid, cl, bpid);
}

void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_release_rcdi, cl, !!enable);
}

#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
		      const uint64_t *buffers, unsigned int num_buffers)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);

	pr_debug("RAR=%08x\n", rar);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;
	QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
	/* Start the release command */
	p = qbman_cena_write_start_wo_shadow(&s->sys,
					     QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
	/* Copy the caller's buffer pointers to the command */
	u64_to_le32_copy(&p[2], buffers, num_buffers);
	/* Set the verb byte, have to substitute in the valid-bit and the number
	 * of buffers.
	 */
	lwsync();
	p[0] = cl[0] | RAR_VB(rar) | num_buffers;
	qbman_cena_write_complete_wo_shadow(&s->sys,
					    QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
	return 0;
}

/*******************/
/* Buffer acquires */
/*******************/

/* These should be const, eventually */
static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16);
static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3);
static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3);

int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
		      unsigned int num_buffers)
{
	uint32_t *p;
	uint32_t rslt, num;

	QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));

	/* Start the management command */
	p = qbman_swp_mc_start(s);

	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	qb_attr_code_encode(&code_acquire_bpid, p, bpid);
	qb_attr_code_encode(&code_acquire_num, p, num_buffers);

	/* Complete the management command */
	p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE);

	/* Decode the outcome */
	rslt = qb_attr_code_decode(&code_generic_rslt, p);
	num = qb_attr_code_decode(&code_acquire_r_num, p);
	QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) !=
		     QBMAN_MC_ACQUIRE);

	/* Determine success or failure */
	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
		       bpid, rslt);
		return -EIO;
	}
	QBMAN_BUG_ON(num > num_buffers);
	/* Copy the acquired buffers to the caller's array */
	u64_from_le32_copy(buffers, &p[2], num);
	return (int)num;
}
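
/* Example usage (illustrative sketch): borrow up to seven buffers from
 * buffer pool 'bpid' and hand them straight back. A real user would keep
 * the acquired addresses for frame data instead of releasing them.
 */
static __attribute__((unused)) int example_acquire_release(
		struct qbman_swp *s, uint32_t bpid)
{
	uint64_t buffers[7];
	struct qbman_release_desc rd;
	int num, ret;

	num = qbman_swp_acquire(s, bpid, buffers, 7);
	if (num <= 0)
		return num;	/* pool empty, or the command failed */

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);
	do {
		ret = qbman_swp_release(s, &rd, buffers, (unsigned int)num);
	} while (ret == -EBUSY);
	return ret;
}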

/*****************/
/* FQ management */
/*****************/

static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32);

static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
				  uint8_t alt_fq_verb)
{
	uint32_t *p;
	uint32_t rslt;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	qb_attr_code_encode(&code_fqalt_fqid, p, fqid);
	/* Complete the management command */
	p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb);

	/* Decode the outcome */
	rslt = qb_attr_code_decode(&code_generic_rslt, p);
	QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != alt_fq_verb);

	/* Determine success or failure */
	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
		       fqid, alt_fq_verb, rslt);
		return -EIO;
	}

	return 0;
}

int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}

/**********************/
/* Channel management */
/**********************/

static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12);
static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8);
static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1);
static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32);

/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
 * would be irresponsible to expose it.
 */
#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4

static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
			      uint8_t we_mask, uint8_t cdan_en,
			      uint64_t ctx)
{
	uint32_t *p;
	uint32_t rslt;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	qb_attr_code_encode(&code_cdan_cid, p, channelid);
	qb_attr_code_encode(&code_cdan_we, p, we_mask);
	qb_attr_code_encode(&code_cdan_en, p, cdan_en);
	qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx);
	/* Complete the management command */
	p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE);

	/* Decode the outcome */
	rslt = qb_attr_code_decode(&code_generic_rslt, p);
	QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p)
					!= QBMAN_WQCHAN_CONFIGURE);

	/* Determine success or failure */
	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("CDAN cQID %d failed: code = 0x%02x\n",
		       channelid, rslt);
		return -EIO;
	}

	return 0;
}

int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
			       uint64_t ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_CTX,
				  0, ctx);
}

int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  1, 0);
}

int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  0, 0);
}

int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
				      uint64_t ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
				  1, ctx);
}
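
/* Example usage (illustrative sketch): arm CDAN notifications for a channel
 * with a recognisable context value, then disarm them again. The context
 * value is arbitrary and chosen by the caller.
 */
static __attribute__((unused)) int example_cdan_cycle(struct qbman_swp *s,
						      uint16_t channelid)
{
	int ret;

	ret = qbman_swp_CDAN_set_context_enable(s, channelid, 0x12345678ULL);
	if (ret)
		return ret;
	return qbman_swp_CDAN_disable(s, channelid);
}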

uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr)
{
	return QBMAN_IDX_FROM_DQRR(dqrr);
}

struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
{
	struct qbman_result *dq;

	dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
	return dq;
}

int qbman_swp_send_multiple(struct qbman_swp *s,
			    const struct qbman_eq_desc *d,
			    const struct qbman_fd *fd,
			    int frames_to_send)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci;
	uint8_t diff;
	int sent = 0;
	int i;
	int initial_pi = s->eqcr.pi;
	uint64_t start_pointer;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				 QBMAN_CENA_SWP_EQCR_CI) & 0xF;
		diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
				   eqcr_ci, s->eqcr.ci);
		if (!diff)
			goto done;
		s->eqcr.available += diff;
	}

	/* Send as many of the frames_to_send frames as the ring has space
	 * for.
	 */
	while (s->eqcr.available && frames_to_send--) {
		p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
					QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
		/* Write the command (except its first word, which holds the
		 * verb) and the FD.
		 */
		memcpy(&p[1], &cl[1], 7 * 4);
		memcpy(&p[8], &fd[sent], sizeof(struct qbman_fd));

		initial_pi++;
		initial_pi &= 0xF;
		s->eqcr.available--;
		sent++;
	}

done:
	lwsync();

	/* Set the verb word of every written entry only now, after the single
	 * barrier above has ordered all of the payload stores.
	 */
	initial_pi = s->eqcr.pi;
	for (i = 0; i < sent; i++) {
		p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
					QBMAN_CENA_SWP_EQCR((initial_pi) & 7));

		p[0] = cl[0] | s->eqcr.pi_vb;
		initial_pi++;
		initial_pi &= 0xF;

		if (!(initial_pi & 7))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	initial_pi = s->eqcr.pi;

	/* We need to flush all the lines but without load/store operations
	 * between them. We assign start_pointer before we start the loop so
	 * that in the loop we do not read it from memory.
	 */
	start_pointer = (uint64_t)s->sys.addr_cena;
	for (i = 0; i < sent; i++) {
		p = (uint32_t *)(start_pointer
				 + QBMAN_CENA_SWP_EQCR(initial_pi & 7));
		dcbf((uint64_t)p);
		initial_pi++;
		initial_pi &= 0xF;
	}

	/* Update producer index for the next call */
	s->eqcr.pi = initial_pi;

	return sent;
}
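
/* Example usage (illustrative sketch): push a burst of frames with
 * qbman_swp_send_multiple(), re-submitting the remainder until everything
 * is on the ring. A real caller would bound the retries.
 */
static __attribute__((unused)) void example_send_burst(struct qbman_swp *s,
			const struct qbman_eq_desc *d,
			const struct qbman_fd *fds, int n)
{
	int sent = 0;

	while (sent < n)
		sent += qbman_swp_send_multiple(s, d, &fds[sent], n - sent);
}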

int qbman_get_version(void)
{
	return qman_version;
}