/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Copyright 2019 NXP
 *
 */

#ifndef __FSL_QMAN_H
#define __FSL_QMAN_H

#ifdef __cplusplus
extern "C" {
#endif

#include <dpaa_rbtree.h>
#include <rte_eventdev.h>

/* FQ lookups (turn this on for 64bit user-space) */
#if (__WORDSIZE == 64)
#define CONFIG_FSL_QMAN_FQ_LOOKUP
/* if FQ lookups are supported, this controls the number of initialised,
 * s/w-consumed FQs that can be supported at any one time.
 */
#define CONFIG_FSL_QMAN_FQ_LOOKUP_MAX (32 * 1024)
#endif

/* Last updated for v00.800 of the BG */

/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
#define QMAN_CHANNEL_POOL1 0x21
#define QMAN_CHANNEL_CAAM 0x80
#define QMAN_CHANNEL_PME 0xa0
#define QMAN_CHANNEL_POOL1_REV3 0x401
#define QMAN_CHANNEL_CAAM_REV3 0x840
#define QMAN_CHANNEL_PME_REV3 0x860
extern u16 qm_channel_pool1;
extern u16 qm_channel_caam;
extern u16 qm_channel_pme;
enum qm_dc_portal {
	qm_dc_portal_fman0 = 0,
	qm_dc_portal_fman1 = 1,
	qm_dc_portal_caam = 2,
	qm_dc_portal_pme = 3
};

/* Portal processing (interrupt) sources */
#define QM_PIRQ_CCSCI	0x00200000	/* CEETM Congestion State Change */
#define QM_PIRQ_CSCI	0x00100000	/* Congestion State Change */
#define QM_PIRQ_EQCI	0x00080000	/* Enqueue Command Committed */
#define QM_PIRQ_EQRI	0x00040000	/* EQCR Ring (below threshold) */
#define QM_PIRQ_DQRI	0x00020000	/* DQRR Ring (non-empty) */
#define QM_PIRQ_MRI	0x00010000	/* MR Ring (non-empty) */
/*
 * This mask contains all the interrupt sources that need handling except DQRI,
 * ie. that if present should trigger slow-path processing.
 */
#define QM_PIRQ_SLOW	(QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
			QM_PIRQ_MRI | QM_PIRQ_CCSCI)

/* For qman_static_dequeue_*** APIs */
#define QM_SDQCR_CHANNELS_POOL_MASK	0x00007fff
/* for n in [1,15] */
#define QM_SDQCR_CHANNELS_POOL(n)	(0x00008000 >> (n))
/* for conversion from n of qm_channel */
static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
{
	return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
}

/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
 * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
 * FQID(n) to fill in the frame queue ID.
 */
#define QM_VDQCR_PRECEDENCE_VDQCR	0x0
#define QM_VDQCR_PRECEDENCE_SDQCR	0x80000000
#define QM_VDQCR_EXACT			0x40000000
#define QM_VDQCR_NUMFRAMES_MASK		0x3f000000
#define QM_VDQCR_NUMFRAMES_SET(n)	(((n) & 0x3f) << 24)
#define QM_VDQCR_NUMFRAMES_GET(n)	(((n) >> 24) & 0x3f)
#define QM_VDQCR_NUMFRAMES_TILLEMPTY	QM_VDQCR_NUMFRAMES_SET(0)
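
/*
 * Illustrative sketch (not itself part of the API): composing a VDQCR word
 * that takes precedence over SDQCR and requests exactly 8 frames;
 *   u32 vdqcr = QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_EXACT |
 *		 QM_VDQCR_NUMFRAMES_SET(8);
 */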

/* --- QMan data structures (and associated constants) --- */

/* Represents s/w corenet portal mapped data structures */
struct qm_eqcr_entry;	/* EQCR (EnQueue Command Ring) entries */
struct qm_dqrr_entry;	/* DQRR (DeQueue Response Ring) entries */
struct qm_mr_entry;	/* MR (Message Ring) entries */
struct qm_mc_command;	/* MC (Management Command) command */
struct qm_mc_result;	/* MC result */

#define QM_FD_FORMAT_SG		0x4
#define QM_FD_FORMAT_LONG	0x2
#define QM_FD_FORMAT_COMPOUND	0x1
enum qm_fd_format {
	/*
	 * 'contig' implies a contiguous buffer, whereas 'sg' implies a
	 * scatter-gather table. 'big' implies a 29-bit length with no offset
	 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
	 * implies a s/g-like table, where each entry itself represents a frame
	 * (contiguous or scatter-gather) and the 29-bit "length" is
	 * interpreted purely for congestion calculations, ie. a "congestion
	 * weight".
	 */
	qm_fd_contig = 0,
	qm_fd_contig_big = QM_FD_FORMAT_LONG,
	qm_fd_sg = QM_FD_FORMAT_SG,
	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
	qm_fd_compound = QM_FD_FORMAT_COMPOUND
};

/* Capitalised versions are un-typed but can be used in static expressions */
#define QM_FD_CONTIG	0
#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
#define QM_FD_SG	QM_FD_FORMAT_SG
#define QM_FD_SG_BIG	(QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
#define QM_FD_COMPOUND	QM_FD_FORMAT_COMPOUND

/* "Frame Descriptor (FD)" */
struct qm_fd {
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 dd:2;	/* dynamic debug */
			u8 liodn_offset:6;
			u8 bpid:8;	/* Buffer Pool ID */
			u8 eliodn_offset:4;
			u8 __reserved:4;
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u32 addr_lo;	/* low 32-bits of 40-bit address */
#else
			u8 liodn_offset:6;
			u8 dd:2;	/* dynamic debug */
			u8 bpid:8;	/* Buffer Pool ID */
			u8 __reserved:4;
			u8 eliodn_offset:4;
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u32 addr_lo;	/* low 32-bits of 40-bit address */
#endif
		};
		struct {
			u64 __notaddress:24;
			/* More efficient address accessor */
			u64 addr:40;
		};
		u64 opaque_addr;
	};
	/* The 'format' field indicates the interpretation of the remaining 29
	 * bits of the 32-bit word. For packing reasons, it is duplicated in the
	 * other union elements. Note, union'd structs are difficult to use with
	 * static initialisation under gcc, in which case use the "opaque" form
	 * with one of the macros.
	 */
	union {
		/* For easier/faster copying of this part of the fd (eg. from a
		 * DQRR entry to an EQCR entry) copy 'opaque'
		 */
		u32 opaque;
		/* If 'format' is _contig or _sg, 20b length and 9b offset */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			enum qm_fd_format format:3;
			u16 offset:9;
			u32 length20:20;
#else
			u32 length20:20;
			u16 offset:9;
			enum qm_fd_format format:3;
#endif
		};
		/* If 'format' is _contig_big or _sg_big, 29b length */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			enum qm_fd_format _format1:3;
			u32 length29:29;
#else
			u32 length29:29;
			enum qm_fd_format _format1:3;
#endif
		};
		/* If 'format' is _compound, 29b "congestion weight" */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			enum qm_fd_format _format2:3;
			u32 cong_weight:29;
#else
			u32 cong_weight:29;
			enum qm_fd_format _format2:3;
#endif
		};
	};
	union {
		u32 cmd;
		u32 status;
	};
} __attribute__((aligned(8)));
#define QM_FD_DD_NULL		0x00
#define QM_FD_PID_MASK		0x3f
static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
{
	return fd->addr;
}

static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
{
	return (dma_addr_t)fd->addr;
}

/* Macro, so we compile better if 'v' isn't always 64-bit */
#define qm_fd_addr_set64(fd, v) \
	do { \
		struct qm_fd *__fd931 = (fd); \
		__fd931->addr = v; \
	} while (0)
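
/*
 * Illustrative sketch: filling in a simple contiguous FD ('buf_phys',
 * 'headroom' and 'pkt_len' are placeholders, not part of this API);
 *   struct qm_fd fd;
 *   fd.opaque_addr = 0;
 *   fd.opaque = 0;
 *   fd.cmd = 0;
 *   qm_fd_addr_set64(&fd, buf_phys);
 *   fd.format = qm_fd_contig;
 *   fd.offset = headroom;
 *   fd.length20 = pkt_len;
 */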

/* Scatter/Gather table entry */
struct qm_sg_entry {
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 __reserved1[3];
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u32 addr_lo;	/* low 32-bits of 40-bit address */
#else
			u32 addr_lo;	/* low 32-bits of 40-bit address */
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u8 __reserved1[3];
#endif
		};
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u64 __notaddress:24;
			u64 addr:40;
#else
			u64 addr:40;
			u64 __notaddress:24;
#endif
		};
		u64 opaque;
	};
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 extension:1;	/* Extension bit */
			u32 final:1;		/* Final bit */
			u32 length:30;
#else
			u32 length:30;
			u32 final:1;		/* Final bit */
			u32 extension:1;	/* Extension bit */
#endif
		};
		u32 val;
	};
	u8 __reserved2;
	u8 bpid;
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 __reserved3:3;
			u16 offset:13;
#else
			u16 offset:13;
			u16 __reserved3:3;
#endif
		};
		u16 val_off;
	};
} __packed;
static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
{
	return sg->addr;
}

static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
{
	return (dma_addr_t)sg->addr;
}

/* Macro, so we compile better if 'v' isn't always 64-bit */
#define qm_sg_entry_set64(sg, v) \
	do { \
		struct qm_sg_entry *__sg931 = (sg); \
		__sg931->addr = v; \
	} while (0)
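
/*
 * Illustrative sketch: describing a two-segment frame with an S/G table
 * ('seg*_phys'/'seg*_len' are placeholders; any byte-order conversion needed
 * before handing the table to hardware is omitted here). The FD would then
 * use the qm_fd_sg format and its address would point at 'sg';
 *   struct qm_sg_entry sg[2];
 *   memset(sg, 0, sizeof(sg));
 *   qm_sg_entry_set64(&sg[0], seg0_phys);
 *   sg[0].length = seg0_len;
 *   qm_sg_entry_set64(&sg[1], seg1_phys);
 *   sg[1].length = seg1_len;
 *   sg[1].final = 1;
 */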

/* See 1.5.8.1: "Enqueue Command" */
struct __rte_aligned(8) qm_eqcr_entry {
	u8 __dont_write_directly__verb;
	u8 dca;
	u16 seqnum;
	u32 orp;	/* 24-bit */
	u32 fqid;	/* 24-bit */
	u32 tag;
	struct qm_fd fd; /* this has alignment 8 */
	u8 __reserved3[32];
} __packed;


/* "Frame Dequeue Response" */
struct __rte_aligned(8) qm_dqrr_entry {
	u8 verb;
	u8 stat;
	u16 seqnum;	/* 15-bit */
	u8 tok;
	u8 __reserved2[3];
	u32 fqid;	/* 24-bit */
	u32 contextB;
	struct qm_fd fd; /* this has alignment 8 */
	u8 __reserved4[32];
};

#define QM_DQRR_VERB_VBIT		0x80
#define QM_DQRR_VERB_MASK		0x7f	/* where the verb contains; */
#define QM_DQRR_VERB_FRAME_DEQUEUE	0x60	/* "this format" */
#define QM_DQRR_STAT_FQ_EMPTY		0x80	/* FQ empty */
#define QM_DQRR_STAT_FQ_HELDACTIVE	0x40	/* FQ held active */
#define QM_DQRR_STAT_FQ_FORCEELIGIBLE	0x20	/* FQ was force-eligible'd */
#define QM_DQRR_STAT_FD_VALID		0x10	/* has a non-NULL FD */
#define QM_DQRR_STAT_UNSCHEDULED	0x02	/* Unscheduled dequeue */
#define QM_DQRR_STAT_DQCR_EXPIRED	0x01	/* VDQCR or PDQCR expired */


/* "ERN Message Response" */
/* "FQ State Change Notification" */
struct qm_mr_entry {
	union {
		struct {
			u8 verb;
			u8 dca;
			u16 seqnum;
			u8 rc;		/* Rejection Code */
			u32 orp:24;
			u32 fqid;	/* 24-bit */
			u32 tag;
			struct qm_fd fd; /* this has alignment 8 */
		} __packed __rte_aligned(8) ern;
		struct {
			u8 verb;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 colour:2;	/* See QM_MR_DCERN_COLOUR_* */
			u8 __reserved1:4;
			enum qm_dc_portal portal:2;
#else
			enum qm_dc_portal portal:2;
			u8 __reserved1:4;
			u8 colour:2;	/* See QM_MR_DCERN_COLOUR_* */
#endif
			u16 __reserved2;
			u8 rc;		/* Rejection Code */
			u32 __reserved3:24;
			u32 fqid;	/* 24-bit */
			u32 tag;
			struct qm_fd fd; /* this has alignment 8 */
		} __packed __rte_aligned(8) dcern;
		struct {
			u8 verb;
			u8 fqs;		/* Frame Queue Status */
			u8 __reserved1[6];
			u32 fqid;	/* 24-bit */
			u32 contextB;
			u8 __reserved2[16];
		} __packed __rte_aligned(8) fq;	/* FQRN/FQRNI/FQRL/FQPN */
	};
	u8 __reserved2[32];
} __packed __rte_aligned(8);
#define QM_MR_VERB_VBIT			0x80
/*
 * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
 * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
 * from the other MR types by noting if the 0x20 bit is unset.
 */
#define QM_MR_VERB_TYPE_MASK		0x27
#define QM_MR_VERB_DC_ERN		0x20
#define QM_MR_VERB_FQRN			0x21
#define QM_MR_VERB_FQRNI		0x22
#define QM_MR_VERB_FQRL			0x23
#define QM_MR_VERB_FQPN			0x24
#define QM_MR_RC_MASK			0xf0	/* contains one of; */
#define QM_MR_RC_CGR_TAILDROP		0x00
#define QM_MR_RC_WRED			0x10
#define QM_MR_RC_ERROR			0x20
#define QM_MR_RC_ORPWINDOW_EARLY	0x30
#define QM_MR_RC_ORPWINDOW_LATE		0x40
#define QM_MR_RC_FQ_TAILDROP		0x50
#define QM_MR_RC_ORPWINDOW_RETIRED	0x60
#define QM_MR_RC_ORP_ZERO		0x70
#define QM_MR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
#define QM_MR_DCERN_COLOUR_GREEN	0x00
#define QM_MR_DCERN_COLOUR_YELLOW	0x01
#define QM_MR_DCERN_COLOUR_RED		0x02
#define QM_MR_DCERN_COLOUR_OVERRIDE	0x03
/*
 * An identical structure of FQD fields is present in the "Init FQ" command and
 * the "Query FQ" result; it's suctioned out into the "struct qm_fqd" type.
 * Within that, the 'stashing' and 'taildrop' pieces are also factored out;
 * the latter has two inlines to assist with converting to/from the mant+exp
 * representation.
 */
struct qm_fqd_stashing {
	/* See QM_STASHING_EXCL_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 exclusive;
	u8 __reserved1:2;
	/* Numbers of cachelines */
	u8 annotation_cl:2;
	u8 data_cl:2;
	u8 context_cl:2;
#else
	u8 context_cl:2;
	u8 data_cl:2;
	u8 annotation_cl:2;
	u8 __reserved1:2;
	u8 exclusive;
#endif
} __packed;
struct qm_fqd_taildrop {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 __reserved1:3;
	u16 mant:8;
	u16 exp:5;
#else
	u16 exp:5;
	u16 mant:8;
	u16 __reserved1:3;
#endif
} __packed;
struct qm_fqd_oac {
	/* "Overhead Accounting Control", see QM_OAC_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 oac:2; /* "Overhead Accounting Control" */
	u8 __reserved1:6;
#else
	u8 __reserved1:6;
	u8 oac:2; /* "Overhead Accounting Control" */
#endif
	/* Two's-complement value (-128 to +127) */
	signed char oal; /* "Overhead Accounting Length" */
} __packed;
struct qm_fqd {
	union {
		u8 orpc;
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 __reserved1:2;
			u8 orprws:3;
			u8 oa:1;
			u8 olws:2;
#else
			u8 olws:2;
			u8 oa:1;
			u8 orprws:3;
			u8 __reserved1:2;
#endif
		} __packed;
	};
	u8 cgid;
	u16 fq_ctrl;	/* See QM_FQCTRL_<...> */
	union {
		u16 dest_wq;
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 channel:13; /* qm_channel */
			u16 wq:3;
#else
			u16 wq:3;
			u16 channel:13; /* qm_channel */
#endif
		} __packed dest;
	};
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 __reserved2:1;
	u16 ics_cred:15;
#else
	u16 ics_cred:15;
	u16 __reserved2:1;
#endif
	/*
	 * For "Initialize Frame Queue" commands, the write-enable mask
	 * determines whether 'td' or 'oac_init' is observed. For query
	 * commands, this field is always 'td', and 'oac_query' (below) reflects
	 * the Overhead ACcounting values.
	 */
	union {
		uint16_t opaque_td;
		struct qm_fqd_taildrop td;
		struct qm_fqd_oac oac_init;
	};
	u32 context_b;
	union {
		/* Treat it as 64-bit opaque */
		u64 opaque;
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 hi;
			u32 lo;
#else
			u32 lo;
			u32 hi;
#endif
		};
		/* Treat it as s/w portal stashing config */
		/* see "FQD Context_A field used for [...]" */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			struct qm_fqd_stashing stashing;
			/*
			 * 48-bit address of FQ context to
			 * stash, must be cacheline-aligned
			 */
			u16 context_hi;
			u32 context_lo;
#else
			u32 context_lo;
			u16 context_hi;
			struct qm_fqd_stashing stashing;
#endif
		} __packed;
	} context_a;
	struct qm_fqd_oac oac_query;
} __packed;
/* 64-bit converters for context_hi/lo */
static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
{
	return ((u64)fqd->context_a.context_hi << 32) |
		(u64)fqd->context_a.context_lo;
}

static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
{
	return (dma_addr_t)qm_fqd_stashing_get64(fqd);
}

static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
{
	return ((u64)fqd->context_a.hi << 32) |
		(u64)fqd->context_a.lo;
}

static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.context_hi = upper_32_bits(addr);
	fqd->context_a.context_lo = lower_32_bits(addr);
}

static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.hi = upper_32_bits(addr);
	fqd->context_a.lo = lower_32_bits(addr);
}
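
/*
 * Illustrative sketch: requesting stashing of one cacheline of frame data
 * and one cacheline of FQ context ('opts' is a 'struct qm_mcc_initfq' and
 * 'ctx_phys' a cacheline-aligned address; both are placeholders);
 *   opts.fqd.context_a.stashing.data_cl = 1;
 *   opts.fqd.context_a.stashing.context_cl = 1;
 *   qm_fqd_stashing_set64(&opts.fqd, ctx_phys);
 */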

/* convert a threshold value into mant+exp representation */
static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
				      int roundup)
{
	u32 e = 0;
	int oddbit = 0;

	if (val > 0xe0000000)
		return -ERANGE;
	while (val > 0xff) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}
	td->exp = e;
	td->mant = val;
	return 0;
}

/* and the other direction */
static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
{
	return (u32)td->mant << td->exp;
}
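
/*
 * Worked example of the mant+exp encoding: qm_fqd_taildrop_set(&td, 3000, 0)
 * halves 3000 down to 187 in four steps, giving mant=187, exp=4, ie. a
 * threshold of 187 << 4 = 2992; with roundup=1 the dropped odd bit bumps the
 * mantissa to 188, ie. 3008. qm_fqd_taildrop_get() returns mant << exp.
 */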


/* See "Frame Queue Descriptor (FQD)" */
/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
#define QM_FQCTRL_MASK		0x07ff	/* 'fq_ctrl' flags; */
#define QM_FQCTRL_CGE		0x0400	/* Congestion Group Enable */
#define QM_FQCTRL_TDE		0x0200	/* Tail-Drop Enable */
#define QM_FQCTRL_ORP		0x0100	/* ORP Enable */
#define QM_FQCTRL_CTXASTASHING	0x0080	/* Context-A stashing */
#define QM_FQCTRL_CPCSTASH	0x0040	/* CPC Stash Enable */
#define QM_FQCTRL_FORCESFDR	0x0008	/* High-priority SFDRs */
#define QM_FQCTRL_AVOIDBLOCK	0x0004	/* Don't block active */
#define QM_FQCTRL_HOLDACTIVE	0x0002	/* Hold active in portal */
#define QM_FQCTRL_PREFERINCACHE	0x0001	/* Aggressively cache FQD */
#define QM_FQCTRL_LOCKINCACHE	QM_FQCTRL_PREFERINCACHE /* older naming */

/* See "FQD Context_A field used for [...]" */
/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
#define QM_STASHING_EXCL_ANNOTATION	0x04
#define QM_STASHING_EXCL_DATA		0x02
#define QM_STASHING_EXCL_CTX		0x01

/* See "Intra Class Scheduling" */
/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
#define QM_OAC_ICS		0x2 /* Accounting for Intra-Class Scheduling */
#define QM_OAC_CG		0x1 /* Accounting for Congestion Groups */

/*
 * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
 * and associated commands/responses. The WRED parameters are calculated from
 * these fields as follows;
 *   MaxTH = MA * (2 ^ Mn)
 *   Slope = SA / (2 ^ Sn)
 *    MaxP = 4 * (Pn + 1)
 */
struct qm_cgr_wr_parm {
	union {
		u32 word;
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 MA:8;
			u32 Mn:5;
			u32 SA:7; /* must be between 64-127 */
			u32 Sn:6;
			u32 Pn:6;
#else
			u32 Pn:6;
			u32 Sn:6;
			u32 SA:7; /* must be between 64-127 */
			u32 Mn:5;
			u32 MA:8;
#endif
		} __packed;
	};
} __packed;
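/*
 * Worked example of the formulas above: MA=128, Mn=9 encodes
 * MaxTH = 128 * (2 ^ 9) = 65536, and Pn=63 encodes MaxP = 4 * (63 + 1) = 256.
 */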
/*
 * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
 * management commands, this is padded to a 16-bit structure field, so that's
 * how we represent it here. The congestion state threshold is calculated from
 * these fields as follows;
 *   CS threshold = TA * (2 ^ Tn)
 */
struct qm_cgr_cs_thres {
	union {
		u16 hword;
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 __reserved:3;
			u16 TA:8;
			u16 Tn:5;
#else
			u16 Tn:5;
			u16 TA:8;
			u16 __reserved:3;
#endif
		} __packed;
	};
} __packed;
/*
 * This identical structure of CGR fields is present in the "Init/Modify CGR"
 * commands and the "Query CGR" result. It's suctioned out here into its own
 * struct.
 */
struct __qm_mc_cgr {
	struct qm_cgr_wr_parm wr_parm_g;
	struct qm_cgr_wr_parm wr_parm_y;
	struct qm_cgr_wr_parm wr_parm_r;
	u8 wr_en_g;	/* boolean, use QM_CGR_EN */
	u8 wr_en_y;	/* boolean, use QM_CGR_EN */
	u8 wr_en_r;	/* boolean, use QM_CGR_EN */
	u8 cscn_en;	/* boolean, use QM_CGR_EN */
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_CTRL_* */
			u16 cscn_targ_dcp_low;  /* CSCN_TARG_DCP low-16bits */
#else
			u16 cscn_targ_dcp_low;  /* CSCN_TARG_DCP low-16bits */
			u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_CTRL_* */
#endif
		};
		u32 cscn_targ;	/* use QM_CGR_TARG_* */
	};
	u8 cstd_en;	/* boolean, use QM_CGR_EN */
	u8 cs;		/* boolean, only used in query response */
	union {
		struct qm_cgr_cs_thres cs_thres;
		/* use qm_cgr_cs_thres_set64() */
		u16 __cs_thres;
	};
	u8 mode;	/* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
} __packed;
#define QM_CGR_EN		0x01 /* For wr_en_*, cscn_en, cstd_en */
#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT	0x8000 /* value written to portal bit */
#define QM_CGR_TARG_UDP_CTRL_DCP	0x4000 /* 0: SWP, 1: DCP */
#define QM_CGR_TARG_PORTAL(n)	(0x80000000 >> (n)) /* s/w portal, 0-9 */
#define QM_CGR_TARG_FMAN0	0x00200000 /* direct-connect portal: fman0 */
#define QM_CGR_TARG_FMAN1	0x00100000 /*			   : fman1 */
/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
	return (u64)th->TA << th->Tn;
}

static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
					int roundup)
{
	u32 e = 0;
	int oddbit = 0;

	while (val > 0xff) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}
	th->Tn = e;
	th->TA = val;
	return 0;
}
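
/*
 * Worked example: qm_cgr_cs_thres_set64(&th, 0x10000, 0) halves 65536 nine
 * times down to 128, giving TA=128, Tn=9; qm_cgr_cs_thres_get64() then
 * reconstructs 128 << 9 = 65536 exactly.
 */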

/* See 1.5.8.5.1: "Initialize FQ" */
/* See 1.5.8.5.2: "Query FQ" */
/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
/* See 1.5.8.5.4: "Alter FQ State Commands" */
/* See 1.5.8.6.1: "Initialize/Modify CGR" */
/* See 1.5.8.6.2: "CGR Test Write" */
/* See 1.5.8.6.3: "Query CGR" */
/* See 1.5.8.6.4: "Query Congestion Group State" */
struct qm_mcc_initfq {
	u8 __reserved1;
	u16 we_mask;	/* Write Enable Mask */
	u32 fqid;	/* 24-bit */
	u16 count;	/* Initialises 'count+1' FQDs */
	struct qm_fqd fqd; /* the FQD fields go here */
	u8 __reserved3[30];
} __packed;
struct qm_mcc_queryfq {
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __packed;
struct qm_mcc_queryfq_np {
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __packed;
struct qm_mcc_alterfq {
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2;
	u8 count;	/* number of consecutive FQID */
	u8 __reserved3[10];
	u32 context_b;	/* frame queue context b */
	u8 __reserved4[40];
} __packed;
struct qm_mcc_initcgr {
	u8 __reserved1;
	u16 we_mask;	/* Write Enable Mask */
	struct __qm_mc_cgr cgr;	/* CGR fields */
	u8 __reserved2[2];
	u8 cgid;
	u8 __reserved4[32];
} __packed;
struct qm_mcc_cgrtestwrite {
	u8 __reserved1[2];
	u8 i_bcnt_hi:8;	/* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
	u8 __reserved2[23];
	u8 cgid;
	u8 __reserved3[32];
} __packed;
struct qm_mcc_querycgr {
	u8 __reserved1[30];
	u8 cgid;
	u8 __reserved2[32];
} __packed;
struct qm_mcc_querycongestion {
	u8 __reserved[63];
} __packed;
struct qm_mcc_querywq {
	u8 __reserved;
	/* select channel if verb != QUERYWQ_DEDICATED */
	union {
		u16 channel_wq; /* ignores wq (3 lsbits) */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 id:13; /* qm_channel */
			u16 __reserved1:3;
#else
			u16 __reserved1:3;
			u16 id:13; /* qm_channel */
#endif
		} __packed channel;
	};
	u8 __reserved2[60];
} __packed;

struct qm_mc_command {
	u8 __dont_write_directly__verb;
	union {
		struct qm_mcc_initfq initfq;
		struct qm_mcc_queryfq queryfq;
		struct qm_mcc_queryfq_np queryfq_np;
		struct qm_mcc_alterfq alterfq;
		struct qm_mcc_initcgr initcgr;
		struct qm_mcc_cgrtestwrite cgrtestwrite;
		struct qm_mcc_querycgr querycgr;
		struct qm_mcc_querycongestion querycongestion;
		struct qm_mcc_querywq querywq;
	};
} __packed;

/* INITFQ-specific flags */
#define QM_INITFQ_WE_MASK		0x01ff	/* 'Write Enable' flags; */
#define QM_INITFQ_WE_OAC		0x0100
#define QM_INITFQ_WE_ORPC		0x0080
#define QM_INITFQ_WE_CGID		0x0040
#define QM_INITFQ_WE_FQCTRL		0x0020
#define QM_INITFQ_WE_DESTWQ		0x0010
#define QM_INITFQ_WE_ICSCRED		0x0008
#define QM_INITFQ_WE_TDTHRESH		0x0004
#define QM_INITFQ_WE_CONTEXTB		0x0002
#define QM_INITFQ_WE_CONTEXTA		0x0001
/* INITCGR/MODIFYCGR-specific flags */
#define QM_CGR_WE_MASK			0x07ff	/* 'Write Enable Mask'; */
#define QM_CGR_WE_WR_PARM_G		0x0400
#define QM_CGR_WE_WR_PARM_Y		0x0200
#define QM_CGR_WE_WR_PARM_R		0x0100
#define QM_CGR_WE_WR_EN_G		0x0080
#define QM_CGR_WE_WR_EN_Y		0x0040
#define QM_CGR_WE_WR_EN_R		0x0020
#define QM_CGR_WE_CSCN_EN		0x0010
#define QM_CGR_WE_CSCN_TARG		0x0008
#define QM_CGR_WE_CSTD_EN		0x0004
#define QM_CGR_WE_CS_THRES		0x0002
#define QM_CGR_WE_MODE			0x0001

struct qm_mcr_initfq {
	u8 __reserved1[62];
} __packed;
struct qm_mcr_queryfq {
	u8 __reserved1[8];
	struct qm_fqd fqd;	/* the FQD fields are here */
	u8 __reserved2[30];
} __packed;
struct qm_mcr_queryfq_np {
	u8 __reserved1;
	u8 state;	/* QM_MCR_NP_STATE_*** */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 __reserved2;
	u32 fqd_link:24;
	u16 __reserved3:2;
	u16 odp_seq:14;
	u16 __reserved4:2;
	u16 orp_nesn:14;
	u16 __reserved5:1;
	u16 orp_ea_hseq:15;
	u16 __reserved6:1;
	u16 orp_ea_tseq:15;
	u8 __reserved7;
	u32 orp_ea_hptr:24;
	u8 __reserved8;
	u32 orp_ea_tptr:24;
	u8 __reserved9;
	u32 pfdr_hptr:24;
	u8 __reserved10;
	u32 pfdr_tptr:24;
	u8 __reserved11[5];
	u8 __reserved12:7;
	u8 is:1;
	u16 ics_surp;
	u32 byte_cnt;
	u8 __reserved13;
	u32 frm_cnt:24;
	u32 __reserved14;
	u16 ra1_sfdr;	/* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr;	/* QM_MCR_NP_RA2_*** */
	u16 __reserved15;
	u16 od1_sfdr;	/* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr;	/* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr;	/* QM_MCR_NP_OD3_*** */
#else
	u8 __reserved2;
	u32 fqd_link:24;

	u16 odp_seq:14;
	u16 __reserved3:2;

	u16 orp_nesn:14;
	u16 __reserved4:2;

	u16 orp_ea_hseq:15;
	u16 __reserved5:1;

	u16 orp_ea_tseq:15;
	u16 __reserved6:1;

	u8 __reserved7;
	u32 orp_ea_hptr:24;

	u8 __reserved8;
	u32 orp_ea_tptr:24;

	u8 __reserved9;
	u32 pfdr_hptr:24;

	u8 __reserved10;
	u32 pfdr_tptr:24;

	u8 __reserved11[5];
	u8 is:1;
	u8 __reserved12:7;
	u16 ics_surp;
	u32 byte_cnt;
	u8 __reserved13;
	u32 frm_cnt:24;
	u32 __reserved14;
	u16 ra1_sfdr;	/* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr;	/* QM_MCR_NP_RA2_*** */
	u16 __reserved15;
	u16 od1_sfdr;	/* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr;	/* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr;	/* QM_MCR_NP_OD3_*** */
#endif
} __packed;

struct qm_mcr_alterfq {
	u8 fqs;		/* Frame Queue Status */
	u8 __reserved1[61];
} __packed;
struct qm_mcr_initcgr {
	u8 __reserved1[62];
} __packed;
struct qm_mcr_cgrtestwrite {
	u16 __reserved1;
	struct __qm_mc_cgr cgr; /* CGR fields */
	u8 __reserved2[3];
	u32 __reserved3:24;
	u32 i_bcnt_hi:8;	/* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
	u32 __reserved4:24;
	u32 a_bcnt_hi:8;	/* high 8-bits of 40-bit "Average" */
	u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
	u16 lgt;	/* Last Group Tick */
	u16 wr_prob_g;
	u16 wr_prob_y;
	u16 wr_prob_r;
	u8 __reserved5[8];
} __packed;
struct qm_mcr_querycgr {
	u16 __reserved1;
	struct __qm_mc_cgr cgr; /* CGR fields */
	u8 __reserved2[3];
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 __reserved3:24;
			u32 i_bcnt_hi:8; /* high 8-bits of 40-bit "Instant" */
			u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
#else
			u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
			u32 i_bcnt_hi:8; /* high 8-bits of 40-bit "Instant" */
			u32 __reserved3:24;
#endif
		};
		u64 i_bcnt;
	};
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 __reserved4:24;
			u32 a_bcnt_hi:8; /* high 8-bits of 40-bit "Average" */
			u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
#else
			u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
			u32 a_bcnt_hi:8; /* high 8-bits of 40-bit "Average" */
			u32 __reserved4:24;
#endif
		};
		u64 a_bcnt;
	};
	union {
		u32 cscn_targ_swp[4];
		u8 __reserved5[16];
	};
} __packed;

struct __qm_mcr_querycongestion {
	u32 state[8];
};

struct qm_mcr_querycongestion {
	u8 __reserved[30];
	/* Access this struct using QM_MCR_QUERYCONGESTION() */
	struct __qm_mcr_querycongestion state;
} __packed;
struct qm_mcr_querywq {
	union {
		u16 channel_wq; /* ignores wq (3 lsbits) */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 id:13; /* qm_channel */
			u16 __reserved:3;
#else
			u16 __reserved:3;
			u16 id:13; /* qm_channel */
#endif
		} __packed channel;
	};
	u8 __reserved[28];
	u32 wq_len[8];
} __packed;

struct qm_mc_result {
	u8 verb;
	u8 result;
	union {
		struct qm_mcr_initfq initfq;
		struct qm_mcr_queryfq queryfq;
		struct qm_mcr_queryfq_np queryfq_np;
		struct qm_mcr_alterfq alterfq;
		struct qm_mcr_initcgr initcgr;
		struct qm_mcr_cgrtestwrite cgrtestwrite;
		struct qm_mcr_querycgr querycgr;
		struct qm_mcr_querycongestion querycongestion;
		struct qm_mcr_querywq querywq;
	};
} __packed;

#define QM_MCR_VERB_RRID		0x80
#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL		0x00
#define QM_MCR_RESULT_OK		0xf0
#define QM_MCR_RESULT_ERR_FQID		0xf1
#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
#define QM_MCR_RESULT_PENDING		0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
#define QM_MCR_NP_STATE_FE		0x10
#define QM_MCR_NP_STATE_R		0x08
#define QM_MCR_NP_STATE_MASK		0x07	/* Reads FQD::STATE; */
#define QM_MCR_NP_STATE_OOS		0x00
#define QM_MCR_NP_STATE_RETIRED		0x01
#define QM_MCR_NP_STATE_TEN_SCHED	0x02
#define QM_MCR_NP_STATE_TRU_SCHED	0x03
#define QM_MCR_NP_STATE_PARKED		0x04
#define QM_MCR_NP_STATE_ACTIVE		0x05
#define QM_MCR_NP_PTR_MASK		0x07ff	/* for RA[12] & OD[123] */
#define QM_MCR_NP_RA1_NRA(v)		(((v) >> 14) & 0x3)	/* FQD::NRA */
#define QM_MCR_NP_RA2_IT(v)		(((v) >> 14) & 0x1)	/* FQD::IT */
#define QM_MCR_NP_OD1_NOD(v)		(((v) >> 14) & 0x3)	/* FQD::NOD */
#define QM_MCR_NP_OD3_NPC(v)		(((v) >> 14) & 0x3)	/* FQD::NPC */
#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
/* This extracts the state for congestion group 'n' from a query response.
 * Eg.
 *   u8 cgr = [...];
 *   struct qm_mc_result *res = [...];
 *   printf("congestion group %d congestion state: %d\n", cgr,
 *       QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
 */
#define __CGR_WORD(num)		((num) >> 5)
#define __CGR_SHIFT(num)	((num) & 0x1f)
#define __CGR_NUM		(sizeof(struct __qm_mcr_querycongestion) << 3)
static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
					 u8 cgr)
{
	return p->state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
}

	/* Portal and Frame Queues */
/* Represents a managed portal */
struct qman_portal;

/*
 * This object type represents QMan frame queue descriptors (FQD), it is
 * cacheline-aligned, and initialised by qman_create_fq(). The structure is
 * defined further down.
 */
struct qman_fq;

/*
 * This object type represents a QMan congestion group, it is defined further
 * down.
 */
struct qman_cgr;

/*
 * This enum, and the callback type that returns it, are used when handling
 * dequeued frames via DQRR. Note that for "null" callbacks registered with the
 * portal object (for handling dequeues that do not demux because context_b is
 * NULL), the return value *MUST* be qman_cb_dqrr_consume.
 */
enum qman_cb_dqrr_result {
	/* DQRR entry can be consumed */
	qman_cb_dqrr_consume,
	/* Like _consume, but requests parking - FQ must be held-active */
	qman_cb_dqrr_park,
	/* Does not consume, for DCA mode only. This allows out-of-order
	 * consumes by explicit calls to qman_dca() and/or the use of implicit
	 * DCA via EQCR entries.
	 */
	qman_cb_dqrr_defer,
	/*
	 * Stop processing without consuming this ring entry. Exits the current
	 * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
	 * an interrupt handler, the callback would typically call
	 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
	 * otherwise the interrupt will reassert immediately.
	 */
	qman_cb_dqrr_stop,
	/* Like qman_cb_dqrr_stop, but consumes the current entry. */
	qman_cb_dqrr_consume_stop
};

typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
					struct qman_fq *fq,
					const struct qm_dqrr_entry *dqrr);
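
/*
 * Illustrative sketch of a DQRR demux callback ('handle_frame' is a
 * placeholder);
 *   static enum qman_cb_dqrr_result
 *   my_dqrr_cb(struct qman_portal *qm, struct qman_fq *fq,
 *	      const struct qm_dqrr_entry *dqrr)
 *   {
 *	   handle_frame(&dqrr->fd);
 *	   return qman_cb_dqrr_consume;
 *   }
 */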

typedef enum qman_cb_dqrr_result (*qman_dpdk_cb_dqrr)(void *event,
					struct qman_portal *qm,
					struct qman_fq *fq,
					const struct qm_dqrr_entry *dqrr,
					void **bd);

/* This callback type is used when handling buffers in dpdk pull mode */
typedef void (*qman_dpdk_pull_cb_dqrr)(struct qman_fq **fq,
					struct qm_dqrr_entry **dqrr,
					void **bufs,
					int num_bufs);

typedef void (*qman_dpdk_cb_prepare)(struct qm_dqrr_entry *dq, void **bufs);

/*
 * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
 * are always consumed after the callback returns.
 */
typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
				const struct qm_mr_entry *msg);

/* This callback type is used when handling DCP ERNs */
typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
				const struct qm_mr_entry *msg);
/*
 * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
 * held-active + held-suspended are just "sched". Things like "retired" will not
 * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
 * then, to indicate it's completing and to gate attempts to retry the retire
 * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
 * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
 * index rather than the FQ that ring entry corresponds to), so repeated park
 * commands are allowed (if you're silly enough to try) but won't change FQ
 * state, and the resulting park notifications move FQs from "sched" to
 * "parked".
 */
enum qman_fq_state {
	qman_fq_state_oos,
	qman_fq_state_parked,
	qman_fq_state_sched,
	qman_fq_state_retired
};


/*
 * Frame queue objects (struct qman_fq) are stored within memory passed to
 * qman_create_fq(), as this allows stashing of caller-provided demux callback
 * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
 * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
 * they should;
 *
 * (a) extend the qman_fq structure with their state; eg.
 *
 *     // myfq is allocated and driver_fq callbacks filled in;
 *     struct my_fq {
 *	   struct qman_fq base;
 *	   int an_extra_field;
 *	   [ ... add other fields to be associated with each FQ ...]
 *     } *myfq = some_my_fq_allocator();
 *     struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
 *
 *     // in a dequeue callback, access extra fields from 'fq' via a cast;
 *     struct my_fq *myfq = (struct my_fq *)fq;
 *     do_something_with(myfq->an_extra_field);
 *     [...]
 *
 * (b) when and if configuring the FQ for context stashing, specify how ever
 *     many cachelines are required to stash 'struct my_fq', to accelerate not
 *     only the QMan driver but the callback as well.
 */

struct qman_fq_cb {
	union { /* for dequeued frames */
		qman_dpdk_cb_dqrr dqrr_dpdk_cb;
		qman_dpdk_pull_cb_dqrr dqrr_dpdk_pull_cb;
		qman_cb_dqrr dqrr;
	};
	qman_dpdk_cb_prepare dqrr_prepare;
	qman_cb_mr ern;		/* for s/w ERNs */
	qman_cb_mr fqs;		/* frame-queue state changes */
};

struct qman_fq {
	/* Caller of qman_create_fq() provides these demux callbacks */
	struct qman_fq_cb cb;

	u32 fqid_le;
	u16 ch_id;
	u8 cgr_groupid;
	u8 is_static;

	/* DPDK Interface */
	void *dpaa_intf;

	struct rte_event ev;
	/* affined portal in case of static queue */
	struct qman_portal *qp;
	struct dpaa_bp_info *bp_array;

	volatile unsigned long flags;

	enum qman_fq_state state;
	u32 fqid;
	spinlock_t fqlock;

	struct rb_node node;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
	void **qman_fq_lookup_table;
	u32 key;
#endif
};

/*
 * This callback type is used when handling congestion group entry/exit.
 * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
 */
typedef void (*qman_cb_cgr)(struct qman_portal *qm,
			    struct qman_cgr *cgr, int congested);

struct qman_cgr {
	/* Set these prior to qman_create_cgr() */
	u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc. */
	qman_cb_cgr cb;
	/* These are private to the driver */
	u16 chan; /* portal channel this object is created on */
	struct list_head node;
};
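
/*
 * Illustrative sketch: creating a CGR with congestion-state notifications
 * and a 64KiB congestion threshold ('my_cgr_cb' is a placeholder;
 * qman_create_cgr() and the flags used here are declared further down);
 *   struct qman_cgr cgr = { .cgrid = 5, .cb = my_cgr_cb };
 *   struct qm_mcc_initcgr opts;
 *   memset(&opts, 0, sizeof(opts));
 *   opts.we_mask = QM_CGR_WE_CS_THRES | QM_CGR_WE_CSCN_EN;
 *   opts.cgr.cscn_en = QM_CGR_EN;
 *   qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 0x10000, 0);
 *   qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
 */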

/* Flags to qman_create_fq() */
#define QMAN_FQ_FLAG_NO_ENQUEUE      0x00000001 /* can't enqueue */
#define QMAN_FQ_FLAG_NO_MODIFY       0x00000002 /* can only enqueue */
#define QMAN_FQ_FLAG_TO_DCPORTAL     0x00000004 /* consumed by CAAM/PME/Fman */
#define QMAN_FQ_FLAG_LOCKED          0x00000008 /* multi-core locking */
#define QMAN_FQ_FLAG_AS_IS           0x00000010 /* query h/w state */
#define QMAN_FQ_FLAG_DYNAMIC_FQID    0x00000020 /* (de)allocate fqid */

/* Flags to qman_destroy_fq() */
#define QMAN_FQ_DESTROY_PARKED       0x00000001 /* FQ can be parked or OOS */

/* Flags from qman_fq_state() */
#define QMAN_FQ_STATE_CHANGING       0x80000000 /* 'state' is changing */
#define QMAN_FQ_STATE_NE             0x40000000 /* retired FQ isn't empty */
#define QMAN_FQ_STATE_ORL            0x20000000 /* retired FQ has ORL */
#define QMAN_FQ_STATE_BLOCKOOS       0xe0000000 /* if any are set, no OOS */
#define QMAN_FQ_STATE_CGR_EN         0x10000000 /* CGR enabled */
#define QMAN_FQ_STATE_VDQCR          0x08000000 /* being volatile dequeued */

/* Flags to qman_init_fq() */
#define QMAN_INITFQ_FLAG_SCHED       0x00000001 /* schedule rather than park */
#define QMAN_INITFQ_FLAG_LOCAL       0x00000004 /* set dest portal */

/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
 * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
 * any change here should be audited in PME.)
 */
#define QMAN_ENQUEUE_FLAG_WATCH_CGR  0x00080000 /* watch congestion state */
#define QMAN_ENQUEUE_FLAG_DCA        0x00008000 /* perform enqueue-DCA */
#define QMAN_ENQUEUE_FLAG_DCA_PARK   0x00004000 /* If DCA, requests park */
#define QMAN_ENQUEUE_FLAG_DCA_PTR(p)		/* If DCA, p is DQRR entry */ \
		(((u32)(p) << 2) & 0x00000f00)
#define QMAN_ENQUEUE_FLAG_C_GREEN    0x00000000 /* choose one C_*** flag */
#define QMAN_ENQUEUE_FLAG_C_YELLOW   0x00000008
#define QMAN_ENQUEUE_FLAG_C_RED      0x00000010
#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
/* For the ORP-specific qman_enqueue_orp() variant;
 * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
 *   of a frame.
 */
#define QMAN_ENQUEUE_FLAG_NLIS       0x01000000
/* - this flag performs no enqueue but fills in an ORP sequence number that
 *   would otherwise block it (eg. if a frame has been dropped).
 */
#define QMAN_ENQUEUE_FLAG_HOLE       0x02000000
/* - this flag performs no enqueue but advances NESN to the given sequence
 *   number.
 */
#define QMAN_ENQUEUE_FLAG_NESN       0x04000000

/* Flags to qman_modify_cgr() */
#define QMAN_CGR_FLAG_USE_INIT       0x00000001
#define QMAN_CGR_MODE_FRAME          0x00000001

#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
void qman_set_fq_lookup_table(void **table);
#endif

/**
 * qman_get_portal_index - get portal configuration index
 */
int qman_get_portal_index(void);

u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
			void **bufs);

/**
 * qman_irqsource_add - add processing sources to be interrupt-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Adds processing sources that should be interrupt-driven (rather than
 * processed via qman_poll_***() functions). Returns zero for success, or
 * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
 */
int qman_irqsource_add(u32 bits);

/**
 * qman_irqsource_remove - remove processing sources from being interrupt-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Removes processing sources from being interrupt-driven, so that they will
 * instead be processed via qman_poll_***() functions. Returns zero for success,
 * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
 */
int qman_irqsource_remove(u32 bits);

/**
 * qman_affine_channel - return the channel ID of a portal
 * @cpu: the cpu whose affine portal is the subject of the query
 *
 * If @cpu is -1, the affine portal for the current CPU will be used. It is a
 * bug to call this function for any value of @cpu (other than -1) that is not a
 * member of the cpu mask.
 */
u16 qman_affine_channel(int cpu);

unsigned int qman_portal_poll_rx(unsigned int poll_limit,
				 void **bufs, struct qman_portal *q);

/**
 * qman_set_vdq - Issue a volatile dequeue command
 * @fq: Frame Queue on which the volatile dequeue command is issued
 * @num: Number of Frames requested for volatile dequeue
 * @vdqcr_flags: QM_VDQCR_EXACT flag for the VDQCR command
 *
 * This function will issue a volatile dequeue command to the QMAN.
 */
int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);

/**
 * qman_dequeue - Get the DQRR entry after volatile dequeue command
 * @fq: Frame Queue on which the volatile dequeue command is issued
 *
 * This function will return the DQRR entry after a volatile dequeue command
 * is issued. It will return entries until no further packets are available
 * on the DQRR, at which point it returns NULL.
 */
struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);

/**
 * qman_dqrr_consume - Consume the DQRR entry after volatile dequeue
 * @fq: Frame Queue on which the volatile dequeue command is issued
 * @dq: DQRR entry to consume. This is the one which is provided by the
 *    'qman_dequeue' command.
 *
 * This will consume the DQRR entry and make it available for the next
 * volatile dequeue.
 */
void qman_dqrr_consume(struct qman_fq *fq,
		       struct qm_dqrr_entry *dq);
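
/*
 * Illustrative sketch tying the three calls above together (assuming
 * qman_set_vdq() returns zero on success; 'process' is a placeholder):
 * volatile-dequeue up to 8 frames from 'fq';
 *   struct qm_dqrr_entry *dq;
 *   if (!qman_set_vdq(fq, 8, QM_VDQCR_EXACT)) {
 *	   while ((dq = qman_dequeue(fq)) != NULL) {
 *		   process(&dq->fd);
 *		   qman_dqrr_consume(fq, dq);
 *	   }
 *   }
 */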

/**
 * qman_poll_dqrr - process DQRR (fast-path) entries
 * @limit: the maximum number of DQRR entries to process
 *
 * Use of this function requires that DQRR processing not be interrupt-driven.
 * Ie. the value returned by qman_irqsource_get() should not include
 * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
 * this function will return -EINVAL, otherwise the return value is >=0 and
 * represents the number of DQRR entries processed.
 */
int qman_poll_dqrr(unsigned int limit);

/**
 * qman_poll
 *
 * Dispatcher logic on a cpu can use this to trigger any maintenance of the
 * affine portal. There are two classes of portal processing in question;
 * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
 * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
 * thresholds, congestion state changes, etc). This function does whatever
 * processing is not triggered by interrupts.
 *
 * Note, if DQRR and some slow-path processing are poll-driven (rather than
 * interrupt-driven) then this function uses a heuristic to determine how often
 * to run slow-path processing - as slow-path processing introduces at least a
 * minimum latency each time it is run, whereas fast-path (DQRR) processing is
 * close to zero-cost if there is no work to be done.
 */
void qman_poll(void);

/**
 * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
 *
 * Disables DQRR processing of the portal. This is reference-counted, so
 * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
 * truly re-enable dequeuing.
 */
void qman_stop_dequeues(void);

/**
 * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
 *
 * Enables DQRR processing of the portal. This is reference-counted, so
 * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
 * truly re-enable dequeuing.
 */
void qman_start_dequeues(void);

/**
 * qman_static_dequeue_add - Add pool channels to the portal SDQCR
 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
 *
 * Adds a set of pool channels to the portal's static dequeue command register
 * (SDQCR). The requested pools are limited to those the portal has dequeue
 * access to.
 */
void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);
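
/*
 * Illustrative sketch: adding pool channel 'ch' (a qm_channel value) to the
 * SDQCR of portal 'p' ('ch' and 'p' are placeholders);
 *   qman_static_dequeue_add(QM_SDQCR_CHANNELS_POOL_CONV(ch), p);
 */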

/**
 * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
 *
 * Removes a set of pool channels from the portal's static dequeue command
 * register (SDQCR). The requested pools are limited to those the portal has
 * dequeue access to.
 */
void qman_static_dequeue_del(u32 pools, struct qman_portal *qp);

/**
 * qman_static_dequeue_get - return the portal's current SDQCR
 *
 * Returns the portal's current static dequeue command register (SDQCR). The
 * entire register is returned, so if only the currently-enabled pool channels
 * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
 */
u32 qman_static_dequeue_get(struct qman_portal *qp);

/**
 * qman_dca - Perform a Discrete Consumption Acknowledgment
 * @dq: the DQRR entry to be consumed
 * @park_request: indicates whether the held-active @fq should be parked
 *
 * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
 * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
 * does not take a 'portal' argument but implies the core affine portal from the
 * cpu that is currently executing the function. For reasons of locking, this
 * function must be called from the same CPU as that which processed the DQRR
 * entry in the first place.
 */
void qman_dca(const struct qm_dqrr_entry *dq, int park_request);

/**
 * qman_dca_index - Perform a Discrete Consumption Acknowledgment
 * @index: the DQRR index to be consumed
 * @park_request: indicates whether the held-active @fq should be parked
 *
 * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
 * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
 * does not take a 'portal' argument but implies the core affine portal from the
 * cpu that is currently executing the function. For reasons of locking, this
 * function must be called from the same CPU as that which processed the DQRR
 * entry in the first place.
 */
void qman_dca_index(u8 index, int park_request);
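
/*
 * Illustrative sketch of deferred consumption: the DQRR callback stashes the
 * entry pointer 'dq' and returns qman_cb_dqrr_defer; later, on the same CPU,
 * the entry is acknowledged without parking via;
 *   qman_dca(dq, 0);
 */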

/**
 * qman_eqcr_is_empty - Determine if portal's EQCR is empty
 *
 * For use in situations where a cpu-affine caller needs to determine when all
 * enqueues for the local portal have been processed by Qman but can't use the
 * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
 * The function forces tracking of EQCR consumption (which normally doesn't
 * happen until enqueue processing needs to find space to put new enqueue
 * commands), and returns zero if the ring still has unprocessed entries,
 * non-zero if it is empty.
 */
int qman_eqcr_is_empty(void);

/**
 * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
 * @handler: callback for processing DCP ERNs
 * @affine: whether this handler is specific to the locally affine portal
 *
 * If a hardware block's interface to Qman (ie. its direct-connect portal, or
 * DCP) is configured not to receive enqueue rejections, then any enqueues
 * through that DCP that are rejected will be sent to a given software portal.
 * If @affine is non-zero, then this handler will only be used for DCP ERNs
 * received on the portal affine to the current CPU. If multiple CPUs share a
 * portal and they all call this function, they will be setting the handler for
 * the same portal! If @affine is zero, then this handler will be global to all
 * portals handled by this instance of the driver. Only those portals that do
 * not have their own affine handler will use the global handler.
 */
void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);

	/* FQ management */
	/* ------------- */
/**
 * qman_create_fq - Allocates a FQ
 * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
 * @flags: bit-mask of QMAN_FQ_FLAG_*** options
 * @fq: memory for storing the 'fq', with callbacks filled in
 *
 * Creates a frame queue object for the given @fqid, unless the
 * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
 * dynamically allocated (or the function fails if none are available). Once
 * created, the caller should not touch the memory at 'fq' except as extended to
 * adjacent memory for user-defined fields (see the definition of "struct
 * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
 * pre-existing frame-queues that aren't to be otherwise interfered with, it
 * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
 * causes the driver to honour any contextB modifications requested in the
 * qm_init_fq() API, as this indicates the frame queue will be consumed by a
 * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
 * software portals, the contextB field is controlled by the driver and can't be
 * modified by the caller. If the AS_IS flag is specified, management commands
 * will be used on portal @p to query state for frame queue @fqid and construct
 * a frame queue object based on that, rather than assuming/requiring that it be
 * Out of Service.
 */
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);

/**
 * qman_destroy_fq - Deallocates a FQ
 * @fq: the frame queue object to release
 * @flags: bit-mask of QMAN_FQ_DESTROY_*** options
 *
 * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
 * not deallocated but the caller regains ownership, to do with as desired. The
 * FQ must be in the 'out-of-service' state unless the QMAN_FQ_DESTROY_PARKED
 * flag is specified, in which case it may also be in the 'parked' state.
 */
void qman_destroy_fq(struct qman_fq *fq, u32 flags);
1563 
1564 /**
1565  * qman_fq_fqid - Queries the frame queue ID of a FQ object
1566  * @fq: the frame queue object to query
1567  */
1568 u32 qman_fq_fqid(struct qman_fq *fq);
1569 
1570 /**
1571  * qman_fq_state - Queries the state of a FQ object
1572  * @fq: the frame queue object to query
1573  * @state: pointer to state enum to return the FQ scheduling state
1574  * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
1575  *
1576  * Queries the state of the FQ object, without performing any h/w commands.
1577  * This captures the state, as seen by the driver, at the time the function
1578  * executes.
1579  */
1580 void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
1581 
1582 /**
1583  * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
1584  * @fq: the frame queue object to modify, must be 'parked' or new.
1585  * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
1586  * @opts: the FQ-modification settings, as defined in the low-level API
1587  *
1588  * The @opts parameter comes from the low-level portal API. Select
1589  * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
1590  * rather than parked. NB, @opts can be NULL.
1591  *
1592  * Note that some fields and options within @opts may be ignored or overwritten
1593  * by the driver;
1594  * 1. the 'count' and 'fqid' fields are always ignored (this operation only
1595  * affects one frame queue: @fq).
1596  * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
1597  * 'fqd' structure's 'context_b' field are sometimes overwritten;
1598  *   - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
1599  *     initialised to a value used by the driver for demux.
1600  *   - if context_b is initialised for demux, so is context_a in case stashing
1601  *     is requested (see item 4).
1602  * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
1603  * objects.)
1604  * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
1605  * 'dest::channel' field will be overwritten to match the portal used to issue
1606  * the command. If the WE_DESTWQ write-enable bit had already been set by the
1607  * caller, the channel workqueue will be left as-is, otherwise the write-enable
1608  * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
1609  * isn't set, the destination channel/workqueue fields and the write-enable bit
1610  * are left as-is.
1611  * 4. if the driver overwrites context_a/b for demux, then if
1612  * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
1613  * context_a.address fields and will leave the stashing fields provided by the
1614  * user alone, otherwise it will zero out the context_a.stashing fields.
1615  */
1616 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
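
/*
 * Example (illustrative sketch, not part of the API): initialise and schedule
 * a frame queue on a specific channel/workqueue. The exact write-enable macro
 * name (QM_INITFQ_WE_DESTWQ) and the 'fqd.dest' field layout are assumed to
 * match the definitions earlier in this header.
 *
 *	struct qm_mcc_initfq opts;
 *	int err;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = QM_INITFQ_WE_DESTWQ;
 *	opts.fqd.dest.channel = qm_channel_pool1;
 *	opts.fqd.dest.wq = 3;
 *	err = qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 */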
1617 
1618 /**
1619  * qman_schedule_fq - Schedules a FQ
1620  * @fq: the frame queue object to schedule, must be 'parked'
1621  *
1622  * Schedules the frame queue, which must be Parked; this takes it to the
1623  * Tentatively-Scheduled or Truly-Scheduled state, depending on its fill-level.
1624  */
1625 int qman_schedule_fq(struct qman_fq *fq);
1626 
1627 /**
1628  * qman_retire_fq - Retires a FQ
1629  * @fq: the frame queue object to retire
1630  * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
1631  *
1632  * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
1633  * the retirement was started asynchronously, otherwise it returns negative for
1634  * failure. When this function returns zero, @flags is set to indicate whether
1635  * the retired FQ is empty and/or whether it has any ORL fragments (to show up
1636  * as ERNs). Otherwise the corresponding flags will be known when a subsequent
1637  * FQRN message shows up on the portal's message ring.
1638  *
1639  * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
1640  * Active state), the completion will be via the message ring as a FQRN - but
1641  * the corresponding callback may occur before this function returns!! Ie. the
1642  * caller should be prepared to accept the callback as the function is called,
1643  * not only once it has returned.
1644  */
1645 int qman_retire_fq(struct qman_fq *fq, u32 *flags);
1646 
1647 /**
1648  * qman_oos_fq - Puts a FQ "out of service"
1649  * @fq: the frame queue object to be put out-of-service, must be 'retired'
1650  *
1651  * The frame queue must be retired and empty; if any order-restoration entries
1652  * were released as ERNs at retirement, they must all have been consumed.
1653  */
1654 int qman_oos_fq(struct qman_fq *fq);
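
/*
 * Example (illustrative sketch): retire a frame queue and take it out of
 * service. If retirement is asynchronous (+1), the FQRN must be processed on
 * the portal before the OOS transition can succeed; wait_for_fqrn() is a
 * hypothetical placeholder for that (polling/interrupt handling not shown).
 *
 *	u32 state_flags;
 *	int err;
 *
 *	err = qman_retire_fq(&fq, &state_flags);
 *	if (err < 0)
 *		return err;
 *	if (err == 1)
 *		wait_for_fqrn();
 *	err = qman_oos_fq(&fq);
 *	if (!err)
 *		qman_destroy_fq(&fq, 0);
 */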
1655 
1656 /**
1657  * qman_fq_flow_control - Set the XON/XOFF state of a FQ
1658  * @fq: the frame queue object to set to the XON/XOFF state; must not be in
1659  * the 'oos', 'retired' or 'parked' state
1660  * @xon: boolean to set the fq to the XON or XOFF state
1661  *
1662  * The frame queue should be in the Tentatively Scheduled or Truly Scheduled
1663  * state, otherwise the IFSI interrupt will be asserted.
1664  */
1665 int qman_fq_flow_control(struct qman_fq *fq, int xon);
1666 
1667 /**
1668  * qman_query_fq - Queries FQD fields (via h/w query command)
1669  * @fq: the frame queue object to be queried
1670  * @fqd: storage for the queried FQD fields
1671  */
1672 int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
1673 
1674 /**
1675  * qman_query_fq_has_pkts - Queries non-programmable FQD fields and returns '1'
1676  * if there are packets in the frame queue, or '0' if the frame queue is
1677  * empty.
1678  * @fq: the frame queue object to be queried
1679  */
1680 int qman_query_fq_has_pkts(struct qman_fq *fq);
1681 
1682 /**
1683  * qman_query_fq_np - Queries non-programmable FQD fields
1684  * @fq: the frame queue object to be queried
1685  * @np: storage for the queried FQD fields
1686  */
1687 int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
1688 
1689 /**
1690  * qman_query_fq_frm_cnt - Queries the FQ's frame count
1691  * @fq: the frame queue object to be queried
1692  * @frm_cnt: number of frames in the queue
1693  */
1694 int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);
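
/*
 * Example (illustrative sketch): check whether a frame queue still holds
 * frames before tearing it down; drain_fq() is a hypothetical helper.
 *
 *	u32 frm_cnt = 0;
 *
 *	if (qman_query_fq_has_pkts(&fq) > 0 &&
 *	    !qman_query_fq_frm_cnt(&fq, &frm_cnt))
 *		drain_fq(&fq, frm_cnt);
 */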
1695 
1696 /**
1697  * qman_query_wq - Queries work queue lengths
1698  * @query_dedicated: If non-zero, query the length of the WQs in the channel
1699  *		dedicated to this software portal. Otherwise, query the length
1700  *		of the WQs in the channel specified in @wq.
1701  * @wq: storage for the queried WQ lengths. Also specifies the channel to
1702  *	query if @query_dedicated is zero.
1703  */
1704 int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
1705 
1706 /**
1707  * qman_volatile_dequeue - Issue a volatile dequeue command
1708  * @fq: the frame queue object to dequeue from
1709  * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
1710  * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
1711  *
1712  * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
1713  * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
1714  * the VDQCR is already in use, otherwise returns non-zero for failure. If
1715  * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
1716  * the VDQCR command has finished executing (ie. once the callback for the last
1717  * DQRR entry resulting from the VDQCR command has been called). If not using
1718  * the FINISH flag, completion can be determined either by detecting the
1719  * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
1720  * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
1721  * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
1722  * "flags" retrieved from qman_fq_state().
1723  */
1724 int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
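
/*
 * Example (illustrative sketch): volatile-dequeue up to 8 frames and wait for
 * the command to complete. QMAN_VOLATILE_FLAG_WAIT and _FINISH are the flags
 * described above; the QM_VDQCR_*** macros are defined earlier in this header.
 *
 *	u32 vdqcr = QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_SET(8);
 *	int err = qman_volatile_dequeue(&fq, QMAN_VOLATILE_FLAG_WAIT |
 *					QMAN_VOLATILE_FLAG_FINISH, vdqcr);
 *
 * Without the FINISH flag, completion could instead be detected by polling
 * qman_fq_state() until QMAN_FQ_STATE_VDQCR clears from the returned flags.
 */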
1725 
1726 /**
1727  * qman_enqueue - Enqueue a frame to a frame queue
1728  * @fq: the frame queue object to enqueue to
1729  * @fd: a descriptor of the frame to be enqueued
1730  * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
1731  *
1732  * Fills an entry in the EQCR of the issuing portal to enqueue the frame given
1733  * by @fd. The descriptor details are copied from @fd to the EQCR entry; the
1734  * 'pid' field is ignored. Returns non-zero on error, such as ring full
1735  * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
1736  * specified), etc. If the ring is full and FLAG_WAIT is specified, this
1737  * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
1738  * interrupt will assert when Qman consumes the EQCR entry (subject to "status
1739  * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
1740  * perform an implied "discrete consumption acknowledgment" on the dequeue
1741  * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
1742  * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
1743  * this implicit DCA can delay the release of a "held active" frame queue
1744  * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
1745  * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
1746  * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
1747  * acknowledgment should "park request" the "held active" frame queue. Ie.
1748  * when the portal eventually releases that frame queue, it will be left in the
1749  * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
1750  * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
1751  * is requested, and the FQ is a member of a congestion group, then this
1752  * function returns -EAGAIN if the congestion group is currently congested.
1753  * Note, this does not eliminate ERNs, as the async interface means we can be
1754  * sending enqueue commands to an un-congested FQ that becomes congested before
1755  * the enqueue commands are processed, but it does minimise needless thrashing
1756  * of an already busy hardware resource by throttling many of the to-be-dropped
1757  * enqueues "at the source".
1758  */
1759 int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
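
/*
 * Example (illustrative sketch): simple enqueue with a bounded retry on
 * failure (e.g. EQCR full when FLAG_WAIT is not used). A real caller would
 * distinguish error causes rather than retry blindly.
 *
 *	int retries = 1000;
 *
 *	while (qman_enqueue(&fq, &fd, 0) && --retries)
 *		;
 *
 * Passing QMAN_ENQUEUE_FLAG_WATCH_CGR additionally makes the call fail with
 * -EAGAIN while the FQ's congestion group is congested, throttling
 * to-be-dropped traffic at the source.
 */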
1760 
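/*
 * qman_enqueue_multi - Enqueue multiple frames to a frame queue
 * @fq: the frame queue object to enqueue to
 * @fd: pointer to the first of the frame descriptors to be enqueued
 * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options, one per frame (may be
 *	NULL)
 * @frames_to_send: the number of frames to be sent
 *
 * (Descriptive comment added for symmetry with qman_enqueue_multi_fq() below;
 * semantics inferred from the signature.) Returns the number of frames
 * actually enqueued, which may be fewer than @frames_to_send if the EQCR
 * fills.
 */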
1761 int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
1762 		       int frames_to_send);
1763 
1764 /**
1765  * qman_enqueue_multi_fq - Enqueue multiple frames to their respective frame
1766  * queues.
1767  * @fq[]: Array of frame queue objects to enqueue to
1768  * @fd: pointer to first descriptor of frame to be enqueued
1769  * @frames_to_send: number of frames to be sent.
1770  *
1771  * This API is similar to qman_enqueue_multi(), but each frame descriptor in
1772  * @fd is enqueued to its respective frame queue in @fq[].
1773  */
1774 int
1775 qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
1776 		      int frames_to_send);
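
/*
 * Example (illustrative sketch): send a burst of frames, each to its own
 * frame queue. The arrays are assumed to be filled in by the caller, and the
 * return value is taken to be the number of frames enqueued, as with
 * qman_enqueue_multi(); handle_unsent() is a hypothetical helper.
 *
 *	struct qm_fd fds[8];
 *	struct qman_fq *fqs[8];
 *	int sent;
 *
 *	sent = qman_enqueue_multi_fq(fqs, fds, 8);
 *	if (sent < 8)
 *		handle_unsent(fds + sent, 8 - sent);
 */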
1777 
1778 typedef int (*qman_cb_precommit) (void *arg);
1779 
1780 /**
1781  * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
1782  * @fq: the frame queue object to enqueue to
1783  * @fd: a descriptor of the frame to be enqueued
1784  * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
1785  * @orp: the frame queue object used as an order restoration point.
1786  * @orp_seqnum: the sequence number of this frame in the order restoration path
1787  *
1788  * Similar to qman_enqueue(), but with the addition of an Order Restoration
1789  * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
1790  * enqueue operation to employ order restoration. Each frame queue object acts
1791  * as an Order Definition Point (ODP) by providing each frame dequeued from it
1792  * with an incrementing sequence number; this value is generally ignored unless
1793  * that sequence of dequeued frames will need order restoration later. Each
1794  * frame queue object also encapsulates an Order Restoration Point (ORP), which
1795  * is a re-assembly context for re-ordering frames relative to their sequence
1796  * numbers as they are enqueued. The ORP does not have to be within the frame
1797  * queue that receives the enqueued frame, in fact it is usually the frame
1798  * queue from which the frames were originally dequeued. For the purposes of
1799  * order restoration, multiple frames (or "fragments") can be enqueued for a
1800  * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
1801  * enqueues except the final fragment of a given sequence number. Ordering
1802  * between sequence numbers is guaranteed, even if fragments of different
1803  * sequence numbers are interlaced with one another. Fragments of the same
1804  * sequence number will retain the order in which they are enqueued. If no
1805  * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
1806  * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
1807  * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
1808  * sequence number should become the ORP's "Next Expected Sequence Number".
1809  *
1810  * Side note: a frame queue object can be used purely as an ORP, without
1811  * carrying any frames at all. Care should be taken not to deallocate a frame
1812  * queue object that is being actively used as an ORP, as a future allocation
1813  * of the frame queue object may start using the internal ORP before the
1814  * previous use has finished.
1815  */
1816 int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
1817 		     struct qman_fq *orp, u16 orp_seqnum);
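
/*
 * Example (illustrative sketch): enqueue a two-fragment sequence through an
 * ORP. Per the description above, all fragments but the last carry
 * QMAN_ENQUEUE_FLAG_NLIS for the same sequence number.
 *
 *	err = qman_enqueue_orp(&tx_fq, &frag0, QMAN_ENQUEUE_FLAG_NLIS,
 *			       &orp_fq, seqnum);
 *	if (!err)
 *		err = qman_enqueue_orp(&tx_fq, &frag1, 0, &orp_fq, seqnum);
 *
 * A frame dropped from the sequence would instead be skipped with
 * QMAN_ENQUEUE_FLAG_HOLE at that sequence number.
 */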
1818 
1819 /**
1820  * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
1821  * @result: is set by the API to the base FQID of the allocated range
1822  * @count: the number of FQIDs required
1823  * @align: required alignment of the allocated range
1824  * @partial: non-zero if the API can return fewer than @count FQIDs
1825  *
1826  * Returns the number of frame queues allocated, or a negative error code. If
1827  * @partial is non-zero, the allocation request may return a smaller range of
1828  * FQs than requested (though alignment will be as requested). If @partial is
1829  * zero, the return value will either be 'count' or negative.
1830  */
1831 int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
1832 static inline int qman_alloc_fqid(u32 *result)
1833 {
1834 	int ret = qman_alloc_fqid_range(result, 1, 0, 0);
1835 
1836 	return (ret > 0) ? 0 : ret;
1837 }
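
/*
 * Example (illustrative sketch): explicitly allocate a FQID and pair it with
 * a frame queue object. Unlike QMAN_FQ_FLAG_DYNAMIC_FQID, the FQID is known
 * (and can be shared with another entity) before the FQ object is created;
 * qman_release_fqid() is declared below.
 *
 *	u32 fqid;
 *	int err;
 *
 *	err = qman_alloc_fqid(&fqid);
 *	if (err)
 *		return err;
 *	err = qman_create_fq(fqid, 0, &fq);
 *	if (err)
 *		qman_release_fqid(fqid);
 */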
1838 
1839 /**
1840  * qman_release_fqid_range - Release the specified range of frame queue IDs
1841  * @fqid: the base FQID of the range to deallocate
1842  * @count: the number of FQIDs in the range
1843  *
1844  * This function can also be used to seed the allocator with ranges of FQIDs
1845  * that it can subsequently allocate from.
1846  */
1847 void qman_release_fqid_range(u32 fqid, unsigned int count);
1848 static inline void qman_release_fqid(u32 fqid)
1849 {
1850 	qman_release_fqid_range(fqid, 1);
1851 }
1852 
1853 void qman_seed_fqid_range(u32 fqid, unsigned int count);
1854 
1855 int qman_shutdown_fq(u32 fqid);
1856 
1857 /**
1858  * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
1859  * @fqid: the base FQID of the range to reserve
1860  * @count: the number of FQIDs in the range
1861  */
1862 int qman_reserve_fqid_range(u32 fqid, unsigned int count);
1863 static inline int qman_reserve_fqid(u32 fqid)
1864 {
1865 	return qman_reserve_fqid_range(fqid, 1);
1866 }
1867 
1868 /* Pool-channel management */
1869 /**
1870  * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
1871  * @result: is set by the API to the base pool-channel ID of the allocated range
1872  * @count: the number of pool-channel IDs required
1873  * @align: required alignment of the allocated range
1874  * @partial: non-zero if the API can return fewer than @count
1875  *
1876  * Returns the number of pool-channel IDs allocated, or a negative error code.
1877  * If @partial is non-zero, the allocation request may return a smaller range
1878  * than requested (though alignment will be as requested). If @partial is zero,
1879  * the return value will either be 'count' or negative.
1880  */
1881 int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
1882 static inline int qman_alloc_pool(u32 *result)
1883 {
1884 	int ret = qman_alloc_pool_range(result, 1, 0, 0);
1885 
1886 	return (ret > 0) ? 0 : ret;
1887 }
1888 
1889 /**
1890  * qman_release_pool_range - Release the specified range of pool-channel IDs
1891  * @id: the base pool-channel ID of the range to deallocate
1892  * @count: the number of pool-channel IDs in the range
1893  */
1894 void qman_release_pool_range(u32 id, unsigned int count);
1895 static inline void qman_release_pool(u32 id)
1896 {
1897 	qman_release_pool_range(id, 1);
1898 }
1899 
1900 /**
1901  * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
1902  * @id: the base pool-channel ID of the range to reserve
1903  * @count: the number of pool-channel IDs in the range
1904  */
1905 int qman_reserve_pool_range(u32 id, unsigned int count);
1906 static inline int qman_reserve_pool(u32 id)
1907 {
1908 	return qman_reserve_pool_range(id, 1);
1909 }
1910 
1911 void qman_seed_pool_range(u32 id, unsigned int count);
1912 
1913 	/* CGR management */
1914 	/* -------------- */
1915 /**
1916  * qman_create_cgr - Register a congestion group object
1917  * @cgr: the 'cgr' object, with fields filled in
1918  * @flags: QMAN_CGR_FLAG_* values
1919  * @opts: optional state of CGR settings
1920  *
1921  * Registers this object to receive congestion entry/exit callbacks on the
1922  * portal affine to the cpu on which this API is executed. If opts is
1923  * NULL then only the callback (cgr->cb) function is registered. If @flags
1924  * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
1925  * any unspecified parameters) will be used rather than a modify hw command
1926  * (which only modifies the specified parameters).
1927  */
1928 int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
1929 		    struct qm_mcc_initcgr *opts);
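
/*
 * Example (illustrative sketch): register a CGR on the current cpu's affine
 * portal using only its callback, i.e. with @opts NULL as permitted above.
 * 'my_cgr' is assumed to have its 'cgrid' and 'cb' fields filled in first.
 *
 *	err = qman_create_cgr(&my_cgr, 0, NULL);
 *
 * Passing QMAN_CGR_FLAG_USE_INIT with a populated qm_mcc_initcgr would
 * instead (re)initialise the CGR in hardware, resetting any unspecified
 * parameters.
 */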
1930 
1931 /**
1932  * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
1933  * @cgr: the 'cgr' object, with fields filled in
1934  * @flags: QMAN_CGR_FLAG_* values
1935  * @dcp_portal: the DCP portal to which the cgr object is registered.
1936  * @opts: optional state of CGR settings
1937  * Similar to qman_create_cgr(), but the CGR is registered to @dcp_portal.
1938  */
1939 int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
1940 			   struct qm_mcc_initcgr *opts);
1941 
1942 /**
1943  * qman_delete_cgr - Deregisters a congestion group object
1944  * @cgr: the 'cgr' object to deregister
1945  *
1946  * "Unplugs" this CGR object from the portal affine to the cpu on which this API
1947  * is executed. This must be executed on the same affine portal on which it was
1948  * created.
1949  */
1950 int qman_delete_cgr(struct qman_cgr *cgr);
1951 
1952 /**
1953  * qman_modify_cgr - Modify CGR fields
1954  * @cgr: the 'cgr' object to modify
1955  * @flags: QMAN_CGR_FLAG_* values
1956  * @opts: the CGR-modification settings
1957  *
1958  * The @opts parameter comes from the low-level portal API, and can be NULL.
1959  * Note that some fields and options within @opts may be ignored or overwritten
1960  * by the driver, in particular the 'cgrid' field is ignored (this operation
1961  * only affects the given CGR object). If @flags contains
1962  * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
1963  * unspecified parameters) will be used rather than a modify hw command (which
1964  * only modifies the specified parameters).
1965  */
1966 int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
1967 		    struct qm_mcc_initcgr *opts);
1968 
1969 /**
1970  * qman_query_cgr - Queries CGR fields
1971  * @cgr: the 'cgr' object to query
1972  * @result: storage for the queried congestion group record
1973  */
1974 int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
1975 
1976 /**
1977  * qman_query_congestion - Queries the state of all congestion groups
1978  * @congestion: storage for the queried state of all congestion groups
1979  */
1980 int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
1981 
1982 /**
1983  * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
1984  * @result: is set by the API to the base CGR ID of the allocated range
1985  * @count: the number of CGR IDs required
1986  * @align: required alignment of the allocated range
1987  * @partial: non-zero if the API can return fewer than @count
1988  *
1989  * Returns the number of CGR IDs allocated, or a negative error code.
1990  * If @partial is non-zero, the allocation request may return a smaller range
1991  * than requested (though alignment will be as requested). If @partial is zero,
1992  * the return value will either be 'count' or negative.
1993  */
1994 int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
1995 static inline int qman_alloc_cgrid(u32 *result)
1996 {
1997 	int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
1998 
1999 	return (ret > 0) ? 0 : ret;
2000 }
2001 
2002 /**
2003  * qman_release_cgrid_range - Release the specified range of CGR IDs
2004  * @id: the base CGR ID of the range to deallocate
2005  * @count: the number of CGR IDs in the range
2006  */
2007 void qman_release_cgrid_range(u32 id, unsigned int count);
2008 static inline void qman_release_cgrid(u32 id)
2009 {
2010 	qman_release_cgrid_range(id, 1);
2011 }
2012 
2013 /**
2014  * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
2015  * @id: the base CGR ID of the range to reserve
2016  * @count: the number of CGR IDs in the range
2017  */
2018 int qman_reserve_cgrid_range(u32 id, unsigned int count);
2019 static inline int qman_reserve_cgrid(u32 id)
2020 {
2021 	return qman_reserve_cgrid_range(id, 1);
2022 }
2023 
2024 void qman_seed_cgrid_range(u32 id, unsigned int count);
2025 
2026 	/* Helpers */
2027 	/* ------- */
2028 /**
2029  * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
2030  * @fqid: the FQID that will be initialised by other s/w
2031  *
2032  * In many situations, a FQID is provided for communication between s/w
2033  * entities, and whilst the consumer is responsible for initialising and
2034  * scheduling the FQ, the producer(s) generally create a wrapper FQ object
2035  * with QMAN_FQ_FLAG_NO_MODIFY and only call qman_enqueue() on it. Ie:
2036  *     qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
2037  * However, data cannot be enqueued to the FQ until it is initialised out of
2038  * the OOS state - this function polls for that condition. It is particularly
2039  * useful for users of IPC functions - each endpoint's Rx FQ is the other
2040  * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
2041  * and then use this API on the (NO_MODIFY) Tx FQ object in order to
2042  * synchronise. The function returns zero for success, +1 if the FQ is still in
2043  * the OOS state, or negative if there was an error.
2044  */
2045 static inline int qman_poll_fq_for_init(struct qman_fq *fq)
2046 {
2047 	struct qm_mcr_queryfq_np np;
2048 	int err;
2049 
2050 	err = qman_query_fq_np(fq, &np);
2051 	if (err)
2052 		return err;
2053 	if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
2054 		return 1;
2055 	return 0;
2056 }
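
/*
 * Example (illustrative sketch): a producer waiting for the consumer to take
 * the shared FQ out of OOS. A real caller would bound the loop or sleep
 * between polls rather than spin.
 *
 *	int ret;
 *
 *	do {
 *		ret = qman_poll_fq_for_init(&tx_fq);
 *		if (ret < 0)
 *			return ret;
 *	} while (ret == 1);
 */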
2057 
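/*
 * QMan hardware data structures (such as scatter/gather entries) are
 * big-endian, so the conversions below are no-ops on big-endian hosts.
 */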
2058 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
2059 #define cpu_to_hw_sg(x)
2060 #define hw_sg_to_cpu(x)
2061 #else
2062 #define cpu_to_hw_sg(x)  __cpu_to_hw_sg(x)
2063 #define hw_sg_to_cpu(x)  __hw_sg_to_cpu(x)
2064 
2065 static inline void __cpu_to_hw_sg(struct qm_sg_entry *sgentry)
2066 {
2067 	sgentry->opaque = cpu_to_be64(sgentry->opaque);
2068 	sgentry->val = cpu_to_be32(sgentry->val);
2069 	sgentry->val_off = cpu_to_be16(sgentry->val_off);
2070 }
2071 
2072 static inline void __hw_sg_to_cpu(struct qm_sg_entry *sgentry)
2073 {
2074 	sgentry->opaque = be64_to_cpu(sgentry->opaque);
2075 	sgentry->val = be32_to_cpu(sgentry->val);
2076 	sgentry->val_off = be16_to_cpu(sgentry->val_off);
2077 }
2078 #endif
2079 
2080 #ifdef __cplusplus
2081 }
2082 #endif
2083 
2084 #endif /* __FSL_QMAN_H */
2085