/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Copyright 2019-2022 NXP
 *
 */

#ifndef __FSL_QMAN_H
#define __FSL_QMAN_H

#include <dpaa_rbtree.h>
#include <rte_compat.h>
#include <rte_eventdev.h>

#ifdef __cplusplus
extern "C" {
#endif

/* FQ lookups (turn this on for 64bit user-space) */
#ifdef RTE_ARCH_64
#define CONFIG_FSL_QMAN_FQ_LOOKUP
/* if FQ lookups are supported, this controls the number of initialised,
 * s/w-consumed FQs that can be supported at any one time.
 */
#define CONFIG_FSL_QMAN_FQ_LOOKUP_MAX (32 * 1024)
#endif

/* Last updated for v00.800 of the BG */

/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
#define QMAN_CHANNEL_POOL1 0x21
#define QMAN_CHANNEL_CAAM 0x80
#define QMAN_CHANNEL_PME 0xa0
#define QMAN_CHANNEL_POOL1_REV3 0x401
#define QMAN_CHANNEL_CAAM_REV3 0x840
#define QMAN_CHANNEL_PME_REV3 0x860
extern u16 qm_channel_pool1;
extern u16 qm_channel_caam;
extern u16 qm_channel_pme;
enum qm_dc_portal {
	qm_dc_portal_fman0 = 0,
	qm_dc_portal_fman1 = 1,
	qm_dc_portal_caam = 2,
	qm_dc_portal_pme = 3
};

__rte_internal
u16 dpaa_get_qm_channel_caam(void);

__rte_internal
u16 dpaa_get_qm_channel_pool(void);

/* Portal processing (interrupt) sources */
#define QM_PIRQ_CCSCI	0x00200000	/* CEETM Congestion State Change */
#define QM_PIRQ_CSCI	0x00100000	/* Congestion State Change */
#define QM_PIRQ_EQCI	0x00080000	/* Enqueue Command Committed */
#define QM_PIRQ_EQRI	0x00040000	/* EQCR Ring (below threshold) */
#define QM_PIRQ_DQRI	0x00020000	/* DQRR Ring (non-empty) */
#define QM_PIRQ_MRI	0x00010000	/* MR Ring (non-empty) */
/*
 * This mask contains all the interrupt sources that need handling except DQRI,
 * ie. that if present should trigger slow-path processing.
 */
#define QM_PIRQ_SLOW	(QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
			QM_PIRQ_MRI | QM_PIRQ_CCSCI)

/* For qman_static_dequeue_*** APIs */
#define QM_SDQCR_CHANNELS_POOL_MASK	0x00007fff
/* for n in [1,15] */
#define QM_SDQCR_CHANNELS_POOL(n)	(0x00008000 >> (n))
/* for conversion from n of qm_channel */
static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
{
	return QM_SDQCR_CHANNELS_POOL(channel + 1 - dpaa_get_qm_channel_pool());
}

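/*
 * Illustrative sketch (not part of the API): on a rev3 part where
 * dpaa_get_qm_channel_pool() returns 0x401, converting pool channel 0x403
 * yields QM_SDQCR_CHANNELS_POOL(3), ie. the bit 0x00001000 in the SDQCR
 * channel mask:
 *
 *   u32 sdqcr_bit = QM_SDQCR_CHANNELS_POOL_CONV(0x403);
 */
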
/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
 * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
 * FQID(n) to fill in the frame queue ID.
 */
#define QM_VDQCR_PRECEDENCE_VDQCR	0x0
#define QM_VDQCR_PRECEDENCE_SDQCR	0x80000000
#define QM_VDQCR_EXACT			0x40000000
#define QM_VDQCR_NUMFRAMES_MASK		0x3f000000
#define QM_VDQCR_NUMFRAMES_SET(n)	(((n) & 0x3f) << 24)
#define QM_VDQCR_NUMFRAMES_GET(n)	(((n) >> 24) & 0x3f)
#define QM_VDQCR_NUMFRAMES_TILLEMPTY	QM_VDQCR_NUMFRAMES_SET(0)

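/*
 * Illustrative sketch (not part of the API): a VDQCR word requesting exactly
 * 16 frames, with SDQCR taking precedence over the volatile command:
 *
 *   u32 vdqcr = QM_VDQCR_PRECEDENCE_SDQCR | QM_VDQCR_EXACT |
 *		 QM_VDQCR_NUMFRAMES_SET(16);
 */
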
/* --- QMan data structures (and associated constants) --- */

/* Represents s/w corenet portal mapped data structures */
struct qm_eqcr_entry;	/* EQCR (EnQueue Command Ring) entries */
struct qm_dqrr_entry;	/* DQRR (DeQueue Response Ring) entries */
struct qm_mr_entry;	/* MR (Message Ring) entries */
struct qm_mc_command;	/* MC (Management Command) command */
struct qm_mc_result;	/* MC result */

#define QM_FD_FORMAT_SG		0x4
#define QM_FD_FORMAT_LONG	0x2
#define QM_FD_FORMAT_COMPOUND	0x1
enum qm_fd_format {
	/*
	 * 'contig' implies a contiguous buffer, whereas 'sg' implies a
	 * scatter-gather table. 'big' implies a 29-bit length with no offset
	 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
	 * implies a s/g-like table, where each entry itself represents a frame
	 * (contiguous or scatter-gather) and the 29-bit "length" is
	 * interpreted purely for congestion calculations, ie. a "congestion
	 * weight".
	 */
	qm_fd_contig = 0,
	qm_fd_contig_big = QM_FD_FORMAT_LONG,
	qm_fd_sg = QM_FD_FORMAT_SG,
	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
	qm_fd_compound = QM_FD_FORMAT_COMPOUND
};

/* Capitalised versions are un-typed but can be used in static expressions */
#define QM_FD_CONTIG	0
#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
#define QM_FD_SG	QM_FD_FORMAT_SG
#define QM_FD_SG_BIG	(QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
#define QM_FD_COMPOUND	QM_FD_FORMAT_COMPOUND

/* "Frame Descriptor (FD)" */
struct __rte_aligned(8) qm_fd {
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 dd:2;	/* dynamic debug */
			u8 liodn_offset:6;
			u8 bpid:8;	/* Buffer Pool ID */
			u8 eliodn_offset:4;
			u8 __reserved:4;
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u32 addr_lo;	/* low 32-bits of 40-bit address */
#else
			u8 liodn_offset:6;
			u8 dd:2;	/* dynamic debug */
			u8 bpid:8;	/* Buffer Pool ID */
			u8 __reserved:4;
			u8 eliodn_offset:4;
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u32 addr_lo;	/* low 32-bits of 40-bit address */
#endif
		};
		struct {
			u64 __notaddress:24;
			/* More efficient address accessor */
			u64 addr:40;
		};
		u64 opaque_addr;
	};
	/* The 'format' field indicates the interpretation of the remaining 29
	 * bits of the 32-bit word. For packing reasons, it is duplicated in the
	 * other union elements. Note, union'd structs are difficult to use with
	 * static initialisation under gcc, in which case use the "opaque" form
	 * with one of the macros.
	 */
	union {
		/* For easier/faster copying of this part of the fd (eg. from a
		 * DQRR entry to an EQCR entry) copy 'opaque'
		 */
		u32 opaque;
		/* If 'format' is _contig or _sg, 20b length and 9b offset */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			enum qm_fd_format format:3;
			u16 offset:9;
			u32 length20:20;
#else
			u32 length20:20;
			u16 offset:9;
			enum qm_fd_format format:3;
#endif
		};
		/* If 'format' is _contig_big or _sg_big, 29b length */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			enum qm_fd_format _format1:3;
			u32 length29:29;
#else
			u32 length29:29;
			enum qm_fd_format _format1:3;
#endif
		};
		/* If 'format' is _compound, 29b "congestion weight" */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			enum qm_fd_format _format2:3;
			u32 cong_weight:29;
#else
			u32 cong_weight:29;
			enum qm_fd_format _format2:3;
#endif
		};
	};
	union {
		u32 cmd;
		u32 status;
	};
};
#define QM_FD_DD_NULL		0x00
#define QM_FD_PID_MASK		0x3f
static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
{
	return fd->addr;
}

static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
{
	return (dma_addr_t)fd->addr;
}

/* Macro, so we compile better if 'v' isn't always 64-bit */
#define qm_fd_addr_set64(fd, v) \
	do { \
		struct qm_fd *__fd931 = (fd); \
		__fd931->addr = v; \
	} while (0)

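/*
 * Illustrative sketch (not part of the API): filling in an FD for a single
 * contiguous buffer; 'buf_iova', 'buf_off' and 'buf_len' are hypothetical
 * caller-supplied values.
 *
 *   struct qm_fd fd;
 *
 *   fd.opaque_addr = 0;
 *   qm_fd_addr_set64(&fd, buf_iova);
 *   fd.format = qm_fd_contig;
 *   fd.offset = buf_off;	// 9-bit field
 *   fd.length20 = buf_len;	// 20-bit field
 *   fd.cmd = 0;
 */
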
/* Scatter/Gather table entry */
struct __rte_packed_begin qm_sg_entry {
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 __reserved1[3];
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u32 addr_lo;	/* low 32-bits of 40-bit address */
#else
			u32 addr_lo;	/* low 32-bits of 40-bit address */
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u8 __reserved1[3];
#endif
		};
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u64 __notaddress:24;
			u64 addr:40;
#else
			u64 addr:40;
			u64 __notaddress:24;
#endif
		};
		u64 opaque;
	};
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 extension:1;	/* Extension bit */
			u32 final:1;		/* Final bit */
			u32 length:30;
#else
			u32 length:30;
			u32 final:1;		/* Final bit */
			u32 extension:1;	/* Extension bit */
#endif
		};
		u32 val;
	};
	u8 __reserved2;
	u8 bpid;
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 __reserved3:3;
			u16 offset:13;
#else
			u16 offset:13;
			u16 __reserved3:3;
#endif
		};
		u16 val_off;
	};
} __rte_packed_end;
static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
{
	return sg->addr;
}

static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
{
	return (dma_addr_t)sg->addr;
}

/* Macro, so we compile better if 'v' isn't always 64-bit */
#define qm_sg_entry_set64(sg, v) \
	do { \
		struct qm_sg_entry *__sg931 = (sg); \
		__sg931->addr = v; \
	} while (0)

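/*
 * Illustrative sketch (not part of the API): building a two-entry S/G table;
 * 'iova0'/'len0' and 'iova1'/'len1' are hypothetical buffer fragments. The
 * last entry must have the 'final' bit set.
 *
 *   struct qm_sg_entry sg[2];
 *
 *   memset(sg, 0, sizeof(sg));
 *   qm_sg_entry_set64(&sg[0], iova0);
 *   sg[0].length = len0;
 *   qm_sg_entry_set64(&sg[1], iova1);
 *   sg[1].length = len1;
 *   sg[1].final = 1;
 */
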
295 struct __rte_aligned(8) __rte_packed_begin qm_eqcr_entry {
296 	u8 __dont_write_directly__verb;
297 	u8 dca;
298 	u16 seqnum;
299 	u32 orp;	/* 24-bit */
300 	u32 fqid;	/* 24-bit */
301 	u32 tag;
302 	struct qm_fd fd; /* this has alignment 8 */
303 	u8 __reserved3[32];
304 } __rte_packed_end;
305 
306 
307 /* "Frame Dequeue Response" */
308 struct __rte_aligned(8) qm_dqrr_entry {
309 	u8 verb;
310 	u8 stat;
311 	u16 seqnum;	/* 15-bit */
312 	u8 tok;
313 	u8 __reserved2[3];
314 	u32 fqid;	/* 24-bit */
315 	u32 contextB;
316 	struct qm_fd fd; /* this has alignment 8 */
317 	u8 __reserved4[32];
318 };
319 
320 #define QM_DQRR_VERB_VBIT		0x80
321 #define QM_DQRR_VERB_MASK		0x7f	/* where the verb contains; */
322 #define QM_DQRR_VERB_FRAME_DEQUEUE	0x60	/* "this format" */
323 #define QM_DQRR_STAT_FQ_EMPTY		0x80	/* FQ empty */
324 #define QM_DQRR_STAT_FQ_HELDACTIVE	0x40	/* FQ held active */
325 #define QM_DQRR_STAT_FQ_FORCEELIGIBLE	0x20	/* FQ was force-eligible'd */
326 #define QM_DQRR_STAT_FD_VALID		0x10	/* has a non-NULL FD */
327 #define QM_DQRR_STAT_UNSCHEDULED	0x02	/* Unscheduled dequeue */
328 #define QM_DQRR_STAT_DQCR_EXPIRED	0x01	/* VDQCR or PDQCR expired*/
329 
330 
331 /* "ERN Message Response" */
332 /* "FQ State Change Notification" */
333 struct __rte_aligned(8) __rte_packed_begin qm_mr_entry {
334 	union {
335 		alignas(8) struct __rte_packed_begin {
336 			u8 verb;
337 			u8 dca;
338 			u16 seqnum;
339 			u8 rc;		/* Rejection Code */
340 			u32 orp:24;
341 			u32 fqid;	/* 24-bit */
342 			u32 tag;
343 			struct qm_fd fd; /* this has alignment 8 */
344 		} __rte_packed_end ern;
345 		alignas(8) struct __rte_packed_begin {
346 			u8 verb;
347 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
348 			u8 colour:2;	/* See QM_MR_DCERN_COLOUR_* */
349 			u8 __reserved1:4;
350 			enum qm_dc_portal portal:2;
351 #else
352 			enum qm_dc_portal portal:3;
353 			u8 __reserved1:3;
354 			u8 colour:2;	/* See QM_MR_DCERN_COLOUR_* */
355 #endif
356 			u16 __reserved2;
357 			u8 rc;		/* Rejection Code */
358 			u32 __reserved3:24;
359 			u32 fqid;	/* 24-bit */
360 			u32 tag;
361 			struct qm_fd fd; /* this has alignment 8 */
362 		} __rte_packed_end dcern;
363 		alignas(8) struct __rte_packed_begin {
364 			u8 verb;
365 			u8 fqs;		/* Frame Queue Status */
366 			u8 __reserved1[6];
367 			u32 fqid;	/* 24-bit */
368 			u32 contextB;
369 			u8 __reserved2[16];
370 		} __rte_packed_end fq;	/* FQRN/FQRNI/FQRL/FQPN */
371 	};
372 	u8 __reserved2[32];
373 } __rte_packed_end;
374 #define QM_MR_VERB_VBIT			0x80
375 /*
376  * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
377  * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
378  * from the other MR types by noting if the 0x20 bit is unset.
379  */
380 #define QM_MR_VERB_TYPE_MASK		0x27
381 #define QM_MR_VERB_DC_ERN		0x20
382 #define QM_MR_VERB_FQRN			0x21
383 #define QM_MR_VERB_FQRNI		0x22
384 #define QM_MR_VERB_FQRL			0x23
385 #define QM_MR_VERB_FQPN			0x24
386 #define QM_MR_RC_MASK			0xf0	/* contains one of; */
387 #define QM_MR_RC_CGR_TAILDROP		0x00
388 #define QM_MR_RC_WRED			0x10
389 #define QM_MR_RC_ERROR			0x20
390 #define QM_MR_RC_ORPWINDOW_EARLY	0x30
391 #define QM_MR_RC_ORPWINDOW_LATE		0x40
392 #define QM_MR_RC_FQ_TAILDROP		0x50
393 #define QM_MR_RC_ORPWINDOW_RETIRED	0x60
394 #define QM_MR_RC_ORP_ZERO		0x70
395 #define QM_MR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
396 #define QM_MR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
397 #define QM_MR_DCERN_COLOUR_GREEN	0x00
398 #define QM_MR_DCERN_COLOUR_YELLOW	0x01
399 #define QM_MR_DCERN_COLOUR_RED		0x02
400 #define QM_MR_DCERN_COLOUR_OVERRIDE	0x03
/*
 * An identical structure of FQD fields is present in the "Init FQ" command and
 * the "Query FQ" result. It's suctioned out into the "struct qm_fqd" type.
 * Within that, the 'stashing' and 'taildrop' pieces are also factored out; the
 * latter has two inlines to assist with converting to/from the mant+exp
 * representation.
 */
struct __rte_packed_begin qm_fqd_stashing {
	/* See QM_STASHING_EXCL_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 exclusive;
	u8 __reserved1:2;
	/* Numbers of cachelines */
	u8 annotation_cl:2;
	u8 data_cl:2;
	u8 context_cl:2;
#else
	u8 context_cl:2;
	u8 data_cl:2;
	u8 annotation_cl:2;
	u8 __reserved1:2;
	u8 exclusive;
#endif
} __rte_packed_end;
struct __rte_packed_begin qm_fqd_taildrop {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 __reserved1:3;
	u16 mant:8;
	u16 exp:5;
#else
	u16 exp:5;
	u16 mant:8;
	u16 __reserved1:3;
#endif
} __rte_packed_end;
struct __rte_packed_begin qm_fqd_oac {
	/* "Overhead Accounting Control", see QM_OAC_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 oac:2; /* "Overhead Accounting Control" */
	u8 __reserved1:6;
#else
	u8 __reserved1:6;
	u8 oac:2; /* "Overhead Accounting Control" */
#endif
	/* Two's-complement value (-128 to +127) */
	signed char oal; /* "Overhead Accounting Length" */
} __rte_packed_end;
struct __rte_packed_begin qm_fqd {
	union {
		u8 orpc;
		struct __rte_packed_begin {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 __reserved1:2;
			u8 orprws:3;
			u8 oa:1;
			u8 olws:2;
#else
			u8 olws:2;
			u8 oa:1;
			u8 orprws:3;
			u8 __reserved1:2;
#endif
		} __rte_packed_end;
	};
	u8 cgid;
	u16 fq_ctrl;	/* See QM_FQCTRL_<...> */
	union {
		u16 dest_wq;
		struct __rte_packed_begin {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 channel:13; /* qm_channel */
			u16 wq:3;
#else
			u16 wq:3;
			u16 channel:13; /* qm_channel */
#endif
		} __rte_packed_end dest;
	};
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 __reserved2:1;
	u16 ics_cred:15;
#else
	u16 ics_cred:15;
	u16 __reserved2:1;
#endif
	/*
	 * For "Initialize Frame Queue" commands, the write-enable mask
	 * determines whether 'td' or 'oac_init' is observed. For query
	 * commands, this field is always 'td', and 'oac_query' (below) reflects
	 * the Overhead ACcounting values.
	 */
	union {
		uint16_t opaque_td;
		struct qm_fqd_taildrop td;
		struct qm_fqd_oac oac_init;
	};
	u32 context_b;
	union {
		/* Treat it as 64-bit opaque */
		u64 opaque;
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 hi;
			u32 lo;
#else
			u32 lo;
			u32 hi;
#endif
		};
		/* Treat it as s/w portal stashing config */
		/* see "FQD Context_A field used for [...]" */
		struct __rte_packed_begin {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			struct qm_fqd_stashing stashing;
			/*
			 * 48-bit address of FQ context to
			 * stash, must be cacheline-aligned
			 */
			u16 context_hi;
			u32 context_lo;
#else
			u32 context_lo;
			u16 context_hi;
			struct qm_fqd_stashing stashing;
#endif
		} __rte_packed_end;
	} context_a;
	struct qm_fqd_oac oac_query;
} __rte_packed_end;
/* 64-bit converters for context_hi/lo */
static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
{
	return ((u64)fqd->context_a.context_hi << 32) |
		(u64)fqd->context_a.context_lo;
}

static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
{
	return (dma_addr_t)qm_fqd_stashing_get64(fqd);
}

static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
{
	return ((u64)fqd->context_a.hi << 32) |
		(u64)fqd->context_a.lo;
}

static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.context_hi = upper_32_bits(addr);
	fqd->context_a.context_lo = lower_32_bits(addr);
}

static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.hi = upper_32_bits(addr);
	fqd->context_a.lo = lower_32_bits(addr);
}

/* convert a threshold value into mant+exp representation */
static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
				      int roundup)
{
	u32 e = 0;
	int oddbit = 0;

	if (val > 0xe0000000)
		return -ERANGE;
	while (val > 0xff) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}
	td->exp = e;
	td->mant = val;
	return 0;
}

/* and the other direction */
static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
{
	return (u32)td->mant << td->exp;
}

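/*
 * Worked example (illustrative): qm_fqd_taildrop_set(&td, 300, 0) halves 300
 * once to 150 (which fits in the 8-bit mantissa), giving mant=150, exp=1, so
 * qm_fqd_taildrop_get() returns 150 << 1 = 300 exactly. With roundup=1, any
 * odd intermediate value bumps the mantissa so the stored threshold never
 * rounds below the requested one.
 */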

/* See "Frame Queue Descriptor (FQD)" */
/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
#define QM_FQCTRL_MASK		0x07ff	/* 'fq_ctrl' flags; */
#define QM_FQCTRL_CGE		0x0400	/* Congestion Group Enable */
#define QM_FQCTRL_TDE		0x0200	/* Tail-Drop Enable */
#define QM_FQCTRL_ORP		0x0100	/* ORP Enable */
#define QM_FQCTRL_CTXASTASHING	0x0080	/* Context-A stashing */
#define QM_FQCTRL_CPCSTASH	0x0040	/* CPC Stash Enable */
#define QM_FQCTRL_FORCESFDR	0x0008	/* High-priority SFDRs */
#define QM_FQCTRL_AVOIDBLOCK	0x0004	/* Don't block active */
#define QM_FQCTRL_HOLDACTIVE	0x0002	/* Hold active in portal */
#define QM_FQCTRL_PREFERINCACHE	0x0001	/* Aggressively cache FQD */
#define QM_FQCTRL_LOCKINCACHE	QM_FQCTRL_PREFERINCACHE /* older naming */

/* See "FQD Context_A field used for [...]" */
/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
#define QM_STASHING_EXCL_ANNOTATION	0x04
#define QM_STASHING_EXCL_DATA		0x02
#define QM_STASHING_EXCL_CTX		0x01

/* See "Intra Class Scheduling" */
/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
#define QM_OAC_ICS		0x2 /* Accounting for Intra-Class Scheduling */
#define QM_OAC_CG		0x1 /* Accounting for Congestion Groups */

/*
 * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
 * and associated commands/responses. The WRED parameters are calculated from
 * these fields as follows;
 *   MaxTH = MA * (2 ^ Mn)
 *   Slope = SA / (2 ^ Sn)
 *    MaxP = 4 * (Pn + 1)
 */
struct __rte_packed_begin qm_cgr_wr_parm {
	union {
		u32 word;
		struct __rte_packed_begin {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 MA:8;
			u32 Mn:5;
			u32 SA:7; /* must be between 64-127 */
			u32 Sn:6;
			u32 Pn:6;
#else
			u32 Pn:6;
			u32 Sn:6;
			u32 SA:7; /* must be between 64-127 */
			u32 Mn:5;
			u32 MA:8;
#endif
		} __rte_packed_end;
	};
} __rte_packed_end;
/*
 * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
 * management commands, this is padded to a 16-bit structure field, so that's
 * how we represent it here. The congestion state threshold is calculated from
 * these fields as follows;
 *   CS threshold = TA * (2 ^ Tn)
 */
struct __rte_packed_begin qm_cgr_cs_thres {
	union {
		u16 hword;
		struct __rte_packed_begin {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 __reserved:3;
			u16 TA:8;
			u16 Tn:5;
#else
			u16 Tn:5;
			u16 TA:8;
			u16 __reserved:3;
#endif
		} __rte_packed_end;
	};
} __rte_packed_end;
/*
 * This identical structure of CGR fields is present in the "Init/Modify CGR"
 * commands and the "Query CGR" result. It's suctioned out here into its own
 * struct.
 */
struct __rte_packed_begin __qm_mc_cgr {
	struct qm_cgr_wr_parm wr_parm_g;
	struct qm_cgr_wr_parm wr_parm_y;
	struct qm_cgr_wr_parm wr_parm_r;
	u8 wr_en_g;	/* boolean, use QM_CGR_EN */
	u8 wr_en_y;	/* boolean, use QM_CGR_EN */
	u8 wr_en_r;	/* boolean, use QM_CGR_EN */
	u8 cscn_en;	/* boolean, use QM_CGR_EN */
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_CTRL_* */
			u16 cscn_targ_dcp_low;  /* CSCN_TARG_DCP low-16bits */
#else
			u16 cscn_targ_dcp_low;  /* CSCN_TARG_DCP low-16bits */
			u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_CTRL_* */
#endif
		};
		u32 cscn_targ;	/* use QM_CGR_TARG_* */
	};
	u8 cstd_en;	/* boolean, use QM_CGR_EN */
	u8 cs;		/* boolean, only used in query response */
	union {
		struct qm_cgr_cs_thres cs_thres;
		/* use qm_cgr_cs_thres_set64() */
		u16 __cs_thres;
	};
	u8 mode;	/* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
} __rte_packed_end;
#define QM_CGR_EN		0x01 /* For wr_en_*, cscn_en, cstd_en */
#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT	0x8000 /* value written to portal bit */
#define QM_CGR_TARG_UDP_CTRL_DCP	0x4000 /* 0: SWP, 1: DCP */
#define QM_CGR_TARG_PORTAL(n)	(0x80000000 >> (n)) /* s/w portal, 0-9 */
#define QM_CGR_TARG_FMAN0	0x00200000 /* direct-connect portal: fman0 */
#define QM_CGR_TARG_FMAN1	0x00100000 /*			   : fman1 */
/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
	return (u64)th->TA << th->Tn;
}

static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
					int roundup)
{
	u32 e = 0;
	int oddbit = 0;

	while (val > 0xff) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}
	th->Tn = e;
	th->TA = val;
	return 0;
}

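/*
 * Worked example (illustrative): qm_cgr_cs_thres_set64(&th, 48 * 1024, 0)
 * halves 49152 eight times down to 192, giving TA=192, Tn=8, and
 * qm_cgr_cs_thres_get64() then returns 192 << 8 = 49152 exactly. Values that
 * lose set bits while halving are truncated (or rounded up when roundup=1).
 */
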
/* See 1.5.8.5.1: "Initialize FQ" */
/* See 1.5.8.5.2: "Query FQ" */
/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
/* See 1.5.8.5.4: "Alter FQ State Commands " */
/* See 1.5.8.6.1: "Initialize/Modify CGR" */
/* See 1.5.8.6.2: "CGR Test Write" */
/* See 1.5.8.6.3: "Query CGR" */
/* See 1.5.8.6.4: "Query Congestion Group State" */
struct __rte_packed_begin qm_mcc_initfq {
	u8 __reserved1;
	u16 we_mask;	/* Write Enable Mask */
	u32 fqid;	/* 24-bit */
	u16 count;	/* Initialises 'count+1' FQDs */
	struct qm_fqd fqd; /* the FQD fields go here */
	u8 __reserved3[30];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_queryfq {
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_queryfq_np {
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_alterfq {
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2;
	u8 count;	/* number of consecutive FQID */
	u8 __reserved3[10];
	u32 context_b;	/* frame queue context b */
	u8 __reserved4[40];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_initcgr {
	u8 __reserved1;
	u16 we_mask;	/* Write Enable Mask */
	struct __qm_mc_cgr cgr;	/* CGR fields */
	u8 __reserved2[2];
	u8 cgid;
	u8 __reserved4[32];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_cgrtestwrite {
	u8 __reserved1[2];
	u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
	u8 __reserved2[23];
	u8 cgid;
	u8 __reserved3[32];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_querycgr {
	u8 __reserved1[30];
	u8 cgid;
	u8 __reserved2[32];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_querycongestion {
	u8 __reserved[63];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_querywq {
	u8 __reserved;
	/* select channel if verb != QUERYWQ_DEDICATED */
	union {
		u16 channel_wq; /* ignores wq (3 lsbits) */
		struct __rte_packed_begin {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 id:13; /* qm_channel */
			u16 __reserved1:3;
#else
			u16 __reserved1:3;
			u16 id:13; /* qm_channel */
#endif
		} __rte_packed_end channel;
	};
	u8 __reserved2[60];
} __rte_packed_end;

struct __rte_packed_begin qm_mc_command {
	u8 __dont_write_directly__verb;
	union {
		struct qm_mcc_initfq initfq;
		struct qm_mcc_queryfq queryfq;
		struct qm_mcc_queryfq_np queryfq_np;
		struct qm_mcc_alterfq alterfq;
		struct qm_mcc_initcgr initcgr;
		struct qm_mcc_cgrtestwrite cgrtestwrite;
		struct qm_mcc_querycgr querycgr;
		struct qm_mcc_querycongestion querycongestion;
		struct qm_mcc_querywq querywq;
	};
} __rte_packed_end;

/* INITFQ-specific flags */
#define QM_INITFQ_WE_MASK		0x01ff	/* 'Write Enable' flags; */
#define QM_INITFQ_WE_OAC		0x0100
#define QM_INITFQ_WE_ORPC		0x0080
#define QM_INITFQ_WE_CGID		0x0040
#define QM_INITFQ_WE_FQCTRL		0x0020
#define QM_INITFQ_WE_DESTWQ		0x0010
#define QM_INITFQ_WE_ICSCRED		0x0008
#define QM_INITFQ_WE_TDTHRESH		0x0004
#define QM_INITFQ_WE_CONTEXTB		0x0002
#define QM_INITFQ_WE_CONTEXTA		0x0001
/* INITCGR/MODIFYCGR-specific flags */
#define QM_CGR_WE_MASK			0x07ff	/* 'Write Enable Mask'; */
#define QM_CGR_WE_WR_PARM_G		0x0400
#define QM_CGR_WE_WR_PARM_Y		0x0200
#define QM_CGR_WE_WR_PARM_R		0x0100
#define QM_CGR_WE_WR_EN_G		0x0080
#define QM_CGR_WE_WR_EN_Y		0x0040
#define QM_CGR_WE_WR_EN_R		0x0020
#define QM_CGR_WE_CSCN_EN		0x0010
#define QM_CGR_WE_CSCN_TARG		0x0008
#define QM_CGR_WE_CSTD_EN		0x0004
#define QM_CGR_WE_CS_THRES		0x0002
#define QM_CGR_WE_MODE			0x0001

struct __rte_packed_begin qm_mcr_initfq {
	u8 __reserved1[62];
} __rte_packed_end;
struct __rte_packed_begin qm_mcr_queryfq {
	u8 __reserved1[8];
	struct qm_fqd fqd;	/* the FQD fields are here */
	u8 __reserved2[30];
} __rte_packed_end;
struct __rte_packed_begin qm_mcr_queryfq_np {
	u8 __reserved1;
	u8 state;	/* QM_MCR_NP_STATE_*** */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 __reserved2;
	u32 fqd_link:24;
	u16 __reserved3:2;
	u16 odp_seq:14;
	u16 __reserved4:2;
	u16 orp_nesn:14;
	u16 __reserved5:1;
	u16 orp_ea_hseq:15;
	u16 __reserved6:1;
	u16 orp_ea_tseq:15;
	u8 __reserved7;
	u32 orp_ea_hptr:24;
	u8 __reserved8;
	u32 orp_ea_tptr:24;
	u8 __reserved9;
	u32 pfdr_hptr:24;
	u8 __reserved10;
	u32 pfdr_tptr:24;
	u8 __reserved11[5];
	u8 __reserved12:7;
	u8 is:1;
	u16 ics_surp;
	u32 byte_cnt;
	u8 __reserved13;
	u32 frm_cnt:24;
	u32 __reserved14;
	u16 ra1_sfdr;	/* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr;	/* QM_MCR_NP_RA2_*** */
	u16 __reserved15;
	u16 od1_sfdr;	/* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr;	/* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr;	/* QM_MCR_NP_OD3_*** */
#else
	u8 __reserved2;
	u32 fqd_link:24;

	u16 odp_seq:14;
	u16 __reserved3:2;

	u16 orp_nesn:14;
	u16 __reserved4:2;

	u16 orp_ea_hseq:15;
	u16 __reserved5:1;

	u16 orp_ea_tseq:15;
	u16 __reserved6:1;

	u8 __reserved7;
	u32 orp_ea_hptr:24;

	u8 __reserved8;
	u32 orp_ea_tptr:24;

	u8 __reserved9;
	u32 pfdr_hptr:24;

	u8 __reserved10;
	u32 pfdr_tptr:24;

	u8 __reserved11[5];
	u8 is:1;
	u8 __reserved12:7;
	u16 ics_surp;
	u32 byte_cnt;
	u8 __reserved13;
	u32 frm_cnt:24;
	u32 __reserved14;
	u16 ra1_sfdr;	/* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr;	/* QM_MCR_NP_RA2_*** */
	u16 __reserved15;
	u16 od1_sfdr;	/* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr;	/* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr;	/* QM_MCR_NP_OD3_*** */
#endif
} __rte_packed_end;

struct __rte_packed_begin qm_mcr_alterfq {
	u8 fqs;		/* Frame Queue Status */
	u8 __reserved1[61];
} __rte_packed_end;
struct __rte_packed_begin qm_mcr_initcgr {
	u8 __reserved1[62];
} __rte_packed_end;
struct __rte_packed_begin qm_mcr_cgrtestwrite {
	u16 __reserved1;
	struct __qm_mc_cgr cgr; /* CGR fields */
	u8 __reserved2[3];
	u32 __reserved3:24;
	u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
	u32 __reserved4:24;
	u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
	u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
	u16 lgt;	/* Last Group Tick */
	u16 wr_prob_g;
	u16 wr_prob_y;
	u16 wr_prob_r;
	u8 __reserved5[8];
} __rte_packed_end;
struct __rte_packed_begin qm_mcr_querycgr {
	u16 __reserved1;
	struct __qm_mc_cgr cgr; /* CGR fields */
	u8 __reserved2[3];
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 __reserved3:24;
			u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
			u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
#else
			u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
			u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
			u32 __reserved3:24;
#endif
		};
		u64 i_bcnt;
	};
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 __reserved4:24;
			u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
			u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
#else
			u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
			u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
			u32 __reserved4:24;
#endif
		};
		u64 a_bcnt;
	};
	union {
		u32 cscn_targ_swp[4];
		u8 __reserved5[16];
	};
} __rte_packed_end;

struct __qm_mcr_querycongestion {
	u32 state[8];
};

struct __rte_packed_begin qm_mcr_querycongestion {
	u8 __reserved[30];
	/* Access this struct using QM_MCR_QUERYCONGESTION() */
	struct __qm_mcr_querycongestion state;
} __rte_packed_end;
struct __rte_packed_begin qm_mcr_querywq {
	union {
		u16 channel_wq; /* ignores wq (3 lsbits) */
		struct __rte_packed_begin {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 id:13; /* qm_channel */
			u16 __reserved:3;
#else
			u16 __reserved:3;
			u16 id:13; /* qm_channel */
#endif
		} __rte_packed_end channel;
	};
	u8 __reserved[28];
	u32 wq_len[8];
} __rte_packed_end;

struct __rte_packed_begin qm_mc_result {
	u8 verb;
	u8 result;
	union {
		struct qm_mcr_initfq initfq;
		struct qm_mcr_queryfq queryfq;
		struct qm_mcr_queryfq_np queryfq_np;
		struct qm_mcr_alterfq alterfq;
		struct qm_mcr_initcgr initcgr;
		struct qm_mcr_cgrtestwrite cgrtestwrite;
		struct qm_mcr_querycgr querycgr;
		struct qm_mcr_querycongestion querycongestion;
		struct qm_mcr_querywq querywq;
	};
} __rte_packed_end;

#define QM_MCR_VERB_RRID		0x80
#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL		0x00
#define QM_MCR_RESULT_OK		0xf0
#define QM_MCR_RESULT_ERR_FQID		0xf1
#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
#define QM_MCR_RESULT_PENDING		0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
#define QM_MCR_NP_STATE_FE		0x10
#define QM_MCR_NP_STATE_R		0x08
#define QM_MCR_NP_STATE_MASK		0x07	/* Reads FQD::STATE; */
#define QM_MCR_NP_STATE_OOS		0x00
#define QM_MCR_NP_STATE_RETIRED		0x01
#define QM_MCR_NP_STATE_TEN_SCHED	0x02
#define QM_MCR_NP_STATE_TRU_SCHED	0x03
#define QM_MCR_NP_STATE_PARKED		0x04
#define QM_MCR_NP_STATE_ACTIVE		0x05
#define QM_MCR_NP_PTR_MASK		0x07ff	/* for RA[12] & OD[123] */
#define QM_MCR_NP_RA1_NRA(v)		(((v) >> 14) & 0x3)	/* FQD::NRA */
#define QM_MCR_NP_RA2_IT(v)		(((v) >> 14) & 0x1)	/* FQD::IT */
#define QM_MCR_NP_OD1_NOD(v)		(((v) >> 14) & 0x3)	/* FQD::NOD */
#define QM_MCR_NP_OD3_NPC(v)		(((v) >> 14) & 0x3)	/* FQD::NPC */
#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
/* This extracts the state for congestion group 'n' from a query response.
 * Eg.
 *   u8 cgr = [...];
 *   struct qm_mc_result *res = [...];
 *   printf("congestion group %d congestion state: %d\n", cgr,
 *       QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
 */
#define __CGR_WORD(num)		(num >> 5)
#define __CGR_SHIFT(num)	(num & 0x1f)
#define __CGR_NUM		(sizeof(struct __qm_mcr_querycongestion) << 3)
static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
					 u8 cgr)
{
	return p->state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
}

	/* Portal and Frame Queues */
/* Represents a managed portal */
struct qman_portal;

/*
 * This object type represents QMan frame queue descriptors (FQD); it is
 * cacheline-aligned and initialised by qman_create_fq(). The structure is
 * defined further down.
 */
struct qman_fq;

/*
 * This object type represents a QMan congestion group; it is defined further
 * down.
 */
struct qman_cgr;

/*
 * This enum, and the callback type that returns it, are used when handling
 * dequeued frames via DQRR. Note that for "null" callbacks registered with the
 * portal object (for handling dequeues that do not demux because context_b is
 * NULL), the return value *MUST* be qman_cb_dqrr_consume.
 */
enum qman_cb_dqrr_result {
	/* DQRR entry can be consumed */
	qman_cb_dqrr_consume,
	/* Like _consume, but requests parking - FQ must be held-active */
	qman_cb_dqrr_park,
	/* Does not consume, for DCA mode only. This allows out-of-order
	 * consumes by explicit calls to qman_dca() and/or the use of implicit
	 * DCA via EQCR entries.
	 */
	qman_cb_dqrr_defer,
	/*
	 * Stop processing without consuming this ring entry. Exits the current
	 * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
	 * an interrupt handler, the callback would typically call
	 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
	 * otherwise the interrupt will reassert immediately.
	 */
	qman_cb_dqrr_stop,
	/* Like qman_cb_dqrr_stop, but consumes the current entry. */
	qman_cb_dqrr_consume_stop
};

typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
					struct qman_fq *fq,
					const struct qm_dqrr_entry *dqrr);

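/*
 * Illustrative sketch (not part of the API): a minimal DQRR callback matching
 * the 'qman_cb_dqrr' typedef above; the frame handling is hypothetical.
 *
 *   static enum qman_cb_dqrr_result
 *   my_dqrr_cb(struct qman_portal *qm, struct qman_fq *fq,
 *              const struct qm_dqrr_entry *dqrr)
 *   {
 *           // process dqrr->fd here
 *           return qman_cb_dqrr_consume;
 *   }
 */
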
typedef enum qman_cb_dqrr_result (*qman_dpdk_cb_dqrr)(void *event,
					struct qman_portal *qm,
					struct qman_fq *fq,
					const struct qm_dqrr_entry *dqrr,
					void **bd);

/* This callback type is used when handling buffers in dpdk pull mode */
typedef void (*qman_dpdk_pull_cb_dqrr)(struct qman_fq **fq,
					struct qm_dqrr_entry **dqrr,
					void **bufs,
					int num_bufs);

typedef void (*qman_dpdk_cb_prepare)(struct qm_dqrr_entry *dq, void **bufs);

/*
 * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
 * are always consumed after the callback returns.
 */
typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
				const struct qm_mr_entry *msg);

/* This callback type is used when handling DCP ERNs */
typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
				const struct qm_mr_entry *msg);

/* This callback function will be used to free mbufs of ERN */
typedef uint16_t (*qman_cb_free_mbuf)(const struct qm_fd *fd);

/*
 * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
 * held-active + held-suspended are just "sched". Things like "retired" will not
 * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
 * then, to indicate it's completing and to gate attempts to retry the retire
 * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
 * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
 * index rather than the FQ that ring entry corresponds to), so repeated park
 * commands are allowed (if you're silly enough to try) but won't change FQ
 * state, and the resulting park notifications move FQs from "sched" to
 * "parked".
 */
enum qman_fq_state {
	qman_fq_state_oos,
	qman_fq_state_parked,
	qman_fq_state_sched,
	qman_fq_state_retired
};


/*
 * Frame queue objects (struct qman_fq) are stored within memory passed to
 * qman_create_fq(), as this allows stashing of caller-provided demux callback
 * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
 * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
 * they should;
 *
 * (a) extend the qman_fq structure with their state; eg.
 *
 *     // myfq is allocated and driver_fq callbacks filled in;
 *     struct my_fq {
 *	   struct qman_fq base;
 *	   int an_extra_field;
 *	   [ ... add other fields to be associated with each FQ ...]
 *     } *myfq = some_my_fq_allocator();
 *     struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
 *
 *     // in a dequeue callback, access extra fields from 'fq' via a cast;
 *     struct my_fq *myfq = (struct my_fq *)fq;
 *     do_something_with(myfq->an_extra_field);
 *     [...]
 *
 * (b) when and if configuring the FQ for context stashing, specify however
 *     many cachelines are required to stash 'struct my_fq', to accelerate not
 *     only the QMan driver but the callback as well.
 */

struct qman_fq_cb {
	union { /* for dequeued frames */
		qman_dpdk_cb_dqrr dqrr_dpdk_cb;
		qman_dpdk_pull_cb_dqrr dqrr_dpdk_pull_cb;
		qman_cb_dqrr dqrr;
	};
	qman_dpdk_cb_prepare dqrr_prepare;
	qman_cb_mr ern;		/* for s/w ERNs */
	qman_cb_mr fqs;		/* frame-queue state changes */
};

struct qman_fq {
	/* Caller of qman_create_fq() provides these demux callbacks */
	struct qman_fq_cb cb;

	u32 fqid_le;
	u32 fqid;

	int q_fd;
	u16 ch_id;
	int8_t vsp_id;
	u8 cgr_groupid;
	u8 is_static:4;
	u8 qp_initialized:4;

	/* DPDK Interface */
	void *dpaa_intf;
	/* to store tx_conf_queue corresponding to tx_queue */
	struct qman_fq *tx_conf_queue;

	struct rte_event ev;
	/* affined portal in case of static queue */
	struct qman_portal *qp;
	struct dpaa_bp_info *bp_array;

	volatile unsigned long flags;

	enum qman_fq_state state;
	spinlock_t fqlock;

	struct rb_node node;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
	void **qman_fq_lookup_table;
	u32 key;
#endif
	u16 nb_desc;
	u16 resv;
	u64 offloads;
};

/*
 * This callback type is used when handling congestion group entry/exit.
 * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
 */
typedef void (*qman_cb_cgr)(struct qman_portal *qm,
			    struct qman_cgr *cgr, int congested);

struct qman_cgr {
	/* Set these prior to qman_create_cgr() */
	u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
	qman_cb_cgr cb;
	/* These are private to the driver */
	u16 chan; /* portal channel this object is created on */
	struct list_head node;
};

/* Flags to qman_create_fq() */
#define QMAN_FQ_FLAG_NO_ENQUEUE      0x00000001 /* can't enqueue */
#define QMAN_FQ_FLAG_NO_MODIFY       0x00000002 /* can only enqueue */
#define QMAN_FQ_FLAG_TO_DCPORTAL     0x00000004 /* consumed by CAAM/PME/Fman */
#define QMAN_FQ_FLAG_LOCKED          0x00000008 /* multi-core locking */
#define QMAN_FQ_FLAG_AS_IS           0x00000010 /* query h/w state */
#define QMAN_FQ_FLAG_DYNAMIC_FQID    0x00000020 /* (de)allocate fqid */

/* Flags to qman_destroy_fq() */
#define QMAN_FQ_DESTROY_PARKED       0x00000001 /* FQ can be parked or OOS */

/* Flags from qman_fq_state() */
#define QMAN_FQ_STATE_CHANGING       0x80000000 /* 'state' is changing */
#define QMAN_FQ_STATE_NE             0x40000000 /* retired FQ isn't empty */
#define QMAN_FQ_STATE_ORL            0x20000000 /* retired FQ has ORL */
#define QMAN_FQ_STATE_BLOCKOOS       0xe0000000 /* if any are set, no OOS */
#define QMAN_FQ_STATE_CGR_EN         0x10000000 /* CGR enabled */
#define QMAN_FQ_STATE_VDQCR          0x08000000 /* being volatile dequeued */

/* Flags to qman_init_fq() */
#define QMAN_INITFQ_FLAG_SCHED       0x00000001 /* schedule rather than park */
#define QMAN_INITFQ_FLAG_LOCAL       0x00000004 /* set dest portal */

/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
 * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
 * any change here should be audited in PME.)
 */
#define QMAN_ENQUEUE_FLAG_WATCH_CGR  0x00080000 /* watch congestion state */
#define QMAN_ENQUEUE_FLAG_DCA        0x00008000 /* perform enqueue-DCA */
#define QMAN_ENQUEUE_FLAG_DCA_PARK   0x00004000 /* If DCA, requests park */
#define QMAN_ENQUEUE_FLAG_DCA_PTR(p)		/* If DCA, p is DQRR entry */ \
		(((u32)(p) << 2) & 0x00000f00)
#define QMAN_ENQUEUE_FLAG_C_GREEN    0x00000000 /* choose one C_*** flag */
#define QMAN_ENQUEUE_FLAG_C_YELLOW   0x00000008
#define QMAN_ENQUEUE_FLAG_C_RED      0x00000010
#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
/* For the ORP-specific qman_enqueue_orp() variant;
 * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
 *   of a frame.
 */
#define QMAN_ENQUEUE_FLAG_NLIS       0x01000000
/* - this flag performs no enqueue but fills in an ORP sequence number that
 *   would otherwise block it (eg. if a frame has been dropped).
 */
#define QMAN_ENQUEUE_FLAG_HOLE       0x02000000
/* - this flag performs no enqueue but advances NESN to the given sequence
 *   number.
 */
#define QMAN_ENQUEUE_FLAG_NESN       0x04000000

/* Flags to qman_modify_cgr() */
#define QMAN_CGR_FLAG_USE_INIT       0x00000001
#define QMAN_CGR_MODE_FRAME          0x00000001

#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
__rte_internal
void qman_set_fq_lookup_table(void **table);
#endif

/**
 * qman_get_portal_index - get portal configuration index
 */
int qman_get_portal_index(void);

__rte_internal
u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
			void **bufs);

/**
 * qman_irqsource_add - add processing sources to be interrupt-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Adds processing sources that should be interrupt-driven (rather than
 * processed via qman_poll_***() functions). Returns zero for success, or
 * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
 */
__rte_internal
int qman_irqsource_add(u32 bits);

/**
 * qman_fq_portal_irqsource_add - similar to qman_irqsource_add, but it takes
 * an FQ-specific portal as input rather than using the thread-affined portal.
 */
__rte_internal
int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits);

/**
 * qman_irqsource_remove - remove processing sources from being interrupt-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Removes processing sources from being interrupt-driven, so that they will
 * instead be processed via qman_poll_***() functions. Returns zero for success,
 * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
 */
__rte_internal
int qman_irqsource_remove(u32 bits);

/**
 * qman_fq_portal_irqsource_remove - similar to qman_irqsource_remove, but it
 * takes an FQ-specific portal as input rather than using the thread-affined
 * portal.
 */
__rte_internal
int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits);

/**
 * qman_affine_channel - return the channel ID of a portal
 * @cpu: the cpu whose affine portal is the subject of the query
 *
 * If @cpu is -1, the affine portal for the current CPU will be used. It is a
 * bug to call this function for any value of @cpu (other than -1) that is not a
 * member of the cpu mask.
 */
u16 qman_affine_channel(int cpu);

__rte_internal
unsigned int qman_portal_poll_rx(unsigned int poll_limit,
				 void **bufs, struct qman_portal *q);

/**
 * qman_set_vdq - Issue a volatile dequeue command
 * @fq: Frame Queue on which the volatile dequeue command is issued
 * @num: Number of Frames requested for volatile dequeue
 * @vdqcr_flags: QM_VDQCR_EXACT flag for the VDQCR command
 *
 * This function will issue a volatile dequeue command to the QMAN.
 */
__rte_internal
int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);

/**
 * qman_dequeue - Get the DQRR entry after volatile dequeue command
 * @fq: Frame Queue on which the volatile dequeue command is issued
 *
 * This function will return the DQRR entry after a volatile dequeue command
 * is issued. It returns NULL once no further packets are available on the
 * DQRR.
 */
__rte_internal
struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);

/**
 * qman_dqrr_consume - Consume the DQRR entry after volatile dequeue
 * @fq: Frame Queue on which the volatile dequeue command is issued
 * @dq: DQRR entry to consume. This is the one which is provided by the
 *    'qman_dequeue' command.
 *
 * This will consume the DQRR entry and make it available for the next
 * volatile dequeue.
 */
__rte_internal
void qman_dqrr_consume(struct qman_fq *fq,
		       struct qm_dqrr_entry *dq);

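/*
 * Illustrative usage sketch (not part of the API): the three calls above are
 * typically used together; 'fq' is a hypothetical, already-initialised FQ.
 *
 *   struct qm_dqrr_entry *dq;
 *
 *   qman_set_vdq(fq, 4, QM_VDQCR_EXACT);
 *   while ((dq = qman_dequeue(fq)) != NULL) {
 *           // process dq->fd here
 *           qman_dqrr_consume(fq, dq);
 *   }
 */
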
/**
 * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
 *
 * Disables DQRR processing of the portal. This is reference-counted, so
 * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
 * truly re-enable dequeuing.
 */
void qman_stop_dequeues(void);

/**
 * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
 *
 * Enables DQRR processing of the portal. This is reference-counted, so
 * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
 * truly re-enable dequeuing.
 */
void qman_start_dequeues(void);

/**
 * qman_static_dequeue_add - Add pool channels to the portal SDQCR
 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
 *
 * Adds a set of pool channels to the portal's static dequeue command register
 * (SDQCR). The requested pools are limited to those the portal has dequeue
 * access to.
 */
__rte_internal
void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);

/**
 * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
 *
 * Removes a set of pool channels from the portal's static dequeue command
 * register (SDQCR). The requested pools are limited to those the portal has
 * dequeue access to.
 */
void qman_static_dequeue_del(u32 pools, struct qman_portal *qp);

/**
 * qman_static_dequeue_get - return the portal's current SDQCR
 *
 * Returns the portal's current static dequeue command register (SDQCR). The
 * entire register is returned, so if only the currently-enabled pool channels
 * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
 */
u32 qman_static_dequeue_get(struct qman_portal *qp);

/**
 * qman_dca - Perform a Discrete Consumption Acknowledgment
 * @dq: the DQRR entry to be consumed
 * @park_request: indicates whether the held-active @fq should be parked
 *
 * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
 * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
 * does not take a 'portal' argument but implies the core affine portal from the
 * cpu that is currently executing the function. For reasons of locking, this
 * function must be called from the same CPU as that which processed the DQRR
 * entry in the first place.
 */
void qman_dca(const struct qm_dqrr_entry *dq, int park_request);

/**
 * qman_dca_index - Perform a Discrete Consumption Acknowledgment
 * @index: the DQRR index to be consumed
 * @park_request: indicates whether the held-active @fq should be parked
 *
 * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
 * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
 * does not take a 'portal' argument but implies the core affine portal from the
 * cpu that is currently executing the function. For reasons of locking, this
 * function must be called from the same CPU as that which processed the DQRR
 * entry in the first place.
 */
__rte_internal
void qman_dca_index(u8 index, int park_request);

/**
 * qman_eqcr_is_empty - Determine if portal's EQCR is empty
 *
 * For use in situations where a cpu-affine caller needs to determine when all
 * enqueues for the local portal have been processed by Qman but can't use the
 * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
 * The function forces tracking of EQCR consumption (which normally doesn't
 * happen until enqueue processing needs to find space to put new enqueue
 * commands), and returns zero if the ring still has unprocessed entries,
 * non-zero if it is empty.
 */
int qman_eqcr_is_empty(void);

/**
 * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
 * @handler: callback for processing DCP ERNs
 * @affine: whether this handler is specific to the locally affine portal
 *
 * If a hardware block's interface to Qman (ie. its direct-connect portal, or
 * DCP) is configured not to receive enqueue rejections, then any enqueues
 * through that DCP that are rejected will be sent to a given software portal.
 * If @affine is non-zero, then this handler will only be used for DCP ERNs
 * received on the portal affine to the current CPU. If multiple CPUs share a
 * portal and they all call this function, they will be setting the handler for
 * the same portal! If @affine is zero, then this handler will be global to all
 * portals handled by this instance of the driver. Only those portals that do
 * not have their own affine handler will use the global handler.
 */
void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);

	/* FQ management */
	/* ------------- */
/**
 * qman_create_fq - Allocates a FQ
 * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
 * @flags: bit-mask of QMAN_FQ_FLAG_*** options
 * @fq: memory for storing the 'fq', with callbacks filled in
 *
 * Creates a frame queue object for the given @fqid, unless the
 * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
 * dynamically allocated (or the function fails if none are available). Once
 * created, the caller should not touch the memory at 'fq' except as extended to
 * adjacent memory for user-defined fields (see the definition of "struct
 * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
 * pre-existing frame-queues that aren't to be otherwise interfered with; it
 * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
 * causes the driver to honour any contextB modifications requested in the
 * qm_init_fq() API, as this indicates the frame queue will be consumed by a
 * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
 * software portals, the contextB field is controlled by the driver and can't be
 * modified by the caller. If the AS_IS flag is specified, management commands
 * will be used on portal @p to query state for frame queue @fqid and construct
 * a frame queue object based on that, rather than assuming/requiring that it be
 * Out of Service.
 */
__rte_internal
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);

/**
 * qman_destroy_fq - Deallocates a FQ
 * @fq: the frame queue object to release
 * @flags: bit-mask of QMAN_FQ_FREE_*** options
 *
 * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
 * not deallocated but the caller regains ownership, to do with as desired. The
 * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
 * is specified, in which case it may also be in the 'parked' state.
 */
void qman_destroy_fq(struct qman_fq *fq, u32 flags);

/**
 * qman_fq_fqid - Queries the frame queue ID of a FQ object
 * @fq: the frame queue object to query
 */
__rte_internal
u32 qman_fq_fqid(struct qman_fq *fq);

/**
 * qman_fq_state - Queries the state of a FQ object
 * @fq: the frame queue object to query
 * @state: pointer to state enum to return the FQ scheduling state
 * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
 *
 * Queries the state of the FQ object, without performing any h/w commands.
 * This captures the state, as seen by the driver, at the time the function
 * executes.
 */
__rte_internal
void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);

1601 /**
1602  * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
1603  * @fq: the frame queue object to modify, must be 'parked' or new.
1604  * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
1605  * @opts: the FQ-modification settings, as defined in the low-level API
1606  *
1607  * The @opts parameter comes from the low-level portal API. Select
1608  * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
1609  * rather than parked. NB, @opts can be NULL.
1610  *
1611  * Note that some fields and options within @opts may be ignored or overwritten
1612  * by the driver;
1613  * 1. the 'count' and 'fqid' fields are always ignored (this operation only
1614  * affects one frame queue: @fq).
1615  * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
1616  * 'fqd' structure's 'context_b' field are sometimes overwritten;
1617  *   - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
1618  *     initialised to a value used by the driver for demux.
1619  *   - if context_b is initialised for demux, so is context_a in case stashing
1620  *     is requested (see item 4).
1621  * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
1622  * objects.)
1623  * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
1624  * 'dest::channel' field will be overwritten to match the portal used to issue
1625  * the command. If the WE_DESTWQ write-enable bit had already been set by the
1626  * caller, the channel workqueue will be left as-is, otherwise the write-enable
1627  * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
1628  * isn't set, the destination channel/workqueue fields and the write-enable bit
1629  * are left as-is.
1630  * 4. if the driver overwrites context_a/b for demux, then if
1631  * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
1632  * context_a.address fields and will leave the stashing fields provided by the
1633  * user alone, otherwise it will zero out the context_a.stashing fields.
1634  */
1635 __rte_internal
1636 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
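
/*
 * Example (an illustrative sketch): point a FQ at a work queue on the first
 * pool channel and schedule it. The WQ value is a placeholder; 'we_mask' and
 * 'fqd.dest' are used as declared earlier in this header.
 *
 *	struct qm_mcc_initfq opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = QM_INITFQ_WE_DESTWQ;
 *	opts.fqd.dest.channel = dpaa_get_qm_channel_pool();
 *	opts.fqd.dest.wq = 3;
 *	err = qman_init_fq(&my_fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 */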
1637 
1638 /**
1639  * qman_schedule_fq - Schedules a FQ
1640  * @fq: the frame queue object to schedule, must be 'parked'
1641  *
1642  * Schedules the frame queue, which must be Parked; this takes it to the
1643  * Tentatively-Scheduled or Truly-Scheduled state depending on its fill-level.
1644  */
1645 int qman_schedule_fq(struct qman_fq *fq);
1646 
1647 /**
1648  * qman_retire_fq - Retires a FQ
1649  * @fq: the frame queue object to retire
1650  * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
1651  *
1652  * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
1653  * the retirement was started asynchronously, otherwise it returns negative for
1654  * failure. When this function returns zero, @flags is set to indicate whether
1655  * the retired FQ is empty and/or whether it has any ORL fragments (to show up
1656  * as ERNs). Otherwise the corresponding flags will be known when a subsequent
1657  * FQRN message shows up on the portal's message ring.
1658  *
1659  * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
1660  * Active state), the completion will be via the message ring as a FQRN - but
1661  * the corresponding callback may occur before this function returns!! Ie. the
1662  * caller should be prepared to accept the callback as the function is called,
1663  * not only once it has returned.
1664  */
1665 __rte_internal
1666 int qman_retire_fq(struct qman_fq *fq, u32 *flags);
1667 
1668 /**
1669  * qman_oos_fq - Puts a FQ "out of service"
1670  * @fq: the frame queue object to be put out-of-service, must be 'retired'
1671  *
1672  * The frame queue must be retired and empty, and if any order restoration list
1673  * entries were released as ERNs at retirement, they must all be consumed.
1674  */
1675 __rte_internal
1676 int qman_oos_fq(struct qman_fq *fq);
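
/*
 * Example teardown sequence (an illustrative sketch): retire, wait out any
 * asynchronous retirement (the portal's MR must be serviced for the FQRN to
 * be seen), then place the FQ out of service and release the object.
 *
 *	u32 flags;
 *	enum qman_fq_state state;
 *	int ret = qman_retire_fq(&my_fq, &flags);
 *
 *	if (ret == 1) {
 *		// Asynchronous: poll until the FQRN callback has run.
 *		do {
 *			qman_fq_state(&my_fq, &state, &flags);
 *		} while (flags & QMAN_FQ_STATE_CHANGING);
 *	}
 *	if (ret >= 0 && !qman_oos_fq(&my_fq))
 *		qman_destroy_fq(&my_fq, 0);
 */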
1677 
1678 /**
1679  * qman_fq_flow_control - Set the XON/XOFF state of a FQ
1680  * @fq: the frame queue object to be set to XON/XOFF state; must not be in the
1681  * 'oos', 'retired' or 'parked' state
1682  * @xon: boolean to set fq in XON or XOFF state
1683  *
1684  * The frame queue should be in Tentatively Scheduled or Truly Scheduled state,
1685  * otherwise the IFSI interrupt will be asserted.
1686  */
1687 int qman_fq_flow_control(struct qman_fq *fq, int xon);
1688 
1689 /**
1690  * qman_query_fq - Queries FQD fields (via h/w query command)
1691  * @fq: the frame queue object to be queried
1692  * @fqd: storage for the queried FQD fields
1693  */
1694 int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
1695 
1696 /**
1697  * qman_query_fq_has_pkts - Queries non-programmable FQD fields and returns '1'
1698  * if there are packets in the frame queue, or '0' if the frame queue is
1699  * empty.
1700  * @fq: the frame queue object to be queried
1701  */
1702 int qman_query_fq_has_pkts(struct qman_fq *fq);
1703 
1704 /**
1705  * qman_query_fq_np - Queries non-programmable FQD fields
1706  * @fq: the frame queue object to be queried
1707  * @np: storage for the queried FQD fields
1708  */
1709 __rte_internal
1710 int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
1711 
1712 /**
1713  * qman_query_fq_frm_cnt - Queries the FQ frame count
1714  * @fq: the frame queue object to be queried
1715  * @frm_cnt: number of frames in the queue
1716  */
1717 __rte_internal
1718 int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);
1719 
1720 /**
1721  * qman_query_wq - Queries work queue lengths
1722  * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
1723  *		to this software portal. Otherwise, query length of WQs in a
1724  *		channel specified in wq.
1725  * @wq: storage for the queried WQ lengths. Also specifies the channel to
1726  *	query if query_dedicated is zero.
1727  */
1728 int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
1729 
1730 /**
1731  * qman_volatile_dequeue - Issue a volatile dequeue command
1732  * @fq: the frame queue object to dequeue from
1733  * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
1734  * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
1735  *
1736  * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
1737  * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
1738  * the VDQCR is already in use; otherwise it returns non-zero for failure. If
1739  * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
1740  * the VDQCR command has finished executing (ie. once the callback for the last
1741  * DQRR entry resulting from the VDQCR command has been called). If not using
1742  * the FINISH flag, completion can be determined either by detecting the
1743  * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
1744  * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
1745  * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
1746  * "flags" retrieved from qman_fq_state().
1747  */
1748 __rte_internal
1749 int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
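
/*
 * Example (an illustrative sketch): pull up to 4 frames from a parked or
 * retired FQ, blocking until the VDQCR is free and until the last resulting
 * DQRR entry has been consumed by the FQ's dequeue callback.
 *
 *	err = qman_volatile_dequeue(&my_fq,
 *		QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH,
 *		QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_SET(4));
 */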
1750 
1751 /**
1752  * qman_enqueue - Enqueue a frame to a frame queue
1753  * @fq: the frame queue object to enqueue to
1754  * @fd: a descriptor of the frame to be enqueued
1755  * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
1756  *
1757  * Fills an entry in the affine portal's EQCR to enqueue the frame described by
1758  * @fd. The descriptor details are copied from @fd to the EQCR entry; the 'pid'
1759  * field is ignored. The return value is non-zero on error, such as ring full
1760  * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
1761  * specified), etc. If the ring is full and FLAG_WAIT is specified, this
1762  * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
1763  * interrupt will assert when Qman consumes the EQCR entry (subject to "status
1764  * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
1765  * perform an implied "discrete consumption acknowledgment" on the dequeue
1766  * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
1767  * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
1768  * this implicit DCA can delay the release of a "held active" frame queue
1769  * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
1770  * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
1771  * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
1772  * acknowledgment should "park request" the "held active" frame queue. Ie.
1773  * when the portal eventually releases that frame queue, it will be left in the
1774  * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
1775  * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
1776  * is requested, and the FQ is a member of a congestion group, then this
1777  * function returns -EAGAIN if the congestion group is currently congested.
1778  * Note, this does not eliminate ERNs, as the async interface means we can be
1779  * sending enqueue commands to an un-congested FQ that becomes congested before
1780  * the enqueue commands are processed, but it does minimise needless thrashing
1781  * of an already busy hardware resource by throttling many of the to-be-dropped
1782  * enqueues "at the source".
1783  */
1784 __rte_internal
1785 int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
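
/*
 * Example (an illustrative sketch): spin on a full EQCR rather than passing a
 * WAIT flag; 'fd' is assumed to have been filled in by the caller beforehand.
 *
 *	while (qman_enqueue(&my_fq, &fd, 0))
 *		;	// ring full (or other transient failure): retry
 */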
1786 
1787 __rte_internal
1788 int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
1789 		       int frames_to_send);
1790 
1791 /**
1792  * qman_ern_poll_free - Polls the MR and calls the registered callback function
1793  * to free mbufs when s/w ERNs are received.
1794  */
1795 __rte_internal
1796 void qman_ern_poll_free(void);
1797 
1798 /**
1799  * qman_ern_register_cb - Register a callback function to free buffers.
1800  */
1801 __rte_internal
1802 void qman_ern_register_cb(qman_cb_free_mbuf cb);
1803 
1804 /**
1805  * qman_enqueue_multi_fq - Enqueue multiple frames to their respective frame
1806  * queues.
1807  * @fq[]: Array of frame queue objects to enqueue to
1808  * @fd: pointer to first descriptor of frame to be enqueued
1809  * @frames_to_send: number of frames to be sent.
1810  *
1811  * This API is similar to qman_enqueue_multi(), but it takes an array of fds
1812  * that need to be enqueued to different frame queues.
1813  */
1814 __rte_internal
1815 int
1816 qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
1817 		      u32 *flags, int frames_to_send);
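
/*
 * Example (an illustrative sketch, assuming the return value is the number of
 * frames actually enqueued and that a NULL @flags is accepted): send a burst
 * in which each frame targets its own FQ. BURST is a placeholder constant.
 *
 *	struct qman_fq *fqs[BURST];	// one target FQ per frame
 *	struct qm_fd fds[BURST];
 *	int sent = 0;
 *
 *	while (sent < BURST)
 *		sent += qman_enqueue_multi_fq(&fqs[sent], &fds[sent],
 *					      NULL, BURST - sent);
 */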
1818 
1819 typedef int (*qman_cb_precommit) (void *arg);
1820 
1821 /**
1822  * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
1823  * @fq: the frame queue object to enqueue to
1824  * @fd: a descriptor of the frame to be enqueued
1825  * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
1826  * @orp: the frame queue object used as an order restoration point.
1827  * @orp_seqnum: the sequence number of this frame in the order restoration path
1828  *
1829  * Similar to qman_enqueue(), but with the addition of an Order Restoration
1830  * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
1831  * enqueue operation to employ order restoration. Each frame queue object acts
1832  * as an Order Definition Point (ODP) by providing each frame dequeued from it
1833  * with an incrementing sequence number; this value is generally ignored unless
1834  * that sequence of dequeued frames will need order restoration later. Each
1835  * frame queue object also encapsulates an Order Restoration Point (ORP), which
1836  * is a re-assembly context for re-ordering frames relative to their sequence
1837  * numbers as they are enqueued. The ORP does not have to be within the frame
1838  * queue that receives the enqueued frame, in fact it is usually the frame
1839  * queue from which the frames were originally dequeued. For the purposes of
1840  * order restoration, multiple frames (or "fragments") can be enqueued for a
1841  * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
1842  * enqueues except the final fragment of a given sequence number. Ordering
1843  * between sequence numbers is guaranteed, even if fragments of different
1844  * sequence numbers are interlaced with one another. Fragments of the same
1845  * sequence number will retain the order in which they are enqueued. If no
1846  * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
1847  * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
1848  * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
1849  * sequence number should become the ORP's "Next Expected Sequence Number".
1850  *
1851  * Side note: a frame queue object can be used purely as an ORP, without
1852  * carrying any frames at all. Care should be taken not to deallocate a frame
1853  * queue object that is being actively used as an ORP, as a future allocation
1854  * of the frame queue object may start using the internal ORP before the
1855  * previous use has finished.
1856  */
1857 int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
1858 		     struct qman_fq *orp, u16 orp_seqnum);
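
/*
 * Example (an illustrative sketch): restore order for a two-fragment sequence
 * number 'n', then skip a dropped sequence number. The fd of a HOLE enqueue
 * is assumed to be ignored here, so a zeroed one is passed.
 *
 *	// All but the final fragment of sequence 'n' carry NLIS.
 *	qman_enqueue_orp(&tx_fq, &frag0, QMAN_ENQUEUE_FLAG_NLIS, orp_fq, n);
 *	qman_enqueue_orp(&tx_fq, &frag1, 0, orp_fq, n);
 *
 *	// Sequence n+1 was dropped: advance the ORP past it.
 *	struct qm_fd hole_fd = {0};
 *	qman_enqueue_orp(&tx_fq, &hole_fd, QMAN_ENQUEUE_FLAG_HOLE,
 *			 orp_fq, n + 1);
 */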
1859 
1860 /**
1861  * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
1862  * @result: is set by the API to the base FQID of the allocated range
1863  * @count: the number of FQIDs required
1864  * @align: required alignment of the allocated range
1865  * @partial: non-zero if the API can return fewer than @count FQIDs
1866  *
1867  * Returns the number of frame queues allocated, or a negative error code. If
1868  * @partial is non-zero, the allocation request may return a smaller range of
1869  * FQs than requested (though alignment will be as requested). If @partial is
1870  * zero, the return value will either be 'count' or negative.
1871  */
1872 __rte_internal
1873 int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
1874 static inline int qman_alloc_fqid(u32 *result)
1875 {
1876 	int ret = qman_alloc_fqid_range(result, 1, 0, 0);
1877 
1878 	return (ret > 0) ? 0 : ret;
1879 }
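
/*
 * Example (an illustrative sketch): allocate up to 8 contiguous FQIDs,
 * aligned to 8, accepting a partial range.
 *
 *	u32 base;
 *	int n = qman_alloc_fqid_range(&base, 8, 8, 1);
 *
 *	if (n < 0)
 *		return n;	// nothing available
 *	// 'n' FQIDs in [base, base + n) now belong to the caller; hand them
 *	// back with qman_release_fqid_range(base, n) when done.
 */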
1880 
1881 /**
1882  * qman_release_fqid_range - Release the specified range of frame queue IDs
1883  * @fqid: the base FQID of the range to deallocate
1884  * @count: the number of FQIDs in the range
1885  *
1886  * This function can also be used to seed the allocator with ranges of FQIDs
1887  * that it can subsequently allocate from.
1888  */
1889 void qman_release_fqid_range(u32 fqid, unsigned int count);
1890 static inline void qman_release_fqid(u32 fqid)
1891 {
1892 	qman_release_fqid_range(fqid, 1);
1893 }
1894 
1895 void qman_seed_fqid_range(u32 fqid, unsigned int count);
1896 
1897 int qman_shutdown_fq(u32 fqid);
1898 
1899 /**
1900  * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
1901  * @fqid: the base FQID of the range to reserve
1902  * @count: the number of FQIDs in the range
1903  */
1904 __rte_internal
1905 int qman_reserve_fqid_range(u32 fqid, unsigned int count);
1906 static inline int qman_reserve_fqid(u32 fqid)
1907 {
1908 	return qman_reserve_fqid_range(fqid, 1);
1909 }
1910 
1911 /* Pool-channel management */
1912 /**
1913  * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
1914  * @result: is set by the API to the base pool-channel ID of the allocated range
1915  * @count: the number of pool-channel IDs required
1916  * @align: required alignment of the allocated range
1917  * @partial: non-zero if the API can return fewer than @count
1918  *
1919  * Returns the number of pool-channel IDs allocated, or a negative error code.
1920  * If @partial is non-zero, the allocation request may return a smaller range
1921  * than requested (though alignment will be as requested). If @partial is zero,
1922  * the return value will either be 'count' or negative.
1923  */
1924 __rte_internal
1925 int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
1926 static inline int qman_alloc_pool(u32 *result)
1927 {
1928 	int ret = qman_alloc_pool_range(result, 1, 0, 0);
1929 
1930 	return (ret > 0) ? 0 : ret;
1931 }
1932 
1933 /**
1934  * qman_release_pool_range - Release the specified range of pool-channel IDs
1935  * @id: the base pool-channel ID of the range to deallocate
1936  * @count: the number of pool-channel IDs in the range
1937  */
1938 void qman_release_pool_range(u32 id, unsigned int count);
1939 static inline void qman_release_pool(u32 id)
1940 {
1941 	qman_release_pool_range(id, 1);
1942 }
1943 
1944 /**
1945  * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
1946  * @id: the base pool-channel ID of the range to reserve
1947  * @count: the number of pool-channel IDs in the range
1948  */
1949 int qman_reserve_pool_range(u32 id, unsigned int count);
1950 static inline int qman_reserve_pool(u32 id)
1951 {
1952 	return qman_reserve_pool_range(id, 1);
1953 }
1954 
1955 void qman_seed_pool_range(u32 id, unsigned int count);
1956 
1957 	/* CGR management */
1958 	/* -------------- */
1959 /**
1960  * qman_create_cgr - Register a congestion group object
1961  * @cgr: the 'cgr' object, with fields filled in
1962  * @flags: QMAN_CGR_FLAG_* values
1963  * @opts: optional state of CGR settings
1964  *
1965  * Registers this object to receive congestion entry/exit callbacks on the
1966  * portal affine to the CPU on which this API is executed. If opts is
1967  * NULL then only the callback (cgr->cb) function is registered. If @flags
1968  * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
1969  * any unspecified parameters) will be used rather than a modify hw command
1970  * (which only modifies the specified parameters).
1971  */
1972 __rte_internal
1973 int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
1974 		    struct qm_mcc_initcgr *opts);
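
/*
 * Example (an illustrative sketch): register a CGR with congestion state
 * change notifications enabled and a tail-drop threshold. The threshold value
 * is a placeholder, and qm_cgr_cs_thres_set64() is assumed to be the
 * threshold-encoding helper provided elsewhere in this header.
 *
 *	static void my_cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr,
 *			      int congested)
 *	{
 *		// congested != 0 on congestion entry, 0 on exit
 *	}
 *
 *	struct qman_cgr cgr = { .cgrid = my_cgrid, .cb = my_cgr_cb };
 *	struct qm_mcc_initcgr opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = QM_CGR_WE_CS_THRES | QM_CGR_WE_CSCN_EN;
 *	opts.cgr.cscn_en = QM_CGR_EN;
 *	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 0x10000, 1);
 *	err = qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
 */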
1975 
1976 /**
1977  * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
1978  * @cgr: the 'cgr' object, with fields filled in
1979  * @flags: QMAN_CGR_FLAG_* values
1980  * @dcp_portal: the DCP portal to which the cgr object is registered.
1981  * @opts: optional state of CGR settings
1982  *
1983  */
1984 int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
1985 			   struct qm_mcc_initcgr *opts);
1986 
1987 /**
1988  * qman_delete_cgr - Deregisters a congestion group object
1989  * @cgr: the 'cgr' object to deregister
1990  *
1991  * "Unplugs" this CGR object from the portal affine to the cpu on which this API
1992  * is executed. This must be executed on the same affine portal on which it was
1993  * created.
1994  */
1995 __rte_internal
1996 int qman_delete_cgr(struct qman_cgr *cgr);
1997 
1998 /**
1999  * qman_modify_cgr - Modify CGR fields
2000  * @cgr: the 'cgr' object to modify
2001  * @flags: QMAN_CGR_FLAG_* values
2002  * @opts: the CGR-modification settings
2003  *
2004  * The @opts parameter comes from the low-level portal API, and can be NULL.
2005  * Note that some fields and options within @opts may be ignored or overwritten
2006  * by the driver, in particular the 'cgrid' field is ignored (this operation
2007  * only affects the given CGR object). If @flags contains
2008  * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
2009  * unspecified parameters) will be used rather than a modify hw command (which
2010  * only modifies the specified parameters).
2011  */
2012 __rte_internal
2013 int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
2014 		    struct qm_mcc_initcgr *opts);
2015 
2016 /**
2017  * qman_query_cgr - Queries CGR fields
2018  * @cgr: the 'cgr' object to query
2019  * @result: storage for the queried congestion group record
2020  */
2021 int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
2022 
2023 /**
2024  * qman_query_congestion - Queries the state of all congestion groups
2025  * @congestion: storage for the queried state of all congestion groups
2026  */
2027 int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
2028 
2029 /**
2030  * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
2031  * @result: is set by the API to the base CGR ID of the allocated range
2032  * @count: the number of CGR IDs required
2033  * @align: required alignment of the allocated range
2034  * @partial: non-zero if the API can return fewer than @count
2035  *
2036  * Returns the number of CGR IDs allocated, or a negative error code.
2037  * If @partial is non-zero, the allocation request may return a smaller range
2038  * than requested (though alignment will be as requested). If @partial is zero,
2039  * the return value will either be 'count' or negative.
2040  */
2041 __rte_internal
2042 int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
2043 static inline int qman_alloc_cgrid(u32 *result)
2044 {
2045 	int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
2046 
2047 	return (ret > 0) ? 0 : ret;
2048 }
2049 
2050 /**
2051  * qman_release_cgrid_range - Release the specified range of CGR IDs
2052  * @id: the base CGR ID of the range to deallocate
2053  * @count: the number of CGR IDs in the range
2054  */
2055 __rte_internal
2056 void qman_release_cgrid_range(u32 id, unsigned int count);
2057 static inline void qman_release_cgrid(u32 id)
2058 {
2059 	qman_release_cgrid_range(id, 1);
2060 }
2061 
2062 /**
2063  * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
2064  * @id: the base CGR ID of the range to reserve
2065  * @count: the number of CGR IDs in the range
2066  */
2067 int qman_reserve_cgrid_range(u32 id, unsigned int count);
2068 static inline int qman_reserve_cgrid(u32 id)
2069 {
2070 	return qman_reserve_cgrid_range(id, 1);
2071 }
2072 
2073 void qman_seed_cgrid_range(u32 id, unsigned int count);
2074 
2075 	/* Helpers */
2076 	/* ------- */
2077 /**
2078  * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
2079  * @fqid: the FQID that will be initialised by other s/w
2080  *
2081  * In many situations, a FQID is provided for communication between s/w
2082  * entities, and whilst the consumer is responsible for initialising and
2083  * scheduling the FQ, the producer(s) generally create a wrapper FQ object and
2084  * only call qman_enqueue() (no FQ initialisation, scheduling, etc). Ie.
2085  *     qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
2086  * However, data can not be enqueued to the FQ until it is initialised out of
2087  * the OOS state - this function polls for that condition. It is particularly
2088  * useful for users of IPC functions - each endpoint's Rx FQ is the other
2089  * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
2090  * and then use this API on the (NO_MODIFY) Tx FQ object in order to
2091  * synchronise. The function returns zero for success, +1 if the FQ is still in
2092  * the OOS state, or negative if there was an error.
2093  */
2094 static inline int qman_poll_fq_for_init(struct qman_fq *fq)
2095 {
2096 	struct qm_mcr_queryfq_np np;
2097 	int err;
2098 
2099 	err = qman_query_fq_np(fq, &np);
2100 	if (err)
2101 		return err;
2102 	if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
2103 		return 1;
2104 	return 0;
2105 }
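
/*
 * Example (an illustrative sketch): producer-side synchronisation against a
 * FQ that the consuming endpoint initialises, as described above.
 * 'peer_fqid' is a placeholder agreed with the other endpoint.
 *
 *	struct qman_fq tx_fq;
 *	int err = qman_create_fq(peer_fqid, QMAN_FQ_FLAG_NO_MODIFY, &tx_fq);
 *
 *	while (!err && (err = qman_poll_fq_for_init(&tx_fq)) == 1)
 *		;	// still OOS: the peer hasn't initialised it yet
 */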
2106 
2107 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
2108 #define cpu_to_hw_sg(x)
2109 #define hw_sg_to_cpu(x)
2110 #else
2111 #define cpu_to_hw_sg(x)  __cpu_to_hw_sg(x)
2112 #define hw_sg_to_cpu(x)  __hw_sg_to_cpu(x)
2113 
2114 static inline void __cpu_to_hw_sg(struct qm_sg_entry *sgentry)
2115 {
2116 	sgentry->opaque = cpu_to_be64(sgentry->opaque);
2117 	sgentry->val = cpu_to_be32(sgentry->val);
2118 	sgentry->val_off = cpu_to_be16(sgentry->val_off);
2119 }
2120 
2121 static inline void __hw_sg_to_cpu(struct qm_sg_entry *sgentry)
2122 {
2123 	sgentry->opaque = be64_to_cpu(sgentry->opaque);
2124 	sgentry->val = be32_to_cpu(sgentry->val);
2125 	sgentry->val_off = be16_to_cpu(sgentry->val_off);
2126 }
2127 #endif
2128 
2129 #ifdef __cplusplus
2130 }
2131 #endif
2132 
2133 #endif /* __FSL_QMAN_H */
2134