/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Copyright 2019-2022 NXP
 *
 */

#ifndef __FSL_QMAN_H
#define __FSL_QMAN_H

#include <dpaa_rbtree.h>
#include <rte_compat.h>
#include <rte_eventdev.h>

#ifdef __cplusplus
extern "C" {
#endif

/* FQ lookups (turn this on for 64bit user-space) */
#ifdef RTE_ARCH_64
#define CONFIG_FSL_QMAN_FQ_LOOKUP
/* if FQ lookups are supported, this controls the number of initialised,
 * s/w-consumed FQs that can be supported at any one time.
 */
#define CONFIG_FSL_QMAN_FQ_LOOKUP_MAX (32 * 1024)
#endif

/* Last updated for v00.800 of the BG */

/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
#define QMAN_CHANNEL_POOL1 0x21
#define QMAN_CHANNEL_CAAM 0x80
#define QMAN_CHANNEL_PME 0xa0
#define QMAN_CHANNEL_POOL1_REV3 0x401
#define QMAN_CHANNEL_CAAM_REV3 0x840
#define QMAN_CHANNEL_PME_REV3 0x860
extern u16 qm_channel_pool1;
extern u16 qm_channel_caam;
extern u16 qm_channel_pme;
enum qm_dc_portal {
	qm_dc_portal_fman0 = 0,
	qm_dc_portal_fman1 = 1,
	qm_dc_portal_caam = 2,
	qm_dc_portal_pme = 3
};

__rte_internal
u16 dpaa_get_qm_channel_caam(void);

__rte_internal
u16 dpaa_get_qm_channel_pool(void);

/* Portal processing (interrupt) sources */
#define QM_PIRQ_CCSCI	0x00200000	/* CEETM Congestion State Change */
#define QM_PIRQ_CSCI	0x00100000	/* Congestion State Change */
#define QM_PIRQ_EQCI	0x00080000	/* Enqueue Command Committed */
#define QM_PIRQ_EQRI	0x00040000	/* EQCR Ring (below threshold) */
#define QM_PIRQ_DQRI	0x00020000	/* DQRR Ring (non-empty) */
#define QM_PIRQ_MRI	0x00010000	/* MR Ring (non-empty) */
/*
 * This mask contains all the interrupt sources that need handling except DQRI,
 * ie. that if present should trigger slow-path processing.
 */
#define QM_PIRQ_SLOW	(QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
			QM_PIRQ_MRI | QM_PIRQ_CCSCI)

/* For qman_static_dequeue_*** APIs */
#define QM_SDQCR_CHANNELS_POOL_MASK	0x00007fff
/* for n in [1,15] */
#define QM_SDQCR_CHANNELS_POOL(n)	(0x00008000 >> (n))
/* for conversion from n of qm_channel */
static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
{
	return QM_SDQCR_CHANNELS_POOL(channel + 1 - dpaa_get_qm_channel_pool());
}
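/*
 * Illustrative example (not part of the API): if dpaa_get_qm_channel_pool()
 * returns QMAN_CHANNEL_POOL1_REV3 (0x401), then pool channel 0x402 converts
 * as 0x402 + 1 - 0x401 = 2, so the result is QM_SDQCR_CHANNELS_POOL(2),
 * i.e. bit 0x00002000 of the SDQCR channel mask.
 */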

/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
 * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
 * FQID(n) to fill in the frame queue ID.
 */
#define QM_VDQCR_PRECEDENCE_VDQCR	0x0
#define QM_VDQCR_PRECEDENCE_SDQCR	0x80000000
#define QM_VDQCR_EXACT			0x40000000
#define QM_VDQCR_NUMFRAMES_MASK		0x3f000000
#define QM_VDQCR_NUMFRAMES_SET(n)	(((n) & 0x3f) << 24)
#define QM_VDQCR_NUMFRAMES_GET(n)	(((n) >> 24) & 0x3f)
#define QM_VDQCR_NUMFRAMES_TILLEMPTY	QM_VDQCR_NUMFRAMES_SET(0)
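/*
 * Illustrative sketch: a volatile dequeue command for up to 3 frames at
 * SDQCR precedence could be composed as
 *   u32 vdqcr = QM_VDQCR_PRECEDENCE_SDQCR | QM_VDQCR_NUMFRAMES_SET(3);
 * The target FQ is supplied through the qman_volatile_dequeue() API
 * referenced above; the FQID() filler is not defined in this excerpt.
 */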

/* --- QMan data structures (and associated constants) --- */

/* Represents s/w corenet portal mapped data structures */
struct qm_eqcr_entry;	/* EQCR (EnQueue Command Ring) entries */
struct qm_dqrr_entry;	/* DQRR (DeQueue Response Ring) entries */
struct qm_mr_entry;	/* MR (Message Ring) entries */
struct qm_mc_command;	/* MC (Management Command) command */
struct qm_mc_result;	/* MC result */

#define QM_FD_FORMAT_SG		0x4
#define QM_FD_FORMAT_LONG	0x2
#define QM_FD_FORMAT_COMPOUND	0x1
enum qm_fd_format {
	/*
	 * 'contig' implies a contiguous buffer, whereas 'sg' implies a
	 * scatter-gather table. 'big' implies a 29-bit length with no offset
	 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
	 * implies a s/g-like table, where each entry itself represents a frame
	 * (contiguous or scatter-gather) and the 29-bit "length" is
	 * interpreted purely for congestion calculations, ie. a "congestion
	 * weight".
	 */
	qm_fd_contig = 0,
	qm_fd_contig_big = QM_FD_FORMAT_LONG,
	qm_fd_sg = QM_FD_FORMAT_SG,
	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
	qm_fd_compound = QM_FD_FORMAT_COMPOUND
};

/* Capitalised versions are un-typed but can be used in static expressions */
#define QM_FD_CONTIG	0
#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
#define QM_FD_SG	QM_FD_FORMAT_SG
#define QM_FD_SG_BIG	(QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
#define QM_FD_COMPOUND	QM_FD_FORMAT_COMPOUND

/* "Frame Descriptor (FD)" */
struct __rte_aligned(8) qm_fd {
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 dd:2;	/* dynamic debug */
			u8 liodn_offset:6;
			u8 bpid:8;	/* Buffer Pool ID */
			u8 eliodn_offset:4;
			u8 __reserved:4;
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u32 addr_lo;	/* low 32-bits of 40-bit address */
#else
			u8 liodn_offset:6;
			u8 dd:2;	/* dynamic debug */
			u8 bpid:8;	/* Buffer Pool ID */
			u8 __reserved:4;
			u8 eliodn_offset:4;
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u32 addr_lo;	/* low 32-bits of 40-bit address */
#endif
		};
		struct {
			u64 __notaddress:24;
			/* More efficient address accessor */
			u64 addr:40;
		};
		u64 opaque_addr;
	};
	/* The 'format' field indicates the interpretation of the remaining 29
	 * bits of the 32-bit word. For packing reasons, it is duplicated in the
	 * other union elements. Note, union'd structs are difficult to use with
	 * static initialisation under gcc, in which case use the "opaque" form
	 * with one of the macros.
	 */
	union {
		/* For easier/faster copying of this part of the fd (eg. from a
		 * DQRR entry to an EQCR entry) copy 'opaque'
		 */
		u32 opaque;
		/* If 'format' is _contig or _sg, 20b length and 9b offset */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			enum qm_fd_format format:3;
			u16 offset:9;
			u32 length20:20;
#else
			u32 length20:20;
			u16 offset:9;
			enum qm_fd_format format:3;
#endif
		};
		/* If 'format' is _contig_big or _sg_big, 29b length */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			enum qm_fd_format _format1:3;
			u32 length29:29;
#else
			u32 length29:29;
			enum qm_fd_format _format1:3;
#endif
		};
		/* If 'format' is _compound, 29b "congestion weight" */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			enum qm_fd_format _format2:3;
			u32 cong_weight:29;
#else
			u32 cong_weight:29;
			enum qm_fd_format _format2:3;
#endif
		};
	};
	union {
		u32 cmd;
		u32 status;
	};
};
#define QM_FD_DD_NULL		0x00
#define QM_FD_PID_MASK		0x3f
static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
{
	return fd->addr;
}

static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
{
	return (dma_addr_t)fd->addr;
}

/* Macro, so we compile better if 'v' isn't always 64-bit */
#define qm_fd_addr_set64(fd, v) \
	do { \
		struct qm_fd *__fd931 = (fd); \
		__fd931->addr = v; \
	} while (0)
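/*
 * Illustrative usage, assuming a caller-provided bus address 'phys':
 *   struct qm_fd fd = { .opaque_addr = 0 };
 *   qm_fd_addr_set64(&fd, phys);
 * This stores the low 40 bits, so qm_fd_addr_get64(&fd) then returns
 * phys & ((1ULL << 40) - 1).
 */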

/* Scatter/Gather table entry */
struct __rte_packed_begin qm_sg_entry {
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 __reserved1[3];
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u32 addr_lo;	/* low 32-bits of 40-bit address */
#else
			u32 addr_lo;	/* low 32-bits of 40-bit address */
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u8 __reserved1[3];
#endif
		};
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u64 __notaddress:24;
			u64 addr:40;
#else
			u64 addr:40;
			u64 __notaddress:24;
#endif
		};
		u64 opaque;
	};
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 extension:1;	/* Extension bit */
			u32 final:1;		/* Final bit */
			u32 length:30;
#else
			u32 length:30;
			u32 final:1;		/* Final bit */
			u32 extension:1;	/* Extension bit */
#endif
		};
		u32 val;
	};
	u8 __reserved2;
	u8 bpid;
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 __reserved3:3;
			u16 offset:13;
#else
			u16 offset:13;
			u16 __reserved3:3;
#endif
		};
		u16 val_off;
	};
} __rte_packed_end;
static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
{
	return sg->addr;
}

static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
{
	return (dma_addr_t)sg->addr;
}

/* Macro, so we compile better if 'v' isn't always 64-bit */
#define qm_sg_entry_set64(sg, v) \
	do { \
		struct qm_sg_entry *__sg931 = (sg); \
		__sg931->addr = v; \
	} while (0)

/* See 1.5.8.1: "Enqueue Command" */
struct __rte_aligned(8) __rte_packed_begin qm_eqcr_entry {
	u8 __dont_write_directly__verb;
	u8 dca;
	u16 seqnum;
	u32 orp;	/* 24-bit */
	u32 fqid;	/* 24-bit */
	u32 tag;
	struct qm_fd fd; /* this has alignment 8 */
	u8 __reserved3[32];
} __rte_packed_end;


/* "Frame Dequeue Response" */
struct __rte_aligned(8) qm_dqrr_entry {
	u8 verb;
	u8 stat;
	u16 seqnum;	/* 15-bit */
	u8 tok;
	u8 __reserved2[3];
	u32 fqid;	/* 24-bit */
	u32 contextB;
	struct qm_fd fd; /* this has alignment 8 */
	u8 __reserved4[32];
};

#define QM_DQRR_VERB_VBIT		0x80
#define QM_DQRR_VERB_MASK		0x7f	/* where the verb contains; */
#define QM_DQRR_VERB_FRAME_DEQUEUE	0x60	/* "this format" */
#define QM_DQRR_STAT_FQ_EMPTY		0x80	/* FQ empty */
#define QM_DQRR_STAT_FQ_HELDACTIVE	0x40	/* FQ held active */
#define QM_DQRR_STAT_FQ_FORCEELIGIBLE	0x20	/* FQ was force-eligible'd */
#define QM_DQRR_STAT_FD_VALID		0x10	/* has a non-NULL FD */
#define QM_DQRR_STAT_UNSCHEDULED	0x02	/* Unscheduled dequeue */
#define QM_DQRR_STAT_DQCR_EXPIRED	0x01	/* VDQCR or PDQCR expired */


/* "ERN Message Response" */
/* "FQ State Change Notification" */
struct __rte_aligned(8) __rte_packed_begin qm_mr_entry {
	union {
		alignas(8) struct __rte_packed_begin {
			u8 verb;
			u8 dca;
			u16 seqnum;
			u8 rc;		/* Rejection Code */
			u32 orp:24;
			u32 fqid;	/* 24-bit */
			u32 tag;
			struct qm_fd fd; /* this has alignment 8 */
		} __rte_packed_end ern;
		alignas(8) struct __rte_packed_begin {
			u8 verb;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 colour:2;	/* See QM_MR_DCERN_COLOUR_* */
			u8 __reserved1:4;
			enum qm_dc_portal portal:2;
#else
			enum qm_dc_portal portal:3;
			u8 __reserved1:3;
			u8 colour:2;	/* See QM_MR_DCERN_COLOUR_* */
#endif
			u16 __reserved2;
			u8 rc;		/* Rejection Code */
			u32 __reserved3:24;
			u32 fqid;	/* 24-bit */
			u32 tag;
			struct qm_fd fd; /* this has alignment 8 */
		} __rte_packed_end dcern;
		alignas(8) struct __rte_packed_begin {
			u8 verb;
			u8 fqs;		/* Frame Queue Status */
			u8 __reserved1[6];
			u32 fqid;	/* 24-bit */
			u32 contextB;
			u8 __reserved2[16];
		} __rte_packed_end fq;	/* FQRN/FQRNI/FQRL/FQPN */
	};
	u8 __reserved2[32];
} __rte_packed_end;
#define QM_MR_VERB_VBIT			0x80
/*
 * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
 * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
 * from the other MR types by noting if the 0x20 bit is unset.
 */
#define QM_MR_VERB_TYPE_MASK		0x27
#define QM_MR_VERB_DC_ERN		0x20
#define QM_MR_VERB_FQRN			0x21
#define QM_MR_VERB_FQRNI		0x22
#define QM_MR_VERB_FQRL			0x23
#define QM_MR_VERB_FQPN			0x24
#define QM_MR_RC_MASK			0xf0	/* contains one of; */
#define QM_MR_RC_CGR_TAILDROP		0x00
#define QM_MR_RC_WRED			0x10
#define QM_MR_RC_ERROR			0x20
#define QM_MR_RC_ORPWINDOW_EARLY	0x30
#define QM_MR_RC_ORPWINDOW_LATE		0x40
#define QM_MR_RC_FQ_TAILDROP		0x50
#define QM_MR_RC_ORPWINDOW_RETIRED	0x60
#define QM_MR_RC_ORP_ZERO		0x70
#define QM_MR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
#define QM_MR_DCERN_COLOUR_GREEN	0x00
#define QM_MR_DCERN_COLOUR_YELLOW	0x01
#define QM_MR_DCERN_COLOUR_RED		0x02
#define QM_MR_DCERN_COLOUR_OVERRIDE	0x03
/*
 * An identical structure of FQD fields is present in the "Init FQ" command and
 * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
 * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
 * latter has two inlines to assist with converting to/from the mant+exp
 * representation.
 */
struct __rte_packed_begin qm_fqd_stashing {
	/* See QM_STASHING_EXCL_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 exclusive;
	u8 __reserved1:2;
	/* Numbers of cachelines */
	u8 annotation_cl:2;
	u8 data_cl:2;
	u8 context_cl:2;
#else
	u8 context_cl:2;
	u8 data_cl:2;
	u8 annotation_cl:2;
	u8 __reserved1:2;
	u8 exclusive;
#endif
} __rte_packed_end;
struct __rte_packed_begin qm_fqd_taildrop {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 __reserved1:3;
	u16 mant:8;
	u16 exp:5;
#else
	u16 exp:5;
	u16 mant:8;
	u16 __reserved1:3;
#endif
} __rte_packed_end;
struct __rte_packed_begin qm_fqd_oac {
	/* "Overhead Accounting Control", see QM_OAC_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 oac:2; /* "Overhead Accounting Control" */
	u8 __reserved1:6;
#else
	u8 __reserved1:6;
	u8 oac:2; /* "Overhead Accounting Control" */
#endif
	/* Two's-complement value (-128 to +127) */
	signed char oal; /* "Overhead Accounting Length" */
} __rte_packed_end;
struct __rte_packed_begin qm_fqd {
	union {
		u8 orpc;
		struct __rte_packed_begin {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 __reserved1:2;
			u8 orprws:3;
			u8 oa:1;
			u8 olws:2;
#else
			u8 olws:2;
			u8 oa:1;
			u8 orprws:3;
			u8 __reserved1:2;
#endif
		} __rte_packed_end;
	};
	u8 cgid;
	u16 fq_ctrl;	/* See QM_FQCTRL_<...> */
	union {
		u16 dest_wq;
		struct __rte_packed_begin {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 channel:13; /* qm_channel */
			u16 wq:3;
#else
			u16 wq:3;
			u16 channel:13; /* qm_channel */
#endif
		} __rte_packed_end dest;
	};
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 __reserved2:1;
	u16 ics_cred:15;
#else
	u16 __reserved2:1;
	u16 ics_cred:15;
#endif
	/*
	 * For "Initialize Frame Queue" commands, the write-enable mask
	 * determines whether 'td' or 'oac_init' is observed. For query
	 * commands, this field is always 'td', and 'oac_query' (below) reflects
	 * the Overhead ACcounting values.
	 */
	union {
		uint16_t opaque_td;
		struct qm_fqd_taildrop td;
		struct qm_fqd_oac oac_init;
	};
	u32 context_b;
	union {
		/* Treat it as 64-bit opaque */
		u64 opaque;
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 hi;
			u32 lo;
#else
			u32 lo;
			u32 hi;
#endif
		};
		/* Treat it as s/w portal stashing config */
		/* see "FQD Context_A field used for [...]" */
		struct __rte_packed_begin {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			struct qm_fqd_stashing stashing;
			/*
			 * 48-bit address of FQ context to
			 * stash, must be cacheline-aligned
			 */
			u16 context_hi;
			u32 context_lo;
#else
			u32 context_lo;
			u16 context_hi;
			struct qm_fqd_stashing stashing;
#endif
		} __rte_packed_end;
	} context_a;
	struct qm_fqd_oac oac_query;
} __rte_packed_end;
/* 64-bit converters for context_hi/lo */
static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
{
	return ((u64)fqd->context_a.context_hi << 32) |
		(u64)fqd->context_a.context_lo;
}

static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
{
	return (dma_addr_t)qm_fqd_stashing_get64(fqd);
}

static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
{
	return ((u64)fqd->context_a.hi << 32) |
		(u64)fqd->context_a.lo;
}

static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.context_hi = upper_32_bits(addr);
	fqd->context_a.context_lo = lower_32_bits(addr);
}

static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.hi = upper_32_bits(addr);
	fqd->context_a.lo = lower_32_bits(addr);
}

/* convert a threshold value into mant+exp representation */
static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
				      int roundup)
{
	u32 e = 0;
	int oddbit = 0;

	if (val > 0xe0000000)
		return -ERANGE;
	while (val > 0xff) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}
	td->exp = e;
	td->mant = val;
	return 0;
}

/* and the other direction */
static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
{
	return (u32)td->mant << td->exp;
}
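/*
 * Worked example (illustrative): qm_fqd_taildrop_set(&td, 5000, 1) halves
 * 5000 five times, rounding up on each discarded odd bit, giving mant=157,
 * exp=5; qm_fqd_taildrop_get() then recovers 157 << 5 = 5024, the smallest
 * representable threshold >= 5000. Without roundup the result is mant=156,
 * exp=5, i.e. 4992.
 */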


/* See "Frame Queue Descriptor (FQD)" */
/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
#define QM_FQCTRL_MASK		0x07ff	/* 'fq_ctrl' flags; */
#define QM_FQCTRL_CGE		0x0400	/* Congestion Group Enable */
#define QM_FQCTRL_TDE		0x0200	/* Tail-Drop Enable */
#define QM_FQCTRL_ORP		0x0100	/* ORP Enable */
#define QM_FQCTRL_CTXASTASHING	0x0080	/* Context-A stashing */
#define QM_FQCTRL_CPCSTASH	0x0040	/* CPC Stash Enable */
#define QM_FQCTRL_FORCESFDR	0x0008	/* High-priority SFDRs */
#define QM_FQCTRL_AVOIDBLOCK	0x0004	/* Don't block active */
#define QM_FQCTRL_HOLDACTIVE	0x0002	/* Hold active in portal */
#define QM_FQCTRL_PREFERINCACHE	0x0001	/* Aggressively cache FQD */
#define QM_FQCTRL_LOCKINCACHE	QM_FQCTRL_PREFERINCACHE /* older naming */

/* See "FQD Context_A field used for [...]" */
/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
#define QM_STASHING_EXCL_ANNOTATION	0x04
#define QM_STASHING_EXCL_DATA		0x02
#define QM_STASHING_EXCL_CTX		0x01

/* See "Intra Class Scheduling" */
/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
#define QM_OAC_ICS		0x2 /* Accounting for Intra-Class Scheduling */
#define QM_OAC_CG		0x1 /* Accounting for Congestion Groups */

/*
 * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
 * and associated commands/responses. The WRED parameters are calculated from
 * these fields as follows;
 *   MaxTH = MA * (2 ^ Mn)
 *   Slope = SA / (2 ^ Sn)
 *    MaxP = 4 * (Pn + 1)
 */
struct __rte_packed_begin qm_cgr_wr_parm {
	union {
		u32 word;
		struct __rte_packed_begin {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 MA:8;
			u32 Mn:5;
			u32 SA:7; /* must be between 64-127 */
			u32 Sn:6;
			u32 Pn:6;
#else
			u32 Pn:6;
			u32 Sn:6;
			u32 SA:7; /* must be between 64-127 */
			u32 Mn:5;
			u32 MA:8;
#endif
		} __rte_packed_end;
	};
} __rte_packed_end;
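/*
 * Worked example (illustrative), per the formulas above: MA=64, Mn=4 gives
 * MaxTH = 64 * 2^4 = 1024; SA=64, Sn=6 gives Slope = 64 / 2^6 = 1; and
 * Pn=15 gives MaxP = 4 * (15 + 1) = 64.
 */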
/*
 * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
 * management commands, this is padded to a 16-bit structure field, so that's
 * how we represent it here. The congestion state threshold is calculated from
 * these fields as follows;
 *   CS threshold = TA * (2 ^ Tn)
 */
struct __rte_packed_begin qm_cgr_cs_thres {
	union {
		u16 hword;
		struct __rte_packed_begin {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 __reserved:3;
			u16 TA:8;
			u16 Tn:5;
#else
			u16 Tn:5;
			u16 TA:8;
			u16 __reserved:3;
#endif
		} __rte_packed_end;
	};
} __rte_packed_end;
/*
 * This identical structure of CGR fields is present in the "Init/Modify CGR"
 * commands and the "Query CGR" result. It's suctioned out here into its own
 * struct.
 */
struct __rte_packed_begin __qm_mc_cgr {
	struct qm_cgr_wr_parm wr_parm_g;
	struct qm_cgr_wr_parm wr_parm_y;
	struct qm_cgr_wr_parm wr_parm_r;
	u8 wr_en_g;	/* boolean, use QM_CGR_EN */
	u8 wr_en_y;	/* boolean, use QM_CGR_EN */
	u8 wr_en_r;	/* boolean, use QM_CGR_EN */
	u8 cscn_en;	/* boolean, use QM_CGR_EN */
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
			u16 cscn_targ_dcp_low;  /* CSCN_TARG_DCP low-16bits */
#else
			u16 cscn_targ_dcp_low;  /* CSCN_TARG_DCP low-16bits */
			u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
#endif
		};
		u32 cscn_targ;	/* use QM_CGR_TARG_* */
	};
	u8 cstd_en;	/* boolean, use QM_CGR_EN */
	u8 cs;		/* boolean, only used in query response */
	union {
		struct qm_cgr_cs_thres cs_thres;
		/* use qm_cgr_cs_thres_set64() */
		u16 __cs_thres;
	};
	u8 mode;	/* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
} __rte_packed_end;
#define QM_CGR_EN		0x01 /* For wr_en_*, cscn_en, cstd_en */
#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT	0x8000 /* value written to portal bit */
#define QM_CGR_TARG_UDP_CTRL_DCP	0x4000 /* 0: SWP, 1: DCP */
#define QM_CGR_TARG_PORTAL(n)	(0x80000000 >> (n)) /* s/w portal, 0-9 */
#define QM_CGR_TARG_FMAN0	0x00200000 /* direct-connect portal: fman0 */
#define QM_CGR_TARG_FMAN1	0x00100000 /*			   : fman1 */
/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
	return (u64)th->TA << th->Tn;
}

static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
					int roundup)
{
	u32 e = 0;
	int oddbit = 0;

	while (val > 0xff) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}
	th->Tn = e;
	th->TA = val;
	return 0;
}
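/*
 * As with the taildrop converters above (illustrative):
 * qm_cgr_cs_thres_set64(&th, 5000, 1) yields TA=157, Tn=5, and
 * qm_cgr_cs_thres_get64() then recovers 157 << 5 = 5024.
 */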

/* See 1.5.8.5.1: "Initialize FQ" */
/* See 1.5.8.5.2: "Query FQ" */
/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
/* See 1.5.8.5.4: "Alter FQ State Commands" */
/* See 1.5.8.6.1: "Initialize/Modify CGR" */
/* See 1.5.8.6.2: "CGR Test Write" */
/* See 1.5.8.6.3: "Query CGR" */
/* See 1.5.8.6.4: "Query Congestion Group State" */
struct __rte_packed_begin qm_mcc_initfq {
	u8 __reserved1;
	u16 we_mask;	/* Write Enable Mask */
	u32 fqid;	/* 24-bit */
	u16 count;	/* Initialises 'count+1' FQDs */
	struct qm_fqd fqd; /* the FQD fields go here */
	u8 __reserved3[30];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_queryfq {
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_queryfq_np {
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_alterfq {
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2;
	u8 count;	/* number of consecutive FQID */
	u8 __reserved3[10];
	u32 context_b;	/* frame queue context b */
	u8 __reserved4[40];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_initcgr {
	u8 __reserved1;
	u16 we_mask;	/* Write Enable Mask */
	struct __qm_mc_cgr cgr;	/* CGR fields */
	u8 __reserved2[2];
	u8 cgid;
	u8 __reserved4[32];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_cgrtestwrite {
	u8 __reserved1[2];
	u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
	u8 __reserved2[23];
	u8 cgid;
	u8 __reserved3[32];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_querycgr {
	u8 __reserved1[30];
	u8 cgid;
	u8 __reserved2[32];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_querycongestion {
	u8 __reserved[63];
} __rte_packed_end;
struct __rte_packed_begin qm_mcc_querywq {
	u8 __reserved;
	/* select channel if verb != QUERYWQ_DEDICATED */
	union {
		u16 channel_wq; /* ignores wq (3 lsbits) */
		struct __rte_packed_begin {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 id:13; /* qm_channel */
			u16 __reserved1:3;
#else
			u16 __reserved1:3;
			u16 id:13; /* qm_channel */
#endif
		} __rte_packed_end channel;
	};
	u8 __reserved2[60];
} __rte_packed_end;

struct __rte_packed_begin qm_mc_command {
	u8 __dont_write_directly__verb;
	union {
		struct qm_mcc_initfq initfq;
		struct qm_mcc_queryfq queryfq;
		struct qm_mcc_queryfq_np queryfq_np;
		struct qm_mcc_alterfq alterfq;
		struct qm_mcc_initcgr initcgr;
		struct qm_mcc_cgrtestwrite cgrtestwrite;
		struct qm_mcc_querycgr querycgr;
		struct qm_mcc_querycongestion querycongestion;
		struct qm_mcc_querywq querywq;
	};
} __rte_packed_end;

/* INITFQ-specific flags */
#define QM_INITFQ_WE_MASK		0x01ff	/* 'Write Enable' flags; */
#define QM_INITFQ_WE_OAC		0x0100
#define QM_INITFQ_WE_ORPC		0x0080
#define QM_INITFQ_WE_CGID		0x0040
#define QM_INITFQ_WE_FQCTRL		0x0020
#define QM_INITFQ_WE_DESTWQ		0x0010
#define QM_INITFQ_WE_ICSCRED		0x0008
#define QM_INITFQ_WE_TDTHRESH		0x0004
#define QM_INITFQ_WE_CONTEXTB		0x0002
#define QM_INITFQ_WE_CONTEXTA		0x0001
/* INITCGR/MODIFYCGR-specific flags */
#define QM_CGR_WE_MASK			0x07ff	/* 'Write Enable Mask'; */
#define QM_CGR_WE_WR_PARM_G		0x0400
#define QM_CGR_WE_WR_PARM_Y		0x0200
#define QM_CGR_WE_WR_PARM_R		0x0100
#define QM_CGR_WE_WR_EN_G		0x0080
#define QM_CGR_WE_WR_EN_Y		0x0040
#define QM_CGR_WE_WR_EN_R		0x0020
#define QM_CGR_WE_CSCN_EN		0x0010
#define QM_CGR_WE_CSCN_TARG		0x0008
#define QM_CGR_WE_CSTD_EN		0x0004
#define QM_CGR_WE_CS_THRES		0x0002
#define QM_CGR_WE_MODE			0x0001

struct __rte_packed_begin qm_mcr_initfq {
	u8 __reserved1[62];
} __rte_packed_end;
struct __rte_packed_begin qm_mcr_queryfq {
	u8 __reserved1[8];
	struct qm_fqd fqd;	/* the FQD fields are here */
	u8 __reserved2[30];
} __rte_packed_end;
struct __rte_packed_begin qm_mcr_queryfq_np {
	u8 __reserved1;
	u8 state;	/* QM_MCR_NP_STATE_*** */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 __reserved2;
	u32 fqd_link:24;
	u16 __reserved3:2;
	u16 odp_seq:14;
	u16 __reserved4:2;
	u16 orp_nesn:14;
	u16 __reserved5:1;
	u16 orp_ea_hseq:15;
	u16 __reserved6:1;
	u16 orp_ea_tseq:15;
	u8 __reserved7;
	u32 orp_ea_hptr:24;
	u8 __reserved8;
	u32 orp_ea_tptr:24;
	u8 __reserved9;
	u32 pfdr_hptr:24;
	u8 __reserved10;
	u32 pfdr_tptr:24;
	u8 __reserved11[5];
	u8 __reserved12:7;
	u8 is:1;
	u16 ics_surp;
	u32 byte_cnt;
	u8 __reserved13;
	u32 frm_cnt:24;
	u32 __reserved14;
	u16 ra1_sfdr;	/* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr;	/* QM_MCR_NP_RA2_*** */
	u16 __reserved15;
	u16 od1_sfdr;	/* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr;	/* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr;	/* QM_MCR_NP_OD3_*** */
#else
	u8 __reserved2;
	u32 fqd_link:24;

	u16 odp_seq:14;
	u16 __reserved3:2;

	u16 orp_nesn:14;
	u16 __reserved4:2;

	u16 orp_ea_hseq:15;
	u16 __reserved5:1;

	u16 orp_ea_tseq:15;
	u16 __reserved6:1;

	u8 __reserved7;
	u32 orp_ea_hptr:24;

	u8 __reserved8;
	u32 orp_ea_tptr:24;

	u8 __reserved9;
	u32 pfdr_hptr:24;

	u8 __reserved10;
	u32 pfdr_tptr:24;

	u8 __reserved11[5];
	u8 is:1;
	u8 __reserved12:7;
	u16 ics_surp;
	u32 byte_cnt;
	u8 __reserved13;
	u32 frm_cnt:24;
	u32 __reserved14;
	u16 ra1_sfdr;	/* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr;	/* QM_MCR_NP_RA2_*** */
	u16 __reserved15;
	u16 od1_sfdr;	/* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr;	/* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr;	/* QM_MCR_NP_OD3_*** */
#endif
} __rte_packed_end;

struct __rte_packed_begin qm_mcr_alterfq {
	u8 fqs;		/* Frame Queue Status */
	u8 __reserved1[61];
} __rte_packed_end;
struct __rte_packed_begin qm_mcr_initcgr {
	u8 __reserved1[62];
} __rte_packed_end;
struct __rte_packed_begin qm_mcr_cgrtestwrite {
	u16 __reserved1;
	struct __qm_mc_cgr cgr; /* CGR fields */
	u8 __reserved2[3];
	u32 __reserved3:24;
	u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
	u32 __reserved4:24;
	u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
	u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
	u16 lgt;	/* Last Group Tick */
	u16 wr_prob_g;
	u16 wr_prob_y;
	u16 wr_prob_r;
	u8 __reserved5[8];
} __rte_packed_end;
struct __rte_packed_begin qm_mcr_querycgr {
	u16 __reserved1;
	struct __qm_mc_cgr cgr; /* CGR fields */
	u8 __reserved2[3];
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 __reserved3:24;
			u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
			u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
#else
			u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
			u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
			u32 __reserved3:24;
#endif
		};
		u64 i_bcnt;
	};
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 __reserved4:24;
			u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
			u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
#else
			u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
			u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
			u32 __reserved4:24;
#endif
		};
		u64 a_bcnt;
	};
	union {
		u32 cscn_targ_swp[4];
		u8 __reserved5[16];
	};
} __rte_packed_end;

struct __qm_mcr_querycongestion {
	u32 state[8];
};

struct __rte_packed_begin qm_mcr_querycongestion {
	u8 __reserved[30];
	/* Access this struct using QM_MCR_QUERYCONGESTION() */
	struct __qm_mcr_querycongestion state;
} __rte_packed_end;
struct __rte_packed_begin qm_mcr_querywq {
	union {
		u16 channel_wq; /* ignores wq (3 lsbits) */
		struct __rte_packed_begin {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 id:13; /* qm_channel */
			u16 __reserved:3;
#else
			u16 __reserved:3;
			u16 id:13; /* qm_channel */
#endif
		} __rte_packed_end channel;
	};
	u8 __reserved[28];
	u32 wq_len[8];
} __rte_packed_end;

struct __rte_packed_begin qm_mc_result {
	u8 verb;
	u8 result;
	union {
		struct qm_mcr_initfq initfq;
		struct qm_mcr_queryfq queryfq;
		struct qm_mcr_queryfq_np queryfq_np;
		struct qm_mcr_alterfq alterfq;
		struct qm_mcr_initcgr initcgr;
		struct qm_mcr_cgrtestwrite cgrtestwrite;
		struct qm_mcr_querycgr querycgr;
		struct qm_mcr_querycongestion querycongestion;
		struct qm_mcr_querywq querywq;
	};
} __rte_packed_end;

#define QM_MCR_VERB_RRID		0x80
#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL		0x00
#define QM_MCR_RESULT_OK		0xf0
#define QM_MCR_RESULT_ERR_FQID		0xf1
#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
#define QM_MCR_RESULT_PENDING		0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
#define QM_MCR_NP_STATE_FE		0x10
#define QM_MCR_NP_STATE_R		0x08
#define QM_MCR_NP_STATE_MASK		0x07	/* Reads FQD::STATE; */
#define QM_MCR_NP_STATE_OOS		0x00
#define QM_MCR_NP_STATE_RETIRED		0x01
#define QM_MCR_NP_STATE_TEN_SCHED	0x02
#define QM_MCR_NP_STATE_TRU_SCHED	0x03
#define QM_MCR_NP_STATE_PARKED		0x04
#define QM_MCR_NP_STATE_ACTIVE		0x05
#define QM_MCR_NP_PTR_MASK		0x07ff	/* for RA[12] & OD[123] */
#define QM_MCR_NP_RA1_NRA(v)		(((v) >> 14) & 0x3)	/* FQD::NRA */
#define QM_MCR_NP_RA2_IT(v)		(((v) >> 14) & 0x1)	/* FQD::IT */
#define QM_MCR_NP_OD1_NOD(v)		(((v) >> 14) & 0x3)	/* FQD::NOD */
#define QM_MCR_NP_OD3_NPC(v)		(((v) >> 14) & 0x3)	/* FQD::NPC */
#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
/* This extracts the state for congestion group 'n' from a query response.
 * Eg.
 *   u8 cgr = [...];
 *   struct qm_mc_result *res = [...];
 *   printf("congestion group %d congestion state: %d\n", cgr,
 *       QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
 */
#define __CGR_WORD(num)		(num >> 5)
#define __CGR_SHIFT(num)	(num & 0x1f)
#define __CGR_NUM		(sizeof(struct __qm_mcr_querycongestion) << 3)
static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
					 u8 cgr)
{
	return p->state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
}

	/* Portal and Frame Queues */
/* Represents a managed portal */
struct qman_portal;

/*
 * This object type represents QMan frame queue descriptors (FQD), it is
 * cacheline-aligned, and initialised by qman_create_fq(). The structure is
 * defined further down.
 */
struct qman_fq;
1099f6fadc3eSShreyansh Jain 
1100f6fadc3eSShreyansh Jain /*
1101f6fadc3eSShreyansh Jain  * This object type represents a QMan congestion group, it is defined further
1102f6fadc3eSShreyansh Jain  * down.
1103f6fadc3eSShreyansh Jain  */
1104f6fadc3eSShreyansh Jain struct qman_cgr;
1105f6fadc3eSShreyansh Jain 
1106f6fadc3eSShreyansh Jain /*
1107f6fadc3eSShreyansh Jain  * This enum, and the callback type that returns it, are used when handling
1108f6fadc3eSShreyansh Jain  * dequeued frames via DQRR. Note that for "null" callbacks registered with the
1109f6fadc3eSShreyansh Jain  * portal object (for handling dequeues that do not demux because context_b is
1110f6fadc3eSShreyansh Jain  * NULL), the return value *MUST* be qman_cb_dqrr_consume.
1111f6fadc3eSShreyansh Jain  */
1112f6fadc3eSShreyansh Jain enum qman_cb_dqrr_result {
1113f6fadc3eSShreyansh Jain 	/* DQRR entry can be consumed */
1114f6fadc3eSShreyansh Jain 	qman_cb_dqrr_consume,
1115f6fadc3eSShreyansh Jain 	/* Like _consume, but requests parking - FQ must be held-active */
1116f6fadc3eSShreyansh Jain 	qman_cb_dqrr_park,
1117f6fadc3eSShreyansh Jain 	/* Does not consume, for DCA mode only. This allows out-of-order
1118f6fadc3eSShreyansh Jain 	 * consumes by explicit calls to qman_dca() and/or the use of implicit
1119f6fadc3eSShreyansh Jain 	 * DCA via EQCR entries.
1120f6fadc3eSShreyansh Jain 	 */
1121f6fadc3eSShreyansh Jain 	qman_cb_dqrr_defer,
1122f6fadc3eSShreyansh Jain 	/*
1123f6fadc3eSShreyansh Jain 	 * Stop processing without consuming this ring entry. Exits the current
1124f6fadc3eSShreyansh Jain 	 * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
1125f6fadc3eSShreyansh Jain 	 * an interrupt handler, the callback would typically call
1126f6fadc3eSShreyansh Jain 	 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
1127f6fadc3eSShreyansh Jain 	 * otherwise the interrupt will reassert immediately.
1128f6fadc3eSShreyansh Jain 	 */
1129f6fadc3eSShreyansh Jain 	qman_cb_dqrr_stop,
1130f6fadc3eSShreyansh Jain 	/* Like qman_cb_dqrr_stop, but consumes the current entry. */
1131f6fadc3eSShreyansh Jain 	qman_cb_dqrr_consume_stop
1132f6fadc3eSShreyansh Jain };
1133f6fadc3eSShreyansh Jain 
1134f6fadc3eSShreyansh Jain typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
1135f6fadc3eSShreyansh Jain 					struct qman_fq *fq,
1136f6fadc3eSShreyansh Jain 					const struct qm_dqrr_entry *dqrr);
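
/*
 * An illustrative sketch of a demux callback matching the qman_cb_dqrr
 * signature above; the callback name and process_frame() are hypothetical:
 *
 *     static enum qman_cb_dqrr_result
 *     my_dqrr_cb(struct qman_portal *qm, struct qman_fq *fq,
 *		  const struct qm_dqrr_entry *dqrr)
 *     {
 *	   // handle the dequeued frame descriptor carried in dqrr->fd
 *	   process_frame(&dqrr->fd);
 *	   // returning _consume lets the driver release this DQRR entry
 *	   return qman_cb_dqrr_consume;
 *     }
 */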
1137f6fadc3eSShreyansh Jain 
1138f5648825SHemant Agrawal typedef enum qman_cb_dqrr_result (*qman_dpdk_cb_dqrr)(void *event,
1139f5648825SHemant Agrawal 					struct qman_portal *qm,
1140f5648825SHemant Agrawal 					struct qman_fq *fq,
1141f5648825SHemant Agrawal 					const struct qm_dqrr_entry *dqrr,
1142f5648825SHemant Agrawal 					void **bd);
1143f5648825SHemant Agrawal 
1144b9083ea5SNipun Gupta /* This callback type is used when handling buffers in DPDK pull mode */
1145b9083ea5SNipun Gupta typedef void (*qman_dpdk_pull_cb_dqrr)(struct qman_fq **fq,
1146b9083ea5SNipun Gupta 					struct qm_dqrr_entry **dqrr,
1147b9083ea5SNipun Gupta 					void **bufs,
1148b9083ea5SNipun Gupta 					int num_bufs);
1149b9083ea5SNipun Gupta 
1150b9083ea5SNipun Gupta typedef void (*qman_dpdk_cb_prepare)(struct qm_dqrr_entry *dq, void **bufs);
1151b9083ea5SNipun Gupta 
1152f6fadc3eSShreyansh Jain /*
1153f6fadc3eSShreyansh Jain  * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
1154f6fadc3eSShreyansh Jain  * are always consumed after the callback returns.
1155f6fadc3eSShreyansh Jain  */
1156f6fadc3eSShreyansh Jain typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
1157f6fadc3eSShreyansh Jain 				const struct qm_mr_entry *msg);
1158f6fadc3eSShreyansh Jain 
1159f6fadc3eSShreyansh Jain /* This callback type is used when handling DCP ERNs */
1160f6fadc3eSShreyansh Jain typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
1161f6fadc3eSShreyansh Jain 				const struct qm_mr_entry *msg);
11629124e65dSGagandeep Singh 
11639124e65dSGagandeep Singh /* This callback function will be used to free mbufs of ERN */
11649124e65dSGagandeep Singh typedef uint16_t (*qman_cb_free_mbuf)(const struct qm_fd *fd);
11659124e65dSGagandeep Singh 
1166f6fadc3eSShreyansh Jain /*
1167f6fadc3eSShreyansh Jain  * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
1168f6fadc3eSShreyansh Jain  * held-active + held-suspended are just "sched". A state like "retired" is
1169f6fadc3eSShreyansh Jain  * not assumed until the transition completes (ie. QMAN_FQ_STATE_CHANGING is
1170f6fadc3eSShreyansh Jain  * set until then, to indicate it is completing and to gate retries of the retire
1171f6fadc3eSShreyansh Jain  * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
1172f6fadc3eSShreyansh Jain  * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
1173f6fadc3eSShreyansh Jain  * index rather than the FQ that ring entry corresponds to), so repeated park
1174f6fadc3eSShreyansh Jain  * commands are allowed (if you're silly enough to try) but won't change FQ
1175f6fadc3eSShreyansh Jain  * state, and the resulting park notifications move FQs from "sched" to
1176f6fadc3eSShreyansh Jain  * "parked".
1177f6fadc3eSShreyansh Jain  */
1178f6fadc3eSShreyansh Jain enum qman_fq_state {
1179f6fadc3eSShreyansh Jain 	qman_fq_state_oos,
1180f6fadc3eSShreyansh Jain 	qman_fq_state_parked,
1181f6fadc3eSShreyansh Jain 	qman_fq_state_sched,
1182f6fadc3eSShreyansh Jain 	qman_fq_state_retired
1183f6fadc3eSShreyansh Jain };
1184f6fadc3eSShreyansh Jain 
1185f6fadc3eSShreyansh Jain 
1186f6fadc3eSShreyansh Jain /*
1187f6fadc3eSShreyansh Jain  * Frame queue objects (struct qman_fq) are stored within memory passed to
1188f6fadc3eSShreyansh Jain  * qman_create_fq(), as this allows stashing of caller-provided demux callback
1189f6fadc3eSShreyansh Jain  * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
1190f6fadc3eSShreyansh Jain  * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
1191f6fadc3eSShreyansh Jain  * they should;
1192f6fadc3eSShreyansh Jain  *
1193f6fadc3eSShreyansh Jain  * (a) extend the qman_fq structure with their state; eg.
1194f6fadc3eSShreyansh Jain  *
1195f6fadc3eSShreyansh Jain  *     // myfq is allocated and driver_fq callbacks filled in;
1196f6fadc3eSShreyansh Jain  *     struct my_fq {
1197f6fadc3eSShreyansh Jain  *	   struct qman_fq base;
1198f6fadc3eSShreyansh Jain  *	   int an_extra_field;
1199f6fadc3eSShreyansh Jain  *	   [ ... add other fields to be associated with each FQ ...]
1200f6fadc3eSShreyansh Jain  *     } *myfq = some_my_fq_allocator();
1201f6fadc3eSShreyansh Jain  *     struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
1202f6fadc3eSShreyansh Jain  *
1203f6fadc3eSShreyansh Jain  *     // in a dequeue callback, access extra fields from 'fq' via a cast;
1204f6fadc3eSShreyansh Jain  *     struct my_fq *myfq = (struct my_fq *)fq;
1205f6fadc3eSShreyansh Jain  *     do_something_with(myfq->an_extra_field);
1206f6fadc3eSShreyansh Jain  *     [...]
1207f6fadc3eSShreyansh Jain  *
1208f6fadc3eSShreyansh Jain  * (b) when and if configuring the FQ for context stashing, specify how ever
1209f6fadc3eSShreyansh Jain  *     many cachelines are required to stash 'struct my_fq', to accelerate not
1210f6fadc3eSShreyansh Jain  *     only the QMan driver but the callback as well.
1211f6fadc3eSShreyansh Jain  */
1212f6fadc3eSShreyansh Jain 
1213f6fadc3eSShreyansh Jain struct qman_fq_cb {
1214f5648825SHemant Agrawal 	union { /* for dequeued frames */
1215f5648825SHemant Agrawal 		qman_dpdk_cb_dqrr dqrr_dpdk_cb;
1216b9083ea5SNipun Gupta 		qman_dpdk_pull_cb_dqrr dqrr_dpdk_pull_cb;
1217f5648825SHemant Agrawal 		qman_cb_dqrr dqrr;
1218f5648825SHemant Agrawal 	};
1219b9083ea5SNipun Gupta 	qman_dpdk_cb_prepare dqrr_prepare;
1220f6fadc3eSShreyansh Jain 	qman_cb_mr ern;		/* for s/w ERNs */
1221f6fadc3eSShreyansh Jain 	qman_cb_mr fqs;		/* frame-queue state changes*/
1222f6fadc3eSShreyansh Jain };
1223f6fadc3eSShreyansh Jain 
1224f6fadc3eSShreyansh Jain struct qman_fq {
1225f6fadc3eSShreyansh Jain 	/* Caller of qman_create_fq() provides these demux callbacks */
1226f6fadc3eSShreyansh Jain 	struct qman_fq_cb cb;
12279d32ef0fSHemant Agrawal 
12288dc88183SNipun Gupta 	u32 fqid_le;
1229a6a75240SNipun Gupta 	u32 fqid;
1230a6a75240SNipun Gupta 
1231a6a75240SNipun Gupta 	int q_fd;
12329d32ef0fSHemant Agrawal 	u16 ch_id;
1233e4abd4ffSJun Yang 	int8_t vsp_id;
12349d32ef0fSHemant Agrawal 	u8 cgr_groupid;
1235b9c94167SNipun Gupta 	u8 is_static:4;
1236b9c94167SNipun Gupta 	u8 qp_initialized:4;
12378dc88183SNipun Gupta 
1238f6fadc3eSShreyansh Jain 	/* DPDK Interface */
1239f6fadc3eSShreyansh Jain 	void *dpaa_intf;
1240d11482d9SVanshika Shukla 	/* to store tx_conf_queue corresponding to tx_queue */
1241d11482d9SVanshika Shukla 	struct qman_fq *tx_conf_queue;
1242f6fadc3eSShreyansh Jain 
124343797e7bSSunil Kumar Kori 	struct rte_event ev;
12449d32ef0fSHemant Agrawal 	/* affined portal in case of static queue */
12459d32ef0fSHemant Agrawal 	struct qman_portal *qp;
1246e1797f4bSAkhil Goyal 	struct dpaa_bp_info *bp_array;
12479d32ef0fSHemant Agrawal 
1248f6fadc3eSShreyansh Jain 	volatile unsigned long flags;
12499d32ef0fSHemant Agrawal 
1250f6fadc3eSShreyansh Jain 	enum qman_fq_state state;
12519d32ef0fSHemant Agrawal 	spinlock_t fqlock;
12529d32ef0fSHemant Agrawal 
1253f6fadc3eSShreyansh Jain 	struct rb_node node;
1254847ee3bdSShreyansh Jain #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
12554bbc759fSAkhil Goyal 	void **qman_fq_lookup_table;
1256847ee3bdSShreyansh Jain 	u32 key;
1257847ee3bdSShreyansh Jain #endif
12582cf9264fSHemant Agrawal 	u16 nb_desc;
12592cf9264fSHemant Agrawal 	u16 resv;
12602cf9264fSHemant Agrawal 	u64 offloads;
1261f6fadc3eSShreyansh Jain };
1262f6fadc3eSShreyansh Jain 
1263f6fadc3eSShreyansh Jain /*
1264f6fadc3eSShreyansh Jain  * This callback type is used when handling congestion group entry/exit.
1265f6fadc3eSShreyansh Jain  * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
1266f6fadc3eSShreyansh Jain  */
1267f6fadc3eSShreyansh Jain typedef void (*qman_cb_cgr)(struct qman_portal *qm,
1268f6fadc3eSShreyansh Jain 			    struct qman_cgr *cgr, int congested);
1269f6fadc3eSShreyansh Jain 
1270f6fadc3eSShreyansh Jain struct qman_cgr {
1271f6fadc3eSShreyansh Jain 	/* Set these prior to qman_create_cgr() */
1272f6fadc3eSShreyansh Jain 	u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
1273f6fadc3eSShreyansh Jain 	qman_cb_cgr cb;
1274f6fadc3eSShreyansh Jain 	/* These are private to the driver */
1275f6fadc3eSShreyansh Jain 	u16 chan; /* portal channel this object is created on */
1276f6fadc3eSShreyansh Jain 	struct list_head node;
1277f6fadc3eSShreyansh Jain };
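
/*
 * An illustrative sketch of congestion-state handling; the callback name and
 * log_state() are hypothetical, and the cgrid value is arbitrary:
 *
 *     static void my_cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr,
 *			     int congested)
 *     {
 *	   // 'congested' is non-zero on entry, zero on exit
 *	   log_state(cgr->cgrid, congested);
 *     }
 *
 *     struct qman_cgr cgr = {
 *	   .cgrid = 1,
 *	   .cb = my_cgr_cb,
 *     };
 *     // as noted above, set these fields prior to qman_create_cgr()
 */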
1278f6fadc3eSShreyansh Jain 
1279c47ff048SShreyansh Jain /* Flags to qman_create_fq() */
1280c47ff048SShreyansh Jain #define QMAN_FQ_FLAG_NO_ENQUEUE      0x00000001 /* can't enqueue */
1281c47ff048SShreyansh Jain #define QMAN_FQ_FLAG_NO_MODIFY       0x00000002 /* can only enqueue */
1282c47ff048SShreyansh Jain #define QMAN_FQ_FLAG_TO_DCPORTAL     0x00000004 /* consumed by CAAM/PME/Fman */
1283c47ff048SShreyansh Jain #define QMAN_FQ_FLAG_LOCKED          0x00000008 /* multi-core locking */
1284c47ff048SShreyansh Jain #define QMAN_FQ_FLAG_AS_IS           0x00000010 /* query h/w state */
1285c47ff048SShreyansh Jain #define QMAN_FQ_FLAG_DYNAMIC_FQID    0x00000020 /* (de)allocate fqid */
1286c47ff048SShreyansh Jain 
1287c47ff048SShreyansh Jain /* Flags to qman_destroy_fq() */
1288c47ff048SShreyansh Jain #define QMAN_FQ_DESTROY_PARKED       0x00000001 /* FQ can be parked or OOS */
1289c47ff048SShreyansh Jain 
1290c47ff048SShreyansh Jain /* Flags from qman_fq_state() */
1291c47ff048SShreyansh Jain #define QMAN_FQ_STATE_CHANGING       0x80000000 /* 'state' is changing */
1292c47ff048SShreyansh Jain #define QMAN_FQ_STATE_NE             0x40000000 /* retired FQ isn't empty */
1293c47ff048SShreyansh Jain #define QMAN_FQ_STATE_ORL            0x20000000 /* retired FQ has ORL */
1294c47ff048SShreyansh Jain #define QMAN_FQ_STATE_BLOCKOOS       0xe0000000 /* if any are set, no OOS */
1295c47ff048SShreyansh Jain #define QMAN_FQ_STATE_CGR_EN         0x10000000 /* CGR enabled */
1296c47ff048SShreyansh Jain #define QMAN_FQ_STATE_VDQCR          0x08000000 /* being volatile dequeued */
1297c47ff048SShreyansh Jain 
1298c47ff048SShreyansh Jain /* Flags to qman_init_fq() */
1299c47ff048SShreyansh Jain #define QMAN_INITFQ_FLAG_SCHED       0x00000001 /* schedule rather than park */
1300c47ff048SShreyansh Jain #define QMAN_INITFQ_FLAG_LOCAL       0x00000004 /* set dest portal */
1301c47ff048SShreyansh Jain 
1302c47ff048SShreyansh Jain /* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
1303c47ff048SShreyansh Jain  * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
1304c47ff048SShreyansh Jain  * any change here should be audited in PME.)
1305c47ff048SShreyansh Jain  */
1306c47ff048SShreyansh Jain #define QMAN_ENQUEUE_FLAG_WATCH_CGR  0x00080000 /* watch congestion state */
1307c47ff048SShreyansh Jain #define QMAN_ENQUEUE_FLAG_DCA        0x00008000 /* perform enqueue-DCA */
1308c47ff048SShreyansh Jain #define QMAN_ENQUEUE_FLAG_DCA_PARK   0x00004000 /* If DCA, requests park */
1309c47ff048SShreyansh Jain #define QMAN_ENQUEUE_FLAG_DCA_PTR(p)		/* If DCA, p is DQRR entry */ \
1310c47ff048SShreyansh Jain 		(((u32)(p) << 2) & 0x00000f00)
1311c47ff048SShreyansh Jain #define QMAN_ENQUEUE_FLAG_C_GREEN    0x00000000 /* choose one C_*** flag */
1312c47ff048SShreyansh Jain #define QMAN_ENQUEUE_FLAG_C_YELLOW   0x00000008
1313c47ff048SShreyansh Jain #define QMAN_ENQUEUE_FLAG_C_RED      0x00000010
1314c47ff048SShreyansh Jain #define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
1315c47ff048SShreyansh Jain /* For the ORP-specific qman_enqueue_orp() variant;
1316c47ff048SShreyansh Jain  * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
1317c47ff048SShreyansh Jain  *   of a frame.
1318c47ff048SShreyansh Jain  */
1319c47ff048SShreyansh Jain #define QMAN_ENQUEUE_FLAG_NLIS       0x01000000
1320c47ff048SShreyansh Jain /* - this flag performs no enqueue but fills in an ORP sequence number that
1321c47ff048SShreyansh Jain  *   would otherwise block it (eg. if a frame has been dropped).
1322c47ff048SShreyansh Jain  */
1323c47ff048SShreyansh Jain #define QMAN_ENQUEUE_FLAG_HOLE       0x02000000
1324c47ff048SShreyansh Jain /* - this flag performs no enqueue but advances NESN to the given sequence
1325c47ff048SShreyansh Jain  *   number.
1326c47ff048SShreyansh Jain  */
1327c47ff048SShreyansh Jain #define QMAN_ENQUEUE_FLAG_NESN       0x04000000
1328c47ff048SShreyansh Jain 
1329c47ff048SShreyansh Jain /* Flags to qman_modify_cgr() */
1330c47ff048SShreyansh Jain #define QMAN_CGR_FLAG_USE_INIT       0x00000001
1331c47ff048SShreyansh Jain #define QMAN_CGR_MODE_FRAME          0x00000001
1332c47ff048SShreyansh Jain 
13334bbc759fSAkhil Goyal #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
13341e0f9b07SHemant Agrawal __rte_internal
13354bbc759fSAkhil Goyal void qman_set_fq_lookup_table(void **table);
13364bbc759fSAkhil Goyal #endif
13374bbc759fSAkhil Goyal 
1338c47ff048SShreyansh Jain /**
1339c47ff048SShreyansh Jain  * qman_get_portal_index - get portal configuration index
1340c47ff048SShreyansh Jain  */
1341c47ff048SShreyansh Jain int qman_get_portal_index(void);
1342c47ff048SShreyansh Jain 
13431e0f9b07SHemant Agrawal __rte_internal
134443797e7bSSunil Kumar Kori u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
134543797e7bSSunil Kumar Kori 			void **bufs);
134643797e7bSSunil Kumar Kori 
1347c47ff048SShreyansh Jain /**
13488e253882SHemant Agrawal  * qman_irqsource_add - add processing sources to be interrupt-driven
13498e253882SHemant Agrawal  * @bits: bitmask of QM_PIRQ_**I processing sources
13508e253882SHemant Agrawal  *
13518e253882SHemant Agrawal  * Adds processing sources that should be interrupt-driven (rather than
13528e253882SHemant Agrawal  * processed via qman_poll_***() functions). Returns zero for success, or
13538e253882SHemant Agrawal  * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
13548e253882SHemant Agrawal  */
13551e0f9b07SHemant Agrawal __rte_internal
13568e253882SHemant Agrawal int qman_irqsource_add(u32 bits);
13578e253882SHemant Agrawal 
13588e253882SHemant Agrawal /**
13597be78d02SJosh Soref  * qman_fq_portal_irqsource_add - similar to qman_irqsource_add, but it
1360a6a75240SNipun Gupta  * takes a portal (FQ-specific) as input rather than using the thread-affined
1361a6a75240SNipun Gupta  * portal.
1362a6a75240SNipun Gupta  */
13631e0f9b07SHemant Agrawal __rte_internal
1364a6a75240SNipun Gupta int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits);
1365a6a75240SNipun Gupta 
1366a6a75240SNipun Gupta /**
13678e253882SHemant Agrawal  * qman_irqsource_remove - remove processing sources from being interrupt-driven
13688e253882SHemant Agrawal  * @bits: bitmask of QM_PIRQ_**I processing sources
13698e253882SHemant Agrawal  *
13708e253882SHemant Agrawal  * Removes processing sources from being interrupt-driven, so that they will
13718e253882SHemant Agrawal  * instead be processed via qman_poll_***() functions. Returns zero for success,
13728e253882SHemant Agrawal  * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
13738e253882SHemant Agrawal  */
13741e0f9b07SHemant Agrawal __rte_internal
13758e253882SHemant Agrawal int qman_irqsource_remove(u32 bits);
13768e253882SHemant Agrawal 
13778e253882SHemant Agrawal /**
1378a6a75240SNipun Gupta  * qman_fq_portal_irqsource_remove - similar to qman_irqsource_remove, but it
1379a6a75240SNipun Gupta  * takes a portal (FQ-specific) as input rather than using the thread-affined
1380a6a75240SNipun Gupta  * portal.
1381a6a75240SNipun Gupta  */
13821e0f9b07SHemant Agrawal __rte_internal
1383a6a75240SNipun Gupta int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits);
1384a6a75240SNipun Gupta 
1385a6a75240SNipun Gupta /**
1386c47ff048SShreyansh Jain  * qman_affine_channel - return the channel ID of a portal
1387c47ff048SShreyansh Jain  * @cpu: the cpu whose affine portal is the subject of the query
1388c47ff048SShreyansh Jain  *
1389c47ff048SShreyansh Jain  * If @cpu is -1, the affine portal for the current CPU will be used. It is a
1390c47ff048SShreyansh Jain  * bug to call this function for any value of @cpu (other than -1) that is not a
1391c47ff048SShreyansh Jain  * member of the cpu mask.
1392c47ff048SShreyansh Jain  */
1393c47ff048SShreyansh Jain u16 qman_affine_channel(int cpu);
1394c47ff048SShreyansh Jain 
13951e0f9b07SHemant Agrawal __rte_internal
1396f5648825SHemant Agrawal unsigned int qman_portal_poll_rx(unsigned int poll_limit,
1397f5648825SHemant Agrawal 				 void **bufs, struct qman_portal *q);
1398f5648825SHemant Agrawal 
1399c47ff048SShreyansh Jain /**
1400c47ff048SShreyansh Jain  * qman_set_vdq - Issue a volatile dequeue command
1401c47ff048SShreyansh Jain  * @fq: Frame Queue on which the volatile dequeue command is issued
1402c47ff048SShreyansh Jain  * @num: Number of Frames requested for volatile dequeue
1403f40d5a53SNipun Gupta  * @vdqcr_flags: QM_VDQCR_EXACT flag for the VDQCR command
1404c47ff048SShreyansh Jain  *
1405c47ff048SShreyansh Jain  * This function will issue a volatile dequeue command to the QMAN.
1406c47ff048SShreyansh Jain  */
14071e0f9b07SHemant Agrawal __rte_internal
1408f40d5a53SNipun Gupta int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);
1409c47ff048SShreyansh Jain 
1410c47ff048SShreyansh Jain /**
1411c47ff048SShreyansh Jain  * qman_dequeue - Get the DQRR entry after volatile dequeue command
1412c47ff048SShreyansh Jain  * @fq: Frame Queue on which the volatile dequeue command is issued
1413c47ff048SShreyansh Jain  *
1414c47ff048SShreyansh Jain  * This function will return the DQRR entry after a volatile dequeue command
1415c47ff048SShreyansh Jain  * is issued. It will keep returning NULL until there is no packet available on
1416c47ff048SShreyansh Jain  * the DQRR.
1417c47ff048SShreyansh Jain  */
14181e0f9b07SHemant Agrawal __rte_internal
1419c47ff048SShreyansh Jain struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
1420c47ff048SShreyansh Jain 
1421c47ff048SShreyansh Jain /**
14227be78d02SJosh Soref  * qman_dqrr_consume - Consume the DQRR entry after volatile dequeue
1423c47ff048SShreyansh Jain  * @fq: Frame Queue on which the volatile dequeue command is issued
1424c47ff048SShreyansh Jain  * @dq: DQRR entry to consume. This is the one which is provided by the
1425c47ff048SShreyansh Jain  *    'qman_dequeue' command.
1426c47ff048SShreyansh Jain  *
1427c47ff048SShreyansh Jain  * This will consume the DQRR entry and make it available for the next
1428c47ff048SShreyansh Jain  * volatile dequeue.
1429c47ff048SShreyansh Jain  */
14301e0f9b07SHemant Agrawal __rte_internal
1431c47ff048SShreyansh Jain void qman_dqrr_consume(struct qman_fq *fq,
1432c47ff048SShreyansh Jain 		       struct qm_dqrr_entry *dq);
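
/*
 * qman_set_vdq(), qman_dequeue() and qman_dqrr_consume() together form a
 * simple pull-mode receive loop. A minimal sketch, assuming process_frame()
 * is caller-provided and a zero return from qman_set_vdq() means the command
 * was issued:
 *
 *     struct qm_dqrr_entry *dq;
 *
 *     // request up to 4 frames from 'fq' via a volatile dequeue command
 *     if (!qman_set_vdq(fq, 4, 0)) {
 *	   // drain whatever the command made available on the DQRR
 *	   while ((dq = qman_dequeue(fq)) != NULL) {
 *	       process_frame(&dq->fd);
 *	       qman_dqrr_consume(fq, dq);
 *	   }
 *     }
 */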
1433c47ff048SShreyansh Jain 
1434c47ff048SShreyansh Jain /**
1435c47ff048SShreyansh Jain  * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
1436c47ff048SShreyansh Jain  *
1437c47ff048SShreyansh Jain  * Disables DQRR processing of the portal. This is reference-counted, so
1438c47ff048SShreyansh Jain  * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
1439c47ff048SShreyansh Jain  * truly re-enable dequeuing.
1440c47ff048SShreyansh Jain  */
1441c47ff048SShreyansh Jain void qman_stop_dequeues(void);
1442c47ff048SShreyansh Jain 
1443c47ff048SShreyansh Jain /**
1444c47ff048SShreyansh Jain  * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
1445c47ff048SShreyansh Jain  *
1446c47ff048SShreyansh Jain  * Enables DQRR processing of the portal. This is reference-counted, so
1447c47ff048SShreyansh Jain  * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
1448c47ff048SShreyansh Jain  * truly re-enable dequeuing.
1449c47ff048SShreyansh Jain  */
1450c47ff048SShreyansh Jain void qman_start_dequeues(void);
1451c47ff048SShreyansh Jain 
1452c47ff048SShreyansh Jain /**
1453c47ff048SShreyansh Jain  * qman_static_dequeue_add - Add pool channels to the portal SDQCR
1454c47ff048SShreyansh Jain  * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
1455c47ff048SShreyansh Jain  *
1456c47ff048SShreyansh Jain  * Adds a set of pool channels to the portal's static dequeue command register
1457c47ff048SShreyansh Jain  * (SDQCR). The requested pools are limited to those the portal has dequeue
1458c47ff048SShreyansh Jain  * access to.
1459c47ff048SShreyansh Jain  */
14601e0f9b07SHemant Agrawal __rte_internal
14619d32ef0fSHemant Agrawal void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);
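
/*
 * Eg. to have portal 'qp' service pool channel 2 as well (a sketch; the
 * pool-channel number is arbitrary and 'qp' is assumed to be a valid portal):
 *
 *     qman_static_dequeue_add(QM_SDQCR_CHANNELS_POOL(2), qp);
 */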
1462c47ff048SShreyansh Jain 
1463c47ff048SShreyansh Jain /**
1464c47ff048SShreyansh Jain  * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
1465c47ff048SShreyansh Jain  * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
1466c47ff048SShreyansh Jain  *
1467c47ff048SShreyansh Jain  * Removes a set of pool channels from the portal's static dequeue command
1468c47ff048SShreyansh Jain  * register (SDQCR). The requested pools are limited to those the portal has
1469c47ff048SShreyansh Jain  * dequeue access to.
1470c47ff048SShreyansh Jain  */
14719d32ef0fSHemant Agrawal void qman_static_dequeue_del(u32 pools, struct qman_portal *qp);
1472c47ff048SShreyansh Jain 
1473c47ff048SShreyansh Jain /**
1474c47ff048SShreyansh Jain  * qman_static_dequeue_get - return the portal's current SDQCR
1475c47ff048SShreyansh Jain  *
1476c47ff048SShreyansh Jain  * Returns the portal's current static dequeue command register (SDQCR). The
1477c47ff048SShreyansh Jain  * entire register is returned, so if only the currently-enabled pool channels
1478c47ff048SShreyansh Jain  * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
1479c47ff048SShreyansh Jain  */
14809d32ef0fSHemant Agrawal u32 qman_static_dequeue_get(struct qman_portal *qp);
1481c47ff048SShreyansh Jain 
1482c47ff048SShreyansh Jain /**
1483c47ff048SShreyansh Jain  * qman_dca - Perform a Discrete Consumption Acknowledgment
1484c47ff048SShreyansh Jain  * @dq: the DQRR entry to be consumed
1485c47ff048SShreyansh Jain  * @park_request: indicates whether the held-active @fq should be parked
1486c47ff048SShreyansh Jain  *
1487c47ff048SShreyansh Jain  * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
1488c47ff048SShreyansh Jain  * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
1489c47ff048SShreyansh Jain  * does not take a 'portal' argument but implies the core affine portal from the
1490c47ff048SShreyansh Jain  * cpu that is currently executing the function. For reasons of locking, this
1491c47ff048SShreyansh Jain  * function must be called from the same CPU as that which processed the DQRR
1492c47ff048SShreyansh Jain  * entry in the first place.
1493c47ff048SShreyansh Jain  */
149443797e7bSSunil Kumar Kori void qman_dca(const struct qm_dqrr_entry *dq, int park_request);
149543797e7bSSunil Kumar Kori 
149643797e7bSSunil Kumar Kori /**
149743797e7bSSunil Kumar Kori  * qman_dca_index - Perform a Discrete Consumption Acknowledgment
149843797e7bSSunil Kumar Kori  * @index: the DQRR index to be consumed
149943797e7bSSunil Kumar Kori  * @park_request: indicates whether the held-active @fq should be parked
150043797e7bSSunil Kumar Kori  *
150143797e7bSSunil Kumar Kori  * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
150243797e7bSSunil Kumar Kori  * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
150343797e7bSSunil Kumar Kori  * does not take a 'portal' argument but implies the core affine portal from the
150443797e7bSSunil Kumar Kori  * cpu that is currently executing the function. For reasons of locking, this
150543797e7bSSunil Kumar Kori  * function must be called from the same CPU as that which processed the DQRR
150643797e7bSSunil Kumar Kori  * entry in the first place.
150743797e7bSSunil Kumar Kori  */
15081e0f9b07SHemant Agrawal __rte_internal
150943797e7bSSunil Kumar Kori void qman_dca_index(u8 index, int park_request);
1510c47ff048SShreyansh Jain 
1511c47ff048SShreyansh Jain /**
1512c47ff048SShreyansh Jain  * qman_eqcr_is_empty - Determine if portal's EQCR is empty
1513c47ff048SShreyansh Jain  *
1514c47ff048SShreyansh Jain  * For use in situations where a cpu-affine caller needs to determine when all
1515c47ff048SShreyansh Jain  * enqueues for the local portal have been processed by Qman but can't use the
1516c47ff048SShreyansh Jain  * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
1517c47ff048SShreyansh Jain  * The function forces tracking of EQCR consumption (which normally doesn't
1518c47ff048SShreyansh Jain  * happen until enqueue processing needs to find space to put new enqueue
1519c47ff048SShreyansh Jain  * commands), and returns zero if the ring still has unprocessed entries,
1520c47ff048SShreyansh Jain  * non-zero if it is empty.
1521c47ff048SShreyansh Jain  */
1522c47ff048SShreyansh Jain int qman_eqcr_is_empty(void);
1523c47ff048SShreyansh Jain 
1524c47ff048SShreyansh Jain /**
1525c47ff048SShreyansh Jain  * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
1526c47ff048SShreyansh Jain  * @handler: callback for processing DCP ERNs
1527c47ff048SShreyansh Jain  * @affine: whether this handler is specific to the locally affine portal
1528c47ff048SShreyansh Jain  *
1529c47ff048SShreyansh Jain  * If a hardware block's interface to Qman (ie. its direct-connect portal, or
1530c47ff048SShreyansh Jain  * DCP) is configured not to receive enqueue rejections, then any enqueues
1531c47ff048SShreyansh Jain  * through that DCP that are rejected will be sent to a given software portal.
1532c47ff048SShreyansh Jain  * If @affine is non-zero, then this handler will only be used for DCP ERNs
1533c47ff048SShreyansh Jain  * received on the portal affine to the current CPU. If multiple CPUs share a
1534c47ff048SShreyansh Jain  * portal and they all call this function, they will be setting the handler for
1535c47ff048SShreyansh Jain  * the same portal! If @affine is zero, then this handler will be global to all
1536c47ff048SShreyansh Jain  * portals handled by this instance of the driver. Only those portals that do
1537c47ff048SShreyansh Jain  * not have their own affine handler will use the global handler.
1538c47ff048SShreyansh Jain  */
1539c47ff048SShreyansh Jain void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
1540c47ff048SShreyansh Jain 
1541c47ff048SShreyansh Jain 	/* FQ management */
1542c47ff048SShreyansh Jain 	/* ------------- */
1543c47ff048SShreyansh Jain /**
1544c47ff048SShreyansh Jain  * qman_create_fq - Allocates a FQ
1545c47ff048SShreyansh Jain  * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
1546c47ff048SShreyansh Jain  * @flags: bit-mask of QMAN_FQ_FLAG_*** options
1547c47ff048SShreyansh Jain  * @fq: memory for storing the 'fq', with callbacks filled in
1548c47ff048SShreyansh Jain  *
1549c47ff048SShreyansh Jain  * Creates a frame queue object for the given @fqid, unless the
1550c47ff048SShreyansh Jain  * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
1551c47ff048SShreyansh Jain  * dynamically allocated (or the function fails if none are available). Once
1552c47ff048SShreyansh Jain  * created, the caller should not touch the memory at 'fq' except as extended to
1553c47ff048SShreyansh Jain  * adjacent memory for user-defined fields (see the definition of "struct
1554c47ff048SShreyansh Jain  * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
1555c47ff048SShreyansh Jain  * pre-existing frame-queues that aren't to be otherwise interfered with; it
1556c47ff048SShreyansh Jain  * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
1557c47ff048SShreyansh Jain  * causes the driver to honour any contextB modifications requested in the
1558c47ff048SShreyansh Jain  * qm_init_fq() API, as this indicates the frame queue will be consumed by a
1559c47ff048SShreyansh Jain  * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
1560c47ff048SShreyansh Jain  * software portals, the contextB field is controlled by the driver and can't be
1561c47ff048SShreyansh Jain  * modified by the caller. If the AS_IS flag is specified, management commands
1562c47ff048SShreyansh Jain  * will be used on portal @p to query state for frame queue @fqid and construct
1563c47ff048SShreyansh Jain  * a frame queue object based on that, rather than assuming/requiring that it be
1564c47ff048SShreyansh Jain  * Out of Service.
1565c47ff048SShreyansh Jain  */
15661e0f9b07SHemant Agrawal __rte_internal
1567c47ff048SShreyansh Jain int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
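
/*
 * Eg. creating a frame queue with a dynamically allocated FQID; the callback
 * name and error handling are illustrative only:
 *
 *     struct qman_fq *fq = calloc(1, sizeof(*fq));
 *
 *     fq->cb.dqrr = my_dqrr_cb; // demux callback, as sketched earlier
 *     if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq))
 *	   handle_error(); // eg. no dynamic FQIDs available
 */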
1568c47ff048SShreyansh Jain 
1569c47ff048SShreyansh Jain /**
1570c47ff048SShreyansh Jain  * qman_destroy_fq - Deallocates a FQ
1571c47ff048SShreyansh Jain  * @fq: the frame queue object to release
1572c47ff048SShreyansh Jain  * @flags: bit-mask of QMAN_FQ_FREE_*** options
1573c47ff048SShreyansh Jain  *
1574c47ff048SShreyansh Jain  * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
1575c47ff048SShreyansh Jain  * not deallocated but the caller regains ownership, to do with as desired. The
1576c47ff048SShreyansh Jain  * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
1577c47ff048SShreyansh Jain  * is specified, in which case it may also be in the 'parked' state.
1578c47ff048SShreyansh Jain  */
1579c47ff048SShreyansh Jain void qman_destroy_fq(struct qman_fq *fq, u32 flags);
1580c47ff048SShreyansh Jain 
1581c47ff048SShreyansh Jain /**
1582c47ff048SShreyansh Jain  * qman_fq_fqid - Queries the frame queue ID of a FQ object
1583c47ff048SShreyansh Jain  * @fq: the frame queue object to query
1584c47ff048SShreyansh Jain  */
15851e0f9b07SHemant Agrawal __rte_internal
1586c47ff048SShreyansh Jain u32 qman_fq_fqid(struct qman_fq *fq);
1587c47ff048SShreyansh Jain 
1588c47ff048SShreyansh Jain /**
1589c47ff048SShreyansh Jain  * qman_fq_state - Queries the state of a FQ object
1590c47ff048SShreyansh Jain  * @fq: the frame queue object to query
1591c47ff048SShreyansh Jain  * @state: pointer to state enum to return the FQ scheduling state
1592c47ff048SShreyansh Jain  * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
1593c47ff048SShreyansh Jain  *
1594c47ff048SShreyansh Jain  * Queries the state of the FQ object, without performing any h/w commands.
1595c47ff048SShreyansh Jain  * This captures the state, as seen by the driver, at the time the function
1596c47ff048SShreyansh Jain  * executes.
1597c47ff048SShreyansh Jain  */
15981e0f9b07SHemant Agrawal __rte_internal
1599c47ff048SShreyansh Jain void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
1600c47ff048SShreyansh Jain 
1601c47ff048SShreyansh Jain /**
1602c47ff048SShreyansh Jain  * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
1603c47ff048SShreyansh Jain  * @fq: the frame queue object to modify, must be 'parked' or new.
1604c47ff048SShreyansh Jain  * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
1605c47ff048SShreyansh Jain  * @opts: the FQ-modification settings, as defined in the low-level API
1606c47ff048SShreyansh Jain  *
1607c47ff048SShreyansh Jain  * The @opts parameter comes from the low-level portal API. Select
1608c47ff048SShreyansh Jain  * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
1609c47ff048SShreyansh Jain  * rather than parked. NB, @opts can be NULL.
1610c47ff048SShreyansh Jain  *
1611c47ff048SShreyansh Jain  * Note that some fields and options within @opts may be ignored or overwritten
1612c47ff048SShreyansh Jain  * by the driver;
1613c47ff048SShreyansh Jain  * 1. the 'count' and 'fqid' fields are always ignored (this operation only
1614c47ff048SShreyansh Jain  * affects one frame queue: @fq).
1615c47ff048SShreyansh Jain  * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
1616c47ff048SShreyansh Jain  * 'fqd' structure's 'context_b' field are sometimes overwritten;
1617c47ff048SShreyansh Jain  *   - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
1618c47ff048SShreyansh Jain  *     initialised to a value used by the driver for demux.
1619c47ff048SShreyansh Jain  *   - if context_b is initialised for demux, so is context_a in case stashing
1620c47ff048SShreyansh Jain  *     is requested (see item 4).
1621c47ff048SShreyansh Jain  * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
1622c47ff048SShreyansh Jain  * objects.)
1623c47ff048SShreyansh Jain  * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
1624c47ff048SShreyansh Jain  * 'dest::channel' field will be overwritten to match the portal used to issue
1625c47ff048SShreyansh Jain  * the command. If the WE_DESTWQ write-enable bit had already been set by the
1626c47ff048SShreyansh Jain  * caller, the channel workqueue will be left as-is, otherwise the write-enable
1627c47ff048SShreyansh Jain  * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
1628c47ff048SShreyansh Jain  * isn't set, the destination channel/workqueue fields and the write-enable bit
1629c47ff048SShreyansh Jain  * are left as-is.
1630c47ff048SShreyansh Jain  * 4. if the driver overwrites context_a/b for demux, then if
1631c47ff048SShreyansh Jain  * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
1632c47ff048SShreyansh Jain  * context_a.address fields and will leave the stashing fields provided by the
1633c47ff048SShreyansh Jain  * user alone, otherwise it will zero out the context_a.stashing fields.
1634c47ff048SShreyansh Jain  */
16351e0f9b07SHemant Agrawal __rte_internal
1636c47ff048SShreyansh Jain int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
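
/*
 * A sketch of typical initialisation that schedules the FQ to a destination
 * channel/workqueue; the channel and workqueue values are illustrative, and
 * the field names follow the qm_mcc_initfq/qm_fqd definitions earlier in
 * this header:
 *
 *     struct qm_mcc_initfq opts;
 *
 *     memset(&opts, 0, sizeof(opts));
 *     opts.we_mask = QM_INITFQ_WE_DESTWQ;
 *     opts.fqd.dest.channel = qm_channel_pool1;
 *     opts.fqd.dest.wq = 4;
 *     if (qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts))
 *	   handle_error();
 */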
1637c47ff048SShreyansh Jain 
1638c47ff048SShreyansh Jain /**
1639c47ff048SShreyansh Jain  * qman_schedule_fq - Schedules a FQ
1640c47ff048SShreyansh Jain  * @fq: the frame queue object to schedule, must be 'parked'
1641c47ff048SShreyansh Jain  *
1642c47ff048SShreyansh Jain  * Schedules the frame queue, which must be Parked, which takes it to
1643c47ff048SShreyansh Jain  * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
1644c47ff048SShreyansh Jain  */
1645c47ff048SShreyansh Jain int qman_schedule_fq(struct qman_fq *fq);
1646c47ff048SShreyansh Jain 
1647c47ff048SShreyansh Jain /**
1648c47ff048SShreyansh Jain  * qman_retire_fq - Retires a FQ
1649c47ff048SShreyansh Jain  * @fq: the frame queue object to retire
1650c47ff048SShreyansh Jain  * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
1651c47ff048SShreyansh Jain  *
1652c47ff048SShreyansh Jain  * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
1653c47ff048SShreyansh Jain  * the retirement was started asynchronously, otherwise it returns negative for
1654c47ff048SShreyansh Jain  * failure. When this function returns zero, @flags is set to indicate whether
1655c47ff048SShreyansh Jain  * the retired FQ is empty and/or whether it has any ORL fragments (to show up
1656c47ff048SShreyansh Jain  * as ERNs). Otherwise the corresponding flags will be known when a subsequent
1657c47ff048SShreyansh Jain  * FQRN message shows up on the portal's message ring.
1658c47ff048SShreyansh Jain  *
1659c47ff048SShreyansh Jain  * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
1660c47ff048SShreyansh Jain  * Active state), the completion will be via the message ring as a FQRN - but
1661c47ff048SShreyansh Jain  * the corresponding callback may occur before this function returns!! Ie. the
1662c47ff048SShreyansh Jain  * caller should be prepared to accept the callback as the function is called,
1663c47ff048SShreyansh Jain  * not only once it has returned.
1664c47ff048SShreyansh Jain  */
16651e0f9b07SHemant Agrawal __rte_internal
1666c47ff048SShreyansh Jain int qman_retire_fq(struct qman_fq *fq, u32 *flags);
1667c47ff048SShreyansh Jain 
1668c47ff048SShreyansh Jain /**
1669c47ff048SShreyansh Jain  * qman_oos_fq - Puts a FQ "out of service"
1670c47ff048SShreyansh Jain  * @fq: the frame queue object to be put out-of-service, must be 'retired'
1671c47ff048SShreyansh Jain  *
1672c47ff048SShreyansh Jain  * The frame queue must be retired and empty, and if any order restoration list
1673c47ff048SShreyansh Jain  * was released as ERNs at the time of retirement, they must all be consumed.
1674c47ff048SShreyansh Jain  */
16751e0f9b07SHemant Agrawal __rte_internal
1676c47ff048SShreyansh Jain int qman_oos_fq(struct qman_fq *fq);
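
/*
 * A typical teardown chains retire, out-of-service and destroy. A sketch
 * only: a production caller must also handle the asynchronous '+1' return
 * from qman_retire_fq() by waiting for the FQRN message before going OOS:
 *
 *     u32 flags;
 *
 *     if (qman_retire_fq(fq, &flags) == 0 && qman_oos_fq(fq) == 0)
 *	   qman_destroy_fq(fq, 0);
 */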
1677c47ff048SShreyansh Jain 
1678c47ff048SShreyansh Jain /**
1679c47ff048SShreyansh Jain  * qman_fq_flow_control - Set the XON/XOFF state of a FQ
1680c47ff048SShreyansh Jain  * @fq: the frame queue object to be set to XON/XOFF state, must not be 'oos',
1681c47ff048SShreyansh Jain  * or 'retired' or 'parked' state
1682c47ff048SShreyansh Jain  * @xon: boolean to set fq in XON or XOFF state
1683c47ff048SShreyansh Jain  *
1684c47ff048SShreyansh Jain  * The frame queue should be in the Tentatively Scheduled or Truly Scheduled
1685c47ff048SShreyansh Jain  * state, otherwise the IFSI interrupt will be asserted.
1686c47ff048SShreyansh Jain  */
1687c47ff048SShreyansh Jain int qman_fq_flow_control(struct qman_fq *fq, int xon);
1688c47ff048SShreyansh Jain 
1689c47ff048SShreyansh Jain /**
1690c47ff048SShreyansh Jain  * qman_query_fq - Queries FQD fields (via h/w query command)
1691c47ff048SShreyansh Jain  * @fq: the frame queue object to be queried
1692c47ff048SShreyansh Jain  * @fqd: storage for the queried FQD fields
1693c47ff048SShreyansh Jain  */
1694c47ff048SShreyansh Jain int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
1695c47ff048SShreyansh Jain 
1696c47ff048SShreyansh Jain /**
1697c47ff048SShreyansh Jain  * qman_query_fq_has_pkts - Queries non-programmable FQD fields and returns '1'
1698c47ff048SShreyansh Jain  * if packets are in the frame queue. If there are no packets on the frame
1699c47ff048SShreyansh Jain  * queue, '0' is returned.
1700c47ff048SShreyansh Jain  * @fq: the frame queue object to be queried
1701c47ff048SShreyansh Jain  */
1702c47ff048SShreyansh Jain int qman_query_fq_has_pkts(struct qman_fq *fq);
1703c47ff048SShreyansh Jain 
1704c47ff048SShreyansh Jain /**
1705c47ff048SShreyansh Jain  * qman_query_fq_np - Queries non-programmable FQD fields
1706c47ff048SShreyansh Jain  * @fq: the frame queue object to be queried
1707c47ff048SShreyansh Jain  * @np: storage for the queried FQD fields
1708c47ff048SShreyansh Jain  */
17091e0f9b07SHemant Agrawal __rte_internal
1710c47ff048SShreyansh Jain int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
1711c47ff048SShreyansh Jain 
1712c47ff048SShreyansh Jain /**
171306268e2cSHemant Agrawal  * qman_query_fq_frm_cnt - Queries fq frame count
171406268e2cSHemant Agrawal  * @fq: the frame queue object to be queried
171506268e2cSHemant Agrawal  * @frm_cnt: number of frames in the queue
171606268e2cSHemant Agrawal  */
17171e0f9b07SHemant Agrawal __rte_internal
171806268e2cSHemant Agrawal int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);
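
/*
 * Eg.
 *     u32 frm_cnt;
 *
 *     if (qman_query_fq_frm_cnt(fq, &frm_cnt) == 0)
 *	   printf("fq %u holds %u frames\n", qman_fq_fqid(fq), frm_cnt);
 */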
171906268e2cSHemant Agrawal 
172006268e2cSHemant Agrawal /**
1721c47ff048SShreyansh Jain  * qman_query_wq - Queries work queue lengths
1722c47ff048SShreyansh Jain  * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
1723c47ff048SShreyansh Jain  *		to this software portal. Otherwise, query length of WQs in a
1724c47ff048SShreyansh Jain  *		channel specified in wq.
1725c47ff048SShreyansh Jain  * @wq: storage for the queried WQ lengths. Also specifies the channel
1726c47ff048SShreyansh Jain  *	to query if query_dedicated is zero.
1727c47ff048SShreyansh Jain  */
1728c47ff048SShreyansh Jain int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
1729c47ff048SShreyansh Jain 
1730c47ff048SShreyansh Jain /**
1731c47ff048SShreyansh Jain  * qman_volatile_dequeue - Issue a volatile dequeue command
1732c47ff048SShreyansh Jain  * @fq: the frame queue object to dequeue from
1733c47ff048SShreyansh Jain  * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
1734c47ff048SShreyansh Jain  * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
1735c47ff048SShreyansh Jain  *
1736c47ff048SShreyansh Jain  * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
1737c47ff048SShreyansh Jain  * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
1738c47ff048SShreyansh Jain  * the VDQCR is already in use, otherwise returns non-zero for failure. If
1739c47ff048SShreyansh Jain  * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
1740c47ff048SShreyansh Jain  * the VDQCR command has finished executing (ie. once the callback for the last
1741c47ff048SShreyansh Jain  * DQRR entry resulting from the VDQCR command has been called). If not using
1742c47ff048SShreyansh Jain  * the FINISH flag, completion can be determined either by detecting the
1743c47ff048SShreyansh Jain  * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
1744c47ff048SShreyansh Jain  * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
1745c47ff048SShreyansh Jain  * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
1746c47ff048SShreyansh Jain  * "flags" retrieved from qman_fq_state().
1747c47ff048SShreyansh Jain  */
17481e0f9b07SHemant Agrawal __rte_internal
1749c47ff048SShreyansh Jain int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
1750c47ff048SShreyansh Jain 
1751c47ff048SShreyansh Jain /**
1752c47ff048SShreyansh Jain  * qman_enqueue - Enqueue a frame to a frame queue
1753c47ff048SShreyansh Jain  * @fq: the frame queue object to enqueue to
1754c47ff048SShreyansh Jain  * @fd: a descriptor of the frame to be enqueued
1755c47ff048SShreyansh Jain  * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
1756c47ff048SShreyansh Jain  *
1757c47ff048SShreyansh Jain  * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
1758c47ff048SShreyansh Jain  * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
1759c47ff048SShreyansh Jain  * field is ignored. The return value is non-zero on error, such as ring full
1760c47ff048SShreyansh Jain  * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
1761c47ff048SShreyansh Jain  * specified), etc. If the ring is full and FLAG_WAIT is specified, this
1762c47ff048SShreyansh Jain  * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
1763c47ff048SShreyansh Jain  * interrupt will assert when Qman consumes the EQCR entry (subject to "status
1764c47ff048SShreyansh Jain  * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
1765c47ff048SShreyansh Jain  * perform an implied "discrete consumption acknowledgment" on the dequeue
1766c47ff048SShreyansh Jain  * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
1767c47ff048SShreyansh Jain  * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
1768c47ff048SShreyansh Jain  * this implicit DCA can delay the release of a "held active" frame queue
1769c47ff048SShreyansh Jain  * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
1770c47ff048SShreyansh Jain  * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
1771c47ff048SShreyansh Jain  * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
1772c47ff048SShreyansh Jain  * acknowledgment should "park request" the "held active" frame queue. Ie.
1773c47ff048SShreyansh Jain  * when the portal eventually releases that frame queue, it will be left in the
1774c47ff048SShreyansh Jain  * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
1775c47ff048SShreyansh Jain  * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
1776c47ff048SShreyansh Jain  * is requested, and the FQ is a member of a congestion group, then this
1777c47ff048SShreyansh Jain  * function returns -EAGAIN if the congestion group is currently congested.
1778c47ff048SShreyansh Jain  * Note, this does not eliminate ERNs, as the async interface means we can be
1779c47ff048SShreyansh Jain  * sending enqueue commands to an un-congested FQ that becomes congested before
1780c47ff048SShreyansh Jain  * the enqueue commands are processed, but it does minimise needless thrashing
1781c47ff048SShreyansh Jain  * of an already busy hardware resource by throttling many of the to-be-dropped
1782c47ff048SShreyansh Jain  * enqueues "at the source".
1783c47ff048SShreyansh Jain  */
17841e0f9b07SHemant Agrawal __rte_internal
1785c47ff048SShreyansh Jain int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
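
/*
 * Eg. a simple busy-wait enqueue with no QMAN_ENQUEUE_FLAG_*** options; a
 * sketch only, real callers usually bound the retries:
 *
 *     while (qman_enqueue(fq, &fd, 0))
 *	   ; // non-zero means no EQCR slot yet: spin until one frees up
 */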
1786c47ff048SShreyansh Jain 
17871e0f9b07SHemant Agrawal __rte_internal
178843797e7bSSunil Kumar Kori int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
1789c47ff048SShreyansh Jain 		       int frames_to_send);
1790c47ff048SShreyansh Jain 
1791c6c1ac5cSAkhil Goyal /**
17929124e65dSGagandeep Singh  * qman_ern_poll_free - Polling on MR and calling a callback function to free
17939124e65dSGagandeep Singh  * mbufs when SW ERNs are received.
17949124e65dSGagandeep Singh  */
17959124e65dSGagandeep Singh __rte_internal
17969124e65dSGagandeep Singh void qman_ern_poll_free(void);
17979124e65dSGagandeep Singh 
17989124e65dSGagandeep Singh /**
17999124e65dSGagandeep Singh  * qman_ern_register_cb - Register a callback function to free buffers.
18009124e65dSGagandeep Singh  */
18019124e65dSGagandeep Singh __rte_internal
18029124e65dSGagandeep Singh void qman_ern_register_cb(qman_cb_free_mbuf cb);
18039124e65dSGagandeep Singh 
18049124e65dSGagandeep Singh /**
1805c6c1ac5cSAkhil Goyal  * qman_enqueue_multi_fq - Enqueue multiple frames to their respective frame
1806c6c1ac5cSAkhil Goyal  * queues.
1807c6c1ac5cSAkhil Goyal  * @fq[]: Array of frame queue objects to enqueue to
1808c6c1ac5cSAkhil Goyal  * @fd: pointer to first descriptor of frame to be enqueued
1809c6c1ac5cSAkhil Goyal  * @frames_to_send: number of frames to be sent.
1810c6c1ac5cSAkhil Goyal  *
1811c6c1ac5cSAkhil Goyal  * This API is similar to qman_enqueue_multi(), but it takes FDs which need
1812c6c1ac5cSAkhil Goyal  * to be processed by different frame queues.
1813c6c1ac5cSAkhil Goyal  */
18141e0f9b07SHemant Agrawal __rte_internal
1815c6c1ac5cSAkhil Goyal int
1816c6c1ac5cSAkhil Goyal qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
1817fe3688baSAkhil Goyal 		      u32 *flags, int frames_to_send);
1818c6c1ac5cSAkhil Goyal 
1819c47ff048SShreyansh Jain typedef int (*qman_cb_precommit) (void *arg);
1820c47ff048SShreyansh Jain 
1821c47ff048SShreyansh Jain /**
1822c47ff048SShreyansh Jain  * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
1823c47ff048SShreyansh Jain  * @fq: the frame queue object to enqueue to
1824c47ff048SShreyansh Jain  * @fd: a descriptor of the frame to be enqueued
1825c47ff048SShreyansh Jain  * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
1826c47ff048SShreyansh Jain  * @orp: the frame queue object used as an order restoration point.
1827c47ff048SShreyansh Jain  * @orp_seqnum: the sequence number of this frame in the order restoration path
1828c47ff048SShreyansh Jain  *
1829c47ff048SShreyansh Jain  * Similar to qman_enqueue(), but with the addition of an Order Restoration
1830c47ff048SShreyansh Jain  * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
1831c47ff048SShreyansh Jain  * enqueue operation to employ order restoration. Each frame queue object acts
1832c47ff048SShreyansh Jain  * as an Order Definition Point (ODP) by providing each frame dequeued from it
1833c47ff048SShreyansh Jain  * with an incrementing sequence number, this value is generally ignored unless
1834c47ff048SShreyansh Jain  * that sequence of dequeued frames will need order restoration later. Each
1835c47ff048SShreyansh Jain  * frame queue object also encapsulates an Order Restoration Point (ORP), which
1836c47ff048SShreyansh Jain  * is a re-assembly context for re-ordering frames relative to their sequence
1837c47ff048SShreyansh Jain  * numbers as they are enqueued. The ORP does not have to be within the frame
1838c47ff048SShreyansh Jain  * queue that receives the enqueued frame, in fact it is usually the frame
1839c47ff048SShreyansh Jain  * queue from which the frames were originally dequeued. For the purposes of
1840c47ff048SShreyansh Jain  * order restoration, multiple frames (or "fragments") can be enqueued for a
1841c47ff048SShreyansh Jain  * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
1842c47ff048SShreyansh Jain  * enqueues except the final fragment of a given sequence number. Ordering
1843c47ff048SShreyansh Jain  * between sequence numbers is guaranteed, even if fragments of different
1844c47ff048SShreyansh Jain  * sequence numbers are interlaced with one another. Fragments of the same
1845c47ff048SShreyansh Jain  * sequence number will retain the order in which they are enqueued. If no
1846c47ff048SShreyansh Jain  * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
1847c47ff048SShreyansh Jain  * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
1848c47ff048SShreyansh Jain  * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
1849c47ff048SShreyansh Jain  * sequence number should become the ORP's "Next Expected Sequence Number".
1850c47ff048SShreyansh Jain  *
1851c47ff048SShreyansh Jain  * Side note: a frame queue object can be used purely as an ORP, without
1852c47ff048SShreyansh Jain  * carrying any frames at all. Care should be taken not to deallocate a frame
1853c47ff048SShreyansh Jain  * queue object that is being actively used as an ORP, as a future allocation
1854c47ff048SShreyansh Jain  * of the frame queue object may start using the internal ORP before the
1855c47ff048SShreyansh Jain  * previous use has finished.
1856c47ff048SShreyansh Jain  */
1857c47ff048SShreyansh Jain int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
1858c47ff048SShreyansh Jain 		     struct qman_fq *orp, u16 orp_seqnum);
1859c47ff048SShreyansh Jain 
1860c47ff048SShreyansh Jain /**
1861c47ff048SShreyansh Jain  * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
1862c47ff048SShreyansh Jain  * @result: is set by the API to the base FQID of the allocated range
1863c47ff048SShreyansh Jain  * @count: the number of FQIDs required
1864c47ff048SShreyansh Jain  * @align: required alignment of the allocated range
1865c47ff048SShreyansh Jain  * @partial: non-zero if the API can return fewer than @count FQIDs
1866c47ff048SShreyansh Jain  *
1867c47ff048SShreyansh Jain  * Returns the number of frame queues allocated, or a negative error code. If
1868c47ff048SShreyansh Jain  * @partial is non zero, the allocation request may return a smaller range of
1869c47ff048SShreyansh Jain  * FQs than requested (though alignment will be as requested). If @partial is
1870c47ff048SShreyansh Jain  * zero, the return value will either be 'count' or negative.
1871c47ff048SShreyansh Jain  */
18724defbc8cSSachin Saxena __rte_internal
1873c47ff048SShreyansh Jain int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
1874c47ff048SShreyansh Jain static inline int qman_alloc_fqid(u32 *result)
1875c47ff048SShreyansh Jain {
1876c47ff048SShreyansh Jain 	int ret = qman_alloc_fqid_range(result, 1, 0, 0);
1877c47ff048SShreyansh Jain 
1878c47ff048SShreyansh Jain 	return (ret > 0) ? 0 : ret;
1879c47ff048SShreyansh Jain }
1880c47ff048SShreyansh Jain 
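/*
 * Example (an illustrative sketch; the count and alignment are arbitrary):
 * allocate four consecutive FQIDs aligned on a multiple of four, refusing
 * a partial range:
 *
 *	u32 base_fqid;
 *	int ret = qman_alloc_fqid_range(&base_fqid, 4, 4, 0);
 *
 *	if (ret < 0)
 *		return ret;	// allocation failed, nothing to release
 *	// base_fqid .. base_fqid + 3 now belong to the caller
 */
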
1881c47ff048SShreyansh Jain /**
1882c47ff048SShreyansh Jain  * qman_release_fqid_range - Release the specified range of frame queue IDs
1883c47ff048SShreyansh Jain  * @fqid: the base FQID of the range to deallocate
1884c47ff048SShreyansh Jain  * @count: the number of FQIDs in the range
1885c47ff048SShreyansh Jain  *
1886c47ff048SShreyansh Jain  * This function can also be used to seed the allocator with ranges of FQIDs
1887c47ff048SShreyansh Jain  * that it can subsequently allocate from.
1888c47ff048SShreyansh Jain  */
1889c47ff048SShreyansh Jain void qman_release_fqid_range(u32 fqid, unsigned int count);
1890c47ff048SShreyansh Jain static inline void qman_release_fqid(u32 fqid)
1891c47ff048SShreyansh Jain {
1892c47ff048SShreyansh Jain 	qman_release_fqid_range(fqid, 1);
1893c47ff048SShreyansh Jain }
1894c47ff048SShreyansh Jain 
1895c47ff048SShreyansh Jain void qman_seed_fqid_range(u32 fqid, unsigned int count);
1896c47ff048SShreyansh Jain 
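/*
 * Example (an illustrative sketch; the range is hypothetical): at start-up,
 * seed the allocator with a block of FQIDs set aside for this application
 * by the platform configuration, so that qman_alloc_fqid_range() can later
 * hand them out:
 *
 *	qman_seed_fqid_range(0x100, 256);
 */
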
1897c47ff048SShreyansh Jain int qman_shutdown_fq(u32 fqid);
1898c47ff048SShreyansh Jain 
1899c47ff048SShreyansh Jain /**
1900c47ff048SShreyansh Jain  * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
1901c47ff048SShreyansh Jain  * @fqid: the base FQID of the range to reserve
1902c47ff048SShreyansh Jain  * @count: the number of FQIDs in the range
1903c47ff048SShreyansh Jain  */
19041e0f9b07SHemant Agrawal __rte_internal
1905c47ff048SShreyansh Jain int qman_reserve_fqid_range(u32 fqid, unsigned int count);
1906c47ff048SShreyansh Jain static inline int qman_reserve_fqid(u32 fqid)
1907c47ff048SShreyansh Jain {
1908c47ff048SShreyansh Jain 	return qman_reserve_fqid_range(fqid, 1);
1909c47ff048SShreyansh Jain }
1910c47ff048SShreyansh Jain 
1911c47ff048SShreyansh Jain /* Pool-channel management */
1912c47ff048SShreyansh Jain /**
1913c47ff048SShreyansh Jain  * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
1914c47ff048SShreyansh Jain  * @result: is set by the API to the base pool-channel ID of the allocated range
1915c47ff048SShreyansh Jain  * @count: the number of pool-channel IDs required
1916c47ff048SShreyansh Jain  * @align: required alignment of the allocated range
1917c47ff048SShreyansh Jain  * @partial: non-zero if the API can return fewer than @count
1918c47ff048SShreyansh Jain  *
1919c47ff048SShreyansh Jain  * Returns the number of pool-channel IDs allocated, or a negative error code.
1920c47ff048SShreyansh Jain  * If @partial is non-zero, the allocation request may return a smaller range
1921c47ff048SShreyansh Jain  * than requested (though alignment will be as requested). If @partial is zero,
1922c47ff048SShreyansh Jain  * the return value will either be 'count' or negative.
1923c47ff048SShreyansh Jain  */
19241e0f9b07SHemant Agrawal __rte_internal
1925c47ff048SShreyansh Jain int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
1926c47ff048SShreyansh Jain static inline int qman_alloc_pool(u32 *result)
1927c47ff048SShreyansh Jain {
1928c47ff048SShreyansh Jain 	int ret = qman_alloc_pool_range(result, 1, 0, 0);
1929c47ff048SShreyansh Jain 
1930c47ff048SShreyansh Jain 	return (ret > 0) ? 0 : ret;
1931c47ff048SShreyansh Jain }
1932c47ff048SShreyansh Jain 
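/*
 * Example (an illustrative sketch; 'fq' is assumed to be an already-created
 * FQ object, and the work-queue priority is arbitrary): allocate one
 * pool-channel ID and schedule an FQ to that channel, so that any portal
 * with the pool channel in its dequeue mask can service it:
 *
 *	struct qm_mcc_initfq opts;
 *	u32 pool;
 *
 *	if (qman_alloc_pool(&pool))
 *		return -1;
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = QM_INITFQ_WE_DESTWQ;
 *	opts.fqd.dest.channel = pool;
 *	opts.fqd.dest.wq = 3;
 *	return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 */
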
1933c47ff048SShreyansh Jain /**
1934c47ff048SShreyansh Jain  * qman_release_pool_range - Release the specified range of pool-channel IDs
1935c47ff048SShreyansh Jain  * @id: the base pool-channel ID of the range to deallocate
1936c47ff048SShreyansh Jain  * @count: the number of pool-channel IDs in the range
1937c47ff048SShreyansh Jain  */
1938c47ff048SShreyansh Jain void qman_release_pool_range(u32 id, unsigned int count);
1939c47ff048SShreyansh Jain static inline void qman_release_pool(u32 id)
1940c47ff048SShreyansh Jain {
1941c47ff048SShreyansh Jain 	qman_release_pool_range(id, 1);
1942c47ff048SShreyansh Jain }
1943c47ff048SShreyansh Jain 
1944c47ff048SShreyansh Jain /**
1945c47ff048SShreyansh Jain  * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
1946c47ff048SShreyansh Jain  * @id: the base pool-channel ID of the range to reserve
1947c47ff048SShreyansh Jain  * @count: the number of pool-channel IDs in the range
1948c47ff048SShreyansh Jain  */
1949c47ff048SShreyansh Jain int qman_reserve_pool_range(u32 id, unsigned int count);
1950c47ff048SShreyansh Jain static inline int qman_reserve_pool(u32 id)
1951c47ff048SShreyansh Jain {
1952c47ff048SShreyansh Jain 	return qman_reserve_pool_range(id, 1);
1953c47ff048SShreyansh Jain }
1954c47ff048SShreyansh Jain 
1955c47ff048SShreyansh Jain void qman_seed_pool_range(u32 id, unsigned int count);
1956c47ff048SShreyansh Jain 
1957c47ff048SShreyansh Jain 	/* CGR management */
1958c47ff048SShreyansh Jain 	/* -------------- */
1959c47ff048SShreyansh Jain /**
1960c47ff048SShreyansh Jain  * qman_create_cgr - Register a congestion group object
1961c47ff048SShreyansh Jain  * @cgr: the 'cgr' object, with fields filled in
1962c47ff048SShreyansh Jain  * @flags: QMAN_CGR_FLAG_* values
1963c47ff048SShreyansh Jain  * @opts: optional state of CGR settings
1964c47ff048SShreyansh Jain  *
1965c47ff048SShreyansh Jain  * Registers this object to receive congestion entry/exit callbacks on the
1966c47ff048SShreyansh Jain  * portal affine to the cpu on which this API is executed. If opts is
1967c47ff048SShreyansh Jain  * NULL then only the callback (cgr->cb) function is registered. If @flags
1968c47ff048SShreyansh Jain  * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
1969c47ff048SShreyansh Jain  * any unspecified parameters) will be used rather than a modify hw command
1970c47ff048SShreyansh Jain  * (which only modifies the specified parameters).
1971c47ff048SShreyansh Jain  */
19721e0f9b07SHemant Agrawal __rte_internal
1973c47ff048SShreyansh Jain int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
1974c47ff048SShreyansh Jain 		    struct qm_mcc_initcgr *opts);
1975c47ff048SShreyansh Jain 
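/*
 * Example (an illustrative sketch; error handling is trimmed and the
 * callback body is hypothetical): allocate a CGR ID with qman_alloc_cgrid()
 * (declared below), then register a congestion group with congestion
 * state-change notifications enabled:
 *
 *	static void cscn_cb(struct qman_portal *qm, struct qman_cgr *c,
 *			    int congested)
 *	{
 *		// pause or resume enqueues based on 'congested'
 *	}
 *
 *	struct qman_cgr cgr = { .cb = cscn_cb };
 *	struct qm_mcc_initcgr opts;
 *
 *	qman_alloc_cgrid(&cgr.cgrid);
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = QM_CGR_WE_CSCN_EN;
 *	opts.cgr.cscn_en = QM_CGR_EN;
 *	qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
 */
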
1976c47ff048SShreyansh Jain /**
1977c47ff048SShreyansh Jain  * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
1978c47ff048SShreyansh Jain  * @cgr: the 'cgr' object, with fields filled in
1979c47ff048SShreyansh Jain  * @flags: QMAN_CGR_FLAG_* values
1980c47ff048SShreyansh Jain  * @dcp_portal: the DCP portal to which the cgr object is registered.
1981c47ff048SShreyansh Jain  * @opts: optional state of CGR settings
1983c47ff048SShreyansh Jain  */
1984c47ff048SShreyansh Jain int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
1985c47ff048SShreyansh Jain 			   struct qm_mcc_initcgr *opts);
1986c47ff048SShreyansh Jain 
1987c47ff048SShreyansh Jain /**
1988c47ff048SShreyansh Jain  * qman_delete_cgr - Deregisters a congestion group object
1989c47ff048SShreyansh Jain  * @cgr: the 'cgr' object to deregister
1990c47ff048SShreyansh Jain  *
1991c47ff048SShreyansh Jain  * "Unplugs" this CGR object from the portal affine to the cpu on which this API
19927be78d02SJosh Soref  * is executed. This must be executed on the same affine portal on which it was
1993c47ff048SShreyansh Jain  * created.
1994c47ff048SShreyansh Jain  */
19951e0f9b07SHemant Agrawal __rte_internal
1996c47ff048SShreyansh Jain int qman_delete_cgr(struct qman_cgr *cgr);
1997c47ff048SShreyansh Jain 
1998c47ff048SShreyansh Jain /**
1999c47ff048SShreyansh Jain  * qman_modify_cgr - Modify CGR fields
2000c47ff048SShreyansh Jain  * @cgr: the 'cgr' object to modify
2001c47ff048SShreyansh Jain  * @flags: QMAN_CGR_FLAG_* values
2002c47ff048SShreyansh Jain  * @opts: the CGR-modification settings
2003c47ff048SShreyansh Jain  *
2004c47ff048SShreyansh Jain  * The @opts parameter comes from the low-level portal API, and can be NULL.
2005c47ff048SShreyansh Jain  * Note that some fields and options within @opts may be ignored or overwritten
2006c47ff048SShreyansh Jain  * by the driver, in particular the 'cgrid' field is ignored (this operation
2007c47ff048SShreyansh Jain  * only affects the given CGR object). If @flags contains
2008c47ff048SShreyansh Jain  * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
2009c47ff048SShreyansh Jain  * unspecified parameters) will be used rather than a modify hw command (which
2010c47ff048SShreyansh Jain  * only modifies the specified parameters).
2011c47ff048SShreyansh Jain  */
20121e0f9b07SHemant Agrawal __rte_internal
2013c47ff048SShreyansh Jain int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
2014c47ff048SShreyansh Jain 		    struct qm_mcc_initcgr *opts);
2015c47ff048SShreyansh Jain 
2016c47ff048SShreyansh Jain /**
2017c47ff048SShreyansh Jain  * qman_query_cgr - Queries CGR fields
2018c47ff048SShreyansh Jain  * @cgr: the 'cgr' object to query
2019c47ff048SShreyansh Jain  * @result: storage for the queried congestion group record
2020c47ff048SShreyansh Jain  */
2021c47ff048SShreyansh Jain int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
2022c47ff048SShreyansh Jain 
2023c47ff048SShreyansh Jain /**
2024c47ff048SShreyansh Jain  * qman_query_congestion - Queries the state of all congestion groups
2025c47ff048SShreyansh Jain  * @congestion: storage for the queried state of all congestion groups
2026c47ff048SShreyansh Jain  */
2027c47ff048SShreyansh Jain int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
2028c47ff048SShreyansh Jain 
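/*
 * Example (an illustrative sketch; assumes 'cgr' was registered with
 * qman_create_cgr(), and uses the qm_mcr_querycgr_i_get64() accessor
 * defined alongside the result structure earlier in this header): read back
 * a congestion group's instantaneous byte count:
 *
 *	struct qm_mcr_querycgr state;
 *
 *	if (qman_query_cgr(&cgr, &state) == 0)
 *		printf("instantaneous byte count: %llu\n",
 *		       (unsigned long long)qm_mcr_querycgr_i_get64(&state));
 */
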
2029c47ff048SShreyansh Jain /**
2030c47ff048SShreyansh Jain  * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
2031c47ff048SShreyansh Jain  * @result: is set by the API to the base CGR ID of the allocated range
2032c47ff048SShreyansh Jain  * @count: the number of CGR IDs required
2033c47ff048SShreyansh Jain  * @align: required alignment of the allocated range
2034c47ff048SShreyansh Jain  * @partial: non-zero if the API can return fewer than @count
2035c47ff048SShreyansh Jain  *
2036c47ff048SShreyansh Jain  * Returns the number of CGR IDs allocated, or a negative error code.
2037c47ff048SShreyansh Jain  * If @partial is non-zero, the allocation request may return a smaller range
2038c47ff048SShreyansh Jain  * than requested (though alignment will be as requested). If @partial is zero,
2039c47ff048SShreyansh Jain  * the return value will either be 'count' or negative.
2040c47ff048SShreyansh Jain  */
20411e0f9b07SHemant Agrawal __rte_internal
2042c47ff048SShreyansh Jain int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
2043c47ff048SShreyansh Jain static inline int qman_alloc_cgrid(u32 *result)
2044c47ff048SShreyansh Jain {
2045c47ff048SShreyansh Jain 	int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
2046c47ff048SShreyansh Jain 
2047c47ff048SShreyansh Jain 	return (ret > 0) ? 0 : ret;
2048c47ff048SShreyansh Jain }
2049c47ff048SShreyansh Jain 
2050c47ff048SShreyansh Jain /**
2051c47ff048SShreyansh Jain  * qman_release_cgrid_range - Release the specified range of CGR IDs
2052c47ff048SShreyansh Jain  * @id: the base CGR ID of the range to deallocate
2053c47ff048SShreyansh Jain  * @count: the number of CGR IDs in the range
2054c47ff048SShreyansh Jain  */
20551e0f9b07SHemant Agrawal __rte_internal
2056c47ff048SShreyansh Jain void qman_release_cgrid_range(u32 id, unsigned int count);
2057c47ff048SShreyansh Jain static inline void qman_release_cgrid(u32 id)
2058c47ff048SShreyansh Jain {
2059c47ff048SShreyansh Jain 	qman_release_cgrid_range(id, 1);
2060c47ff048SShreyansh Jain }
2061c47ff048SShreyansh Jain 
2062c47ff048SShreyansh Jain /**
2063c47ff048SShreyansh Jain  * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
2064c47ff048SShreyansh Jain  * @id: the base CGR ID of the range to reserve
2065c47ff048SShreyansh Jain  * @count: the number of CGR IDs in the range
2066c47ff048SShreyansh Jain  */
2067c47ff048SShreyansh Jain int qman_reserve_cgrid_range(u32 id, unsigned int count);
2068c47ff048SShreyansh Jain static inline int qman_reserve_cgrid(u32 id)
2069c47ff048SShreyansh Jain {
2070c47ff048SShreyansh Jain 	return qman_reserve_cgrid_range(id, 1);
2071c47ff048SShreyansh Jain }
2072c47ff048SShreyansh Jain 
2073c47ff048SShreyansh Jain void qman_seed_cgrid_range(u32 id, unsigned int count);
2074c47ff048SShreyansh Jain 
2075c47ff048SShreyansh Jain 	/* Helpers */
2076c47ff048SShreyansh Jain 	/* ------- */
2077c47ff048SShreyansh Jain /**
2078c47ff048SShreyansh Jain  * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
2079c47ff048SShreyansh Jain  * @fqid: the FQID that will be initialised by other s/w
2080c47ff048SShreyansh Jain  *
2081c47ff048SShreyansh Jain  * In many situations, an FQID is provided for communication between s/w
2082c47ff048SShreyansh Jain  * entities, and whilst the consumer is responsible for initialising and
2083c47ff048SShreyansh Jain  * scheduling the FQ, the producer(s) generally create a wrapper FQ object
2084c47ff048SShreyansh Jain  * and only call qman_enqueue() (no FQ initialisation, scheduling, etc), ie.:
2085c47ff048SShreyansh Jain  *     qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
2086c47ff048SShreyansh Jain  * However, data cannot be enqueued to the FQ until it is initialised out of
2087c47ff048SShreyansh Jain  * the OOS state - this function polls for that condition. It is particularly
2088c47ff048SShreyansh Jain  * useful for users of IPC functions - each endpoint's Rx FQ is the other
2089c47ff048SShreyansh Jain  * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
2090c47ff048SShreyansh Jain  * and then use this API on the (NO_MODIFY) Tx FQ object in order to
2091c47ff048SShreyansh Jain  * synchronise. The function returns zero for success, +1 if the FQ is still in
2092c47ff048SShreyansh Jain  * the OOS state, or negative if there was an error.
2093c47ff048SShreyansh Jain  */
2094c47ff048SShreyansh Jain static inline int qman_poll_fq_for_init(struct qman_fq *fq)
2095c47ff048SShreyansh Jain {
2096c47ff048SShreyansh Jain 	struct qm_mcr_queryfq_np np;
2097c47ff048SShreyansh Jain 	int err;
2098c47ff048SShreyansh Jain 
2099c47ff048SShreyansh Jain 	err = qman_query_fq_np(fq, &np);
2100c47ff048SShreyansh Jain 	if (err)
2101c47ff048SShreyansh Jain 		return err;
2102c47ff048SShreyansh Jain 	if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
2103c47ff048SShreyansh Jain 		return 1;
2104c47ff048SShreyansh Jain 	return 0;
2105c47ff048SShreyansh Jain }
2106c47ff048SShreyansh Jain 
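/*
 * Example (an illustrative sketch; the back-off interval is arbitrary and
 * 'tx_fq' is assumed to be a NO_MODIFY FQ object wrapping the peer's Rx
 * FQID): block until the peer has initialised the shared FQ:
 *
 *	int ret;
 *
 *	while ((ret = qman_poll_fq_for_init(tx_fq)) > 0)
 *		usleep(1000);	// still OOS, give the peer time
 *	if (ret < 0)
 *		return ret;	// the query itself failed
 *	// tx_fq has left the OOS state; qman_enqueue() may now be used
 */
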
2107c47ff048SShreyansh Jain #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
21080d941d16SHemant Agrawal #define cpu_to_hw_sg(x)
21090d941d16SHemant Agrawal #define hw_sg_to_cpu(x)
2110c47ff048SShreyansh Jain #else
2111c47ff048SShreyansh Jain #define cpu_to_hw_sg(x)  __cpu_to_hw_sg(x)
2112c47ff048SShreyansh Jain #define hw_sg_to_cpu(x)  __hw_sg_to_cpu(x)
2113c47ff048SShreyansh Jain 
2114c47ff048SShreyansh Jain static inline void __cpu_to_hw_sg(struct qm_sg_entry *sgentry)
2115c47ff048SShreyansh Jain {
2116c47ff048SShreyansh Jain 	sgentry->opaque = cpu_to_be64(sgentry->opaque);
2117c47ff048SShreyansh Jain 	sgentry->val = cpu_to_be32(sgentry->val);
2118c47ff048SShreyansh Jain 	sgentry->val_off = cpu_to_be16(sgentry->val_off);
2119c47ff048SShreyansh Jain }
2120c47ff048SShreyansh Jain 
2121c47ff048SShreyansh Jain static inline void __hw_sg_to_cpu(struct qm_sg_entry *sgentry)
2122c47ff048SShreyansh Jain {
2123c47ff048SShreyansh Jain 	sgentry->opaque = be64_to_cpu(sgentry->opaque);
2124c47ff048SShreyansh Jain 	sgentry->val = be32_to_cpu(sgentry->val);
2125c47ff048SShreyansh Jain 	sgentry->val_off = be16_to_cpu(sgentry->val_off);
2126c47ff048SShreyansh Jain }
2127c47ff048SShreyansh Jain #endif
2128f6fadc3eSShreyansh Jain 
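/*
 * Example (an illustrative sketch; the buffer address and length are
 * hypothetical, and qm_sg_entry_set64() refers to the address helper
 * defined with the S/G entry structure earlier in this header): fill in a
 * single S/G entry in CPU byte order, then convert it in place to the
 * hardware (big-endian) layout before publishing it to QMan:
 *
 *	struct qm_sg_entry sg;
 *
 *	memset(&sg, 0, sizeof(sg));
 *	qm_sg_entry_set64(&sg, buf_phys_addr);
 *	sg.length = buf_len;
 *	sg.final = 1;		// last (and only) entry of the table
 *	cpu_to_hw_sg(&sg);	// no-op when the host is big-endian
 */
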
2129f6fadc3eSShreyansh Jain #ifdef __cplusplus
2130f6fadc3eSShreyansh Jain }
2131f6fadc3eSShreyansh Jain #endif
2132f6fadc3eSShreyansh Jain 
2133f6fadc3eSShreyansh Jain #endif /* __FSL_QMAN_H */
2134