/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2024 NXP
 */

#ifndef _DPAA2_QDMA_H_
#define _DPAA2_QDMA_H_

#include "portal/dpaa2_hw_pvt.h"
#include "portal/dpaa2_hw_dpio.h"

#define DPAA2_QDMA_MAX_VHANS		64

#define DPAA2_DPDMAI_MAX_QUEUES	16

/** Notification by FQD_CTX[fqid] */
#define QDMA_SER_CTX (1 << 8)
#define DPAA2_RBP_MEM_RW            0x0
/**
 * Source descriptor command read transaction type for RBP=0:
 * coherent copy of cacheable memory
 */
#define DPAA2_COHERENT_NO_ALLOCATE_CACHE	0xb
#define DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE	0x7
/**
 * Destination descriptor command write transaction type for RBP=0:
 * coherent copy of cacheable memory
 */
#define DPAA2_COHERENT_ALLOCATE_CACHE		0x6
#define DPAA2_LX2_COHERENT_ALLOCATE_CACHE	0xb

/** Maximum possible H/W Queues on each core */
#define MAX_HW_QUEUE_PER_CORE 64

#define DPAA2_QDMA_FD_FLUSH_FORMAT 0x0
#define DPAA2_QDMA_FD_LONG_FORMAT 0x1
#define DPAA2_QDMA_FD_SHORT_FORMAT 0x3

#define DPAA2_QDMA_BMT_ENABLE 0x1
#define DPAA2_QDMA_BMT_DISABLE 0x0

/** Source/Destination Descriptor */
struct __rte_packed_begin qdma_sdd {
	uint32_t rsv;
	/** Stride configuration */
	uint32_t stride;
	/** Route-by-port command */
	union {
		uint32_t rbpcmd;
		struct rbpcmd_st {
			uint32_t vfid:6;
			uint32_t rsv4:2;
			uint32_t pfid:1;
			uint32_t rsv3:7;
			uint32_t attr:3;
			uint32_t rsv2:1;
			uint32_t at:2;
			uint32_t vfa:1;
			uint32_t ca:1;
			uint32_t tc:3;
			uint32_t rsv1:5;
		} rbpcmd_simple;
	};
	union {
		uint32_t cmd;
		struct rcmd_simple {
			uint32_t portid:4;
			uint32_t rsv1:14;
			uint32_t rbp:1;
			uint32_t ssen:1;
			uint32_t rthrotl:4;
			uint32_t sqos:3;
			uint32_t ns:1;
			uint32_t rdtype:4;
		} read_cmd;
		struct wcmd_simple {
			uint32_t portid:4;
			uint32_t rsv3:10;
			uint32_t rsv2:2;
			uint32_t lwc:2;
			uint32_t rbp:1;
			uint32_t dsen:1;
			uint32_t rsv1:4;
			uint32_t dqos:3;
			uint32_t ns:1;
			uint32_t wrttype:4;
		} write_cmd;
	};
} __rte_packed_end;
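
/*
 * Illustrative sketch, not part of the driver API: one way the source and
 * destination descriptors above could be filled for an RBP=0 coherent copy
 * of cacheable memory, using the read/write transaction types defined
 * earlier. The helper name is hypothetical; the real setup lives in the
 * driver sources.
 */
static inline void
dpaa2_qdma_sdd_example_fill(struct qdma_sdd *src_sdd,
	struct qdma_sdd *dst_sdd)
{
	/* Source side: read command only, no route-by-port. */
	src_sdd->stride = 0;
	src_sdd->rbpcmd = 0;
	src_sdd->cmd = 0;
	src_sdd->read_cmd.rdtype = DPAA2_COHERENT_NO_ALLOCATE_CACHE;

	/* Destination side: write command only, no route-by-port. */
	dst_sdd->stride = 0;
	dst_sdd->rbpcmd = 0;
	dst_sdd->cmd = 0;
	dst_sdd->write_cmd.wrttype = DPAA2_COHERENT_ALLOCATE_CACHE;
}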

#define QDMA_SG_FMT_SDB	0x0 /* single data buffer */
#define QDMA_SG_FMT_FDS	0x1 /* frame data section */
#define QDMA_SG_FMT_SGTE	0x2 /* SGT extension */
#define QDMA_SG_SL_SHORT	0x1 /* short length */
#define QDMA_SG_SL_LONG	0x0 /* long length */
#define QDMA_SG_F	0x1 /* last sg entry */
#define QDMA_SG_BMT_ENABLE DPAA2_QDMA_BMT_ENABLE
#define QDMA_SG_BMT_DISABLE DPAA2_QDMA_BMT_DISABLE

struct __rte_packed_begin qdma_sg_entry {
	uint32_t addr_lo;		/* address 0:31 */
	uint32_t addr_hi:17;	/* address 32:48 */
	uint32_t rsv:15;
	union {
		uint32_t data_len_sl0;	/* SL=0, the long format */
		struct {
			uint32_t len:17;	/* SL=1, the short format */
			uint32_t reserve:3;
			uint32_t sf:1;
			uint32_t sr:1;
			uint32_t size:10;	/* buff size */
		} data_len_sl1;
	} data_len;					/* AVAIL_LENGTH */
	union {
		uint32_t ctrl_fields;
		struct {
			uint32_t bpid:14;
			uint32_t ivp:1;
			uint32_t bmt:1;
			uint32_t offset:12;
			uint32_t fmt:2;
			uint32_t sl:1;
			uint32_t f:1;
		} ctrl;
	};
} __rte_packed_end;
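
/*
 * Illustrative sketch, not part of the driver API: filling one scatter-gather
 * entry above for a single data buffer using the long-length format. The
 * helper name and parameters are hypothetical.
 */
static inline void
dpaa2_qdma_sge_example_fill(struct qdma_sg_entry *sge,
	uint64_t iova, uint32_t len, int is_last)
{
	/* 49-bit IOVA split across addr_lo and addr_hi. */
	sge->addr_lo = (uint32_t)iova;
	sge->addr_hi = iova >> 32;
	sge->rsv = 0;
	/* SL=0: the full 32-bit length field is used. */
	sge->data_len.data_len_sl0 = len;
	sge->ctrl_fields = 0;
	sge->ctrl.fmt = QDMA_SG_FMT_SDB;
	sge->ctrl.sl = QDMA_SG_SL_LONG;
	sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
	sge->ctrl.f = is_last ? QDMA_SG_F : 0;
}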

struct dpaa2_qdma_rbp {
	uint32_t use_ultrashort:1;
	uint32_t enable:1;
	/**
	 * dportid:
	 * 0000 PCI-Express 1
	 * 0001 PCI-Express 2
	 * 0010 PCI-Express 3
	 * 0011 PCI-Express 4
	 * 0100 PCI-Express 5
	 * 0101 PCI-Express 6
	 */
	uint32_t dportid:4;
	uint32_t dpfid:2;
	uint32_t dvfid:6;
	uint32_t dvfa:1;
	/* using route by port for destination */
	uint32_t drbp:1;
	/**
	 * sportid:
	 * 0000 PCI-Express 1
	 * 0001 PCI-Express 2
	 * 0010 PCI-Express 3
	 * 0011 PCI-Express 4
	 * 0100 PCI-Express 5
	 * 0101 PCI-Express 6
	 */
	uint32_t sportid:4;
	uint32_t spfid:2;
	uint32_t svfid:6;
	uint32_t svfa:1;
	/* using route by port for source */
	uint32_t srbp:1;
	uint32_t rsv:2;
};
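
/*
 * Illustrative sketch, not part of the driver API: a route-by-port setup for
 * a memory-to-PCIe copy where only the destination side is routed. The port
 * and function IDs below are placeholders.
 */
static inline void
dpaa2_qdma_rbp_example_mem_to_pcie(struct dpaa2_qdma_rbp *rbp)
{
	*rbp = (struct dpaa2_qdma_rbp) {
		.enable = 1,
		.drbp = 1,	/* route-by-port on the destination only */
		.dportid = 0,	/* 0000b: PCI-Express 1, per the encoding above */
		.dpfid = 0,
		.dvfa = 0,	/* physical function, no VF addressing */
	};
}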

enum dpaa2_qdma_fd_type {
	DPAA2_QDMA_FD_SHORT = 1,
	DPAA2_QDMA_FD_LONG = 2,
	DPAA2_QDMA_FD_SG = 3
};

#define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
#define DPAA2_QDMA_FD_ATT_MAX_IDX \
	((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1)
#define DPAA2_QDMA_FD_ATT_TYPE(att) \
	((att) >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET)
#define DPAA2_QDMA_FD_ATT_CNTX(att) \
	((att) & DPAA2_QDMA_FD_ATT_MAX_IDX)

#define DPAA2_QDMA_MAX_DESC ((DPAA2_QDMA_FD_ATT_MAX_IDX + 1) / 2)
#define DPAA2_QDMA_MIN_DESC 1

static inline void
dpaa2_qdma_fd_set_addr(struct qbman_fd *fd,
	uint64_t addr)
{
	fd->simple_ddr.saddr_lo = lower_32_bits(addr);
	fd->simple_ddr.saddr_hi = upper_32_bits(addr);
}

static inline void
dpaa2_qdma_fd_save_att(struct qbman_fd *fd,
	uint16_t job_idx, enum dpaa2_qdma_fd_type type)
{
	RTE_ASSERT(job_idx <= DPAA2_QDMA_FD_ATT_MAX_IDX);
	fd->simple_ddr.rsv1_att = job_idx |
		(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
}

static inline uint16_t
dpaa2_qdma_fd_get_att(const struct qbman_fd *fd)
{
	return fd->simple_ddr.rsv1_att;
}
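
/*
 * Illustrative sketch, not part of the driver API: how the ATT helpers and
 * macros above pair up. The job index and FD type saved at enqueue time are
 * recovered from the 16-bit ATT field at completion time.
 */
static inline uint16_t
dpaa2_qdma_fd_att_example(struct qbman_fd *fd)
{
	uint16_t att;

	/* Enqueue side: stash job index 5, tagged as a long-format FD. */
	dpaa2_qdma_fd_save_att(fd, 5, DPAA2_QDMA_FD_LONG);

	/* Completion side: split the field back into type and index. */
	att = dpaa2_qdma_fd_get_att(fd);
	RTE_ASSERT(DPAA2_QDMA_FD_ATT_TYPE(att) == DPAA2_QDMA_FD_LONG);

	/* Recovers the job index (5) saved above. */
	return DPAA2_QDMA_FD_ATT_CNTX(att);
}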

enum {
	DPAA2_QDMA_SDD_FLE,
	DPAA2_QDMA_SRC_FLE,
	DPAA2_QDMA_DST_FLE,
	DPAA2_QDMA_MAX_FLE
};

enum {
	DPAA2_QDMA_SRC_SDD,
	DPAA2_QDMA_DST_SDD,
	DPAA2_QDMA_MAX_SDD
};

struct __rte_packed_begin qdma_cntx_fle_sdd {
	struct qbman_fle fle[DPAA2_QDMA_MAX_FLE];
	struct qdma_sdd sdd[DPAA2_QDMA_MAX_SDD];
} __rte_packed_end;

struct __rte_packed_begin qdma_cntx_sg {
	struct qdma_cntx_fle_sdd fle_sdd;
	struct qdma_sg_entry sg_src_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
	struct qdma_sg_entry sg_dst_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
	uint16_t cntx_idx[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
	uint16_t job_nb;
	uint16_t rsv[3];
} __rte_packed_end;

#define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK)))

#define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
	((flag) >> RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
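
/*
 * Illustrative sketch, not part of the driver API: on the enqueue path the
 * rte_dma flags word carries either a per-copy index or the address of a
 * struct qdma_cntx_sg, and the two macros above recover those values. The
 * helper name, and the assumption that both decodes apply to the same flags
 * word, are hypothetical.
 */
static inline struct qdma_cntx_sg *
dpaa2_qdma_sg_flag_decode_example(uint64_t flags, uint16_t *copy_idx)
{
	/* Indexed copy: the copy index sits above the low flag bits. */
	*copy_idx = DPAA2_QDMA_IDX_FROM_FLAG(flags);

	/* SG submission: the aligned upper bits hold the context address. */
	return DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flags);
}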

/** Represents a DPDMAI device */
struct dpaa2_dpdmai_dev {
	/** Pointer to next device instance */
	TAILQ_ENTRY(dpaa2_qdma_device) next;
	/** HW ID of the DPDMAI object */
	uint32_t dpdmai_id;
	/** Token of this device */
	uint16_t token;
	/** Number of queues in this DPDMAI device */
	uint8_t num_queues;
	/** RX queues */
	struct dpaa2_queue rx_queue[DPAA2_DPDMAI_MAX_QUEUES];
	/** TX queues */
	struct dpaa2_queue tx_queue[DPAA2_DPDMAI_MAX_QUEUES];
	struct qdma_device *qdma_dev;
};

#define QDMA_CNTX_IDX_RING_EXTRA_SPACE 64
#define QDMA_CNTX_IDX_RING_MAX_FREE \
	(DPAA2_QDMA_MAX_DESC - QDMA_CNTX_IDX_RING_EXTRA_SPACE)
struct qdma_cntx_idx_ring {
	uint16_t cntx_idx_ring[DPAA2_QDMA_MAX_DESC];
	uint16_t start;
	uint16_t tail;
	uint16_t free_space;
	uint16_t nb_in_ring;
};
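
/*
 * Illustrative sketch, not part of the driver API: pushing one completed
 * context index into the ring above, assuming the caller has already checked
 * free_space. The real enqueue/dequeue helpers live in the driver sources.
 */
static inline void
qdma_cntx_idx_ring_example_push(struct qdma_cntx_idx_ring *ring,
	uint16_t cntx_idx)
{
	ring->cntx_idx_ring[ring->tail] = cntx_idx;
	/* DPAA2_QDMA_MAX_DESC is a power of two, so masking wraps the tail. */
	ring->tail = (ring->tail + 1) & (DPAA2_QDMA_MAX_DESC - 1);
	ring->free_space--;
	ring->nb_in_ring++;
}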

#define DPAA2_QDMA_DESC_DEBUG_FLAG (1 << 0)

/** Represents a QDMA virtual queue */
struct qdma_virt_queue {
	/** Associated hw queue */
	struct dpaa2_dpdmai_dev *dpdmai_dev;
	/** FLE pool for the queue */
	struct rte_mempool *fle_pool;
	uint64_t fle_iova2va_offset;
	void **fle_elem;
	/** Route by port */
	struct dpaa2_qdma_rbp rbp;
	/** States if FLEs are pre-populated for this vq */
	uint8_t fle_pre_populate;
	/** Number of descriptors for the virtual DMA channel */
	uint16_t nb_desc;
	/* Total number of enqueues on this VQ */
	uint64_t num_enqueues;
	/* Total number of dequeues from this VQ */
	uint64_t num_dequeues;
	uint64_t copy_num;

	uint16_t vq_id;
	uint32_t flags;
	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
	uint16_t fd_idx;
	struct qdma_cntx_idx_ring *ring_cntx_idx;

	/** Used when silent mode is enabled */
	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
	struct qdma_cntx_fle_sdd *cntx_fle_sdd[DPAA2_QDMA_MAX_DESC];
	uint16_t silent_idx;

	int num_valid_jobs;
	int using_short_fd;

	struct rte_dma_stats stats;
};

/** Represents a QDMA device. */
struct qdma_device {
	/** VQ's of this device */
	struct qdma_virt_queue *vqs;
	/** Total number of VQ's */
	uint16_t num_vqs;
	uint8_t is_silent;
};

#endif /* _DPAA2_QDMA_H_ */