xref: /dpdk/drivers/dma/dpaa/dpaa_qdma.c (revision a63c6426fdfd9233ba9c7d4503e52bff3732fe69)
1583f3732SGagandeep Singh /* SPDX-License-Identifier: BSD-3-Clause
2f1d30e27SJun Yang  * Copyright 2021-2024 NXP
3583f3732SGagandeep Singh  */
4583f3732SGagandeep Singh 
5a2f1da7dSDavid Marchand #include <bus_dpaa_driver.h>
6cc166b51SGagandeep Singh #include <rte_dmadev_pmd.h>
7*a63c6426SJun Yang #include <rte_kvargs.h>
8cc166b51SGagandeep Singh 
9cc166b51SGagandeep Singh #include "dpaa_qdma.h"
10cc166b51SGagandeep Singh #include "dpaa_qdma_logs.h"
11cc166b51SGagandeep Singh 
12f1d30e27SJun Yang static uint32_t s_sg_max_entry_sz = 2000;
13*a63c6426SJun Yang static bool s_hw_err_check;
14*a63c6426SJun Yang 
15*a63c6426SJun Yang #define DPAA_DMA_ERROR_CHECK "dpaa_dma_err_check"
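/*
 * Devargs key to enable HW error checking. It is presumably parsed with
 * rte_kvargs at probe time (outside this section) and sets s_hw_err_check,
 * which makes the dequeue paths below read the qDMA error/status registers
 * via dpaa_qdma_err_handle().
 */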
16f1d30e27SJun Yang 
17453d8273SGagandeep Singh static inline void
18f1d30e27SJun Yang qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
19453d8273SGagandeep Singh {
20453d8273SGagandeep Singh 	ccdf->addr_hi = upper_32_bits(addr);
21453d8273SGagandeep Singh 	ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
22453d8273SGagandeep Singh }
23453d8273SGagandeep Singh 
24f1d30e27SJun Yang static inline void
25f1d30e27SJun Yang qdma_desc_sge_addr_set64(struct fsl_qdma_comp_sg_desc *sge, u64 addr)
267da29a64SGagandeep Singh {
27f1d30e27SJun Yang 	sge->addr_hi = upper_32_bits(addr);
28f1d30e27SJun Yang 	sge->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
297da29a64SGagandeep Singh }
307da29a64SGagandeep Singh 
317da29a64SGagandeep Singh static inline int
32f1d30e27SJun Yang qdma_ccdf_get_queue(struct fsl_qdma_comp_cmd_desc *ccdf,
33f1d30e27SJun Yang 	uint8_t *queue_idx)
347da29a64SGagandeep Singh {
35f1d30e27SJun Yang 	uint64_t addr = ((uint64_t)ccdf->addr_hi) << 32 | ccdf->addr_lo;
36f1d30e27SJun Yang 
37f1d30e27SJun Yang 	if (addr && queue_idx)
38f1d30e27SJun Yang 		*queue_idx = ccdf->queue;
39f1d30e27SJun Yang 	if (addr) {
40f1d30e27SJun Yang 		ccdf->addr_hi = 0;
41f1d30e27SJun Yang 		ccdf->addr_lo = 0;
42f1d30e27SJun Yang 		return true;
437da29a64SGagandeep Singh 	}
447da29a64SGagandeep Singh 
45f1d30e27SJun Yang 	return false;
467da29a64SGagandeep Singh }
477da29a64SGagandeep Singh 
48cc166b51SGagandeep Singh static inline int
49cc166b51SGagandeep Singh ilog2(int x)
50cc166b51SGagandeep Singh {
51cc166b51SGagandeep Singh 	int log = 0;
52cc166b51SGagandeep Singh 
53cc166b51SGagandeep Singh 	x >>= 1;
54cc166b51SGagandeep Singh 
55cc166b51SGagandeep Singh 	while (x) {
56cc166b51SGagandeep Singh 		log++;
57cc166b51SGagandeep Singh 		x >>= 1;
58cc166b51SGagandeep Singh 	}
59cc166b51SGagandeep Singh 	return log;
60cc166b51SGagandeep Singh }
61cc166b51SGagandeep Singh 
62f1d30e27SJun Yang static inline int
63f1d30e27SJun Yang ilog2_qsize(uint32_t q_size)
64f1d30e27SJun Yang {
65f1d30e27SJun Yang 	return (ilog2(q_size) - ilog2(64));
66f1d30e27SJun Yang }
67f1d30e27SJun Yang 
68f1d30e27SJun Yang static inline int
69bdcb782aSJun Yang ilog2_qthld(uint32_t q_thld)
70bdcb782aSJun Yang {
71bdcb782aSJun Yang 	return (ilog2(q_thld) - ilog2(16));
72bdcb782aSJun Yang }
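
/*
 * Worked example for the two helpers above: ilog2(64) = 6 and ilog2(16) = 4,
 * so a queue of 2048 entries is encoded as ilog2_qsize(2048) = 11 - 6 = 5
 * (sizes are expressed in units of 64 entries) and a threshold of 64 entries
 * as ilog2_qthld(64) = 6 - 4 = 2 (thresholds in units of 16 entries). The
 * 2048/64 figures are illustrative; fsl_qdma_reg_init() below passes the
 * queue depth n_cq (QDMA_QUEUE_SIZE) for both encodings.
 */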
73bdcb782aSJun Yang 
74bdcb782aSJun Yang static inline int
75f1d30e27SJun Yang fsl_qdma_queue_bd_in_hw(struct fsl_qdma_queue *fsl_queue)
76f1d30e27SJun Yang {
77f1d30e27SJun Yang 	struct rte_dma_stats *stats = &fsl_queue->stats;
78f1d30e27SJun Yang 
79f1d30e27SJun Yang 	return (stats->submitted - stats->completed);
80f1d30e27SJun Yang }
81f1d30e27SJun Yang 
82cc166b51SGagandeep Singh static u32
83cc166b51SGagandeep Singh qdma_readl(void *addr)
84cc166b51SGagandeep Singh {
85cc166b51SGagandeep Singh 	return QDMA_IN(addr);
86cc166b51SGagandeep Singh }
87cc166b51SGagandeep Singh 
88cc166b51SGagandeep Singh static void
89cc166b51SGagandeep Singh qdma_writel(u32 val, void *addr)
90cc166b51SGagandeep Singh {
91cc166b51SGagandeep Singh 	QDMA_OUT(addr, val);
92cc166b51SGagandeep Singh }
93cc166b51SGagandeep Singh 
947da29a64SGagandeep Singh static u32
957da29a64SGagandeep Singh qdma_readl_be(void *addr)
967da29a64SGagandeep Singh {
977da29a64SGagandeep Singh 	return QDMA_IN_BE(addr);
987da29a64SGagandeep Singh }
997da29a64SGagandeep Singh 
1007da29a64SGagandeep Singh static void
1017da29a64SGagandeep Singh qdma_writel_be(u32 val, void *addr)
1027da29a64SGagandeep Singh {
1037da29a64SGagandeep Singh 	QDMA_OUT_BE(addr, val);
1047da29a64SGagandeep Singh }
1057da29a64SGagandeep Singh 
106f1d30e27SJun Yang static void *
107f1d30e27SJun Yang dma_pool_alloc(char *nm, int size, int aligned, dma_addr_t *phy_addr)
108cc166b51SGagandeep Singh {
109cc166b51SGagandeep Singh 	void *virt_addr;
110cc166b51SGagandeep Singh 
111f1d30e27SJun Yang 	virt_addr = rte_zmalloc(nm, size, aligned);
112cc166b51SGagandeep Singh 	if (!virt_addr)
113cc166b51SGagandeep Singh 		return NULL;
114cc166b51SGagandeep Singh 
115cc166b51SGagandeep Singh 	*phy_addr = rte_mem_virt2iova(virt_addr);
116cc166b51SGagandeep Singh 
117cc166b51SGagandeep Singh 	return virt_addr;
118cc166b51SGagandeep Singh }
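
/*
 * Minimal usage sketch of dma_pool_alloc() (illustrative values only): it
 * returns the virtual address and reports the IOVA the hardware should use.
 *
 *	dma_addr_t iova;
 *	void *ring = dma_pool_alloc("example", 4096, 4096, &iova);
 *	if (ring)
 *		... program 'iova' into the engine, access 'ring' from the CPU
 */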
119cc166b51SGagandeep Singh 
120453d8273SGagandeep Singh /*
121453d8273SGagandeep Singh  * Pre-allocate command descriptors and compound S/G tables for enqueue.
122453d8273SGagandeep Singh  */
123453d8273SGagandeep Singh static int
124f1d30e27SJun Yang fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
125453d8273SGagandeep Singh {
1267a7bb89eSJun Yang 	struct fsl_qdma_engine *fsl_qdma = queue->engine;
127453d8273SGagandeep Singh 	struct fsl_qdma_sdf *sdf;
128453d8273SGagandeep Singh 	struct fsl_qdma_ddf *ddf;
129f1d30e27SJun Yang 	struct fsl_qdma_comp_cmd_desc *ccdf;
130f1d30e27SJun Yang 	uint16_t i, j;
131f1d30e27SJun Yang 	struct fsl_qdma_cmpd_ft *ft;
132453d8273SGagandeep Singh 
133f1d30e27SJun Yang 	for (i = 0; i < queue->n_cq; i++) {
134f1d30e27SJun Yang 		dma_addr_t phy_ft = 0;
135453d8273SGagandeep Singh 
136f1d30e27SJun Yang 		queue->ft[i] = dma_pool_alloc(NULL,
137f1d30e27SJun Yang 			sizeof(struct fsl_qdma_cmpd_ft),
138f1d30e27SJun Yang 			RTE_CACHE_LINE_SIZE, &phy_ft);
139f1d30e27SJun Yang 		if (!queue->ft[i])
140f1d30e27SJun Yang 			goto fail;
141f1d30e27SJun Yang 		if (((uint64_t)queue->ft[i]) &
142f1d30e27SJun Yang 			(RTE_CACHE_LINE_SIZE - 1)) {
143f1d30e27SJun Yang 			DPAA_QDMA_ERR("FD[%d] addr(%p) not cache aligned",
144f1d30e27SJun Yang 				i, queue->ft[i]);
145f1d30e27SJun Yang 			rte_free(queue->ft[i]);
146f1d30e27SJun Yang 			queue->ft[i] = NULL;
147453d8273SGagandeep Singh 			goto fail;
148453d8273SGagandeep Singh 		}
149f1d30e27SJun Yang 		if (((uint64_t)(&queue->ft[i]->desc_ssge[0])) &
150f1d30e27SJun Yang 			(RTE_CACHE_LINE_SIZE - 1)) {
151f1d30e27SJun Yang 			DPAA_QDMA_ERR("FD[%d] SGE addr(%p) not cache aligned",
152f1d30e27SJun Yang 				i, &queue->ft[i]->desc_ssge[0]);
153f1d30e27SJun Yang 			rte_free(queue->ft[i]);
154f1d30e27SJun Yang 			queue->ft[i] = NULL;
155453d8273SGagandeep Singh 			goto fail;
156453d8273SGagandeep Singh 		}
157f1d30e27SJun Yang 		queue->ft[i]->phy_ssge = phy_ft +
158f1d30e27SJun Yang 			offsetof(struct fsl_qdma_cmpd_ft, desc_ssge);
159f1d30e27SJun Yang 		queue->ft[i]->phy_dsge = phy_ft +
160f1d30e27SJun Yang 			offsetof(struct fsl_qdma_cmpd_ft, desc_dsge);
161f1d30e27SJun Yang 		queue->ft[i]->phy_df = phy_ft +
162f1d30e27SJun Yang 			offsetof(struct fsl_qdma_cmpd_ft, df);
163453d8273SGagandeep Singh 
164f1d30e27SJun Yang 		ft = queue->ft[i];
165f1d30e27SJun Yang 		sdf = &ft->df.sdf;
166f1d30e27SJun Yang 		ddf = &ft->df.ddf;
167453d8273SGagandeep Singh 		/* Compound Command Descriptor (Frame List Table) */
168f1d30e27SJun Yang 		qdma_desc_sge_addr_set64(&ft->desc_buf, ft->phy_df);
169453d8273SGagandeep Singh 		/* The length must be 32, as required for a Compound S/G Descriptor */
170f1d30e27SJun Yang 		ft->desc_buf.length = sizeof(struct fsl_qdma_df);
171453d8273SGagandeep Singh 
172f1d30e27SJun Yang 		/* Descriptor Buffer */
173f1d30e27SJun Yang 		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
1748c53b9b7SJun Yang #ifdef RTE_DMA_DPAA_ERRATA_ERR050265
1758c53b9b7SJun Yang 		sdf->prefetch = 1;
1768c53b9b7SJun Yang #endif
177f1d30e27SJun Yang 		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
178f1d30e27SJun Yang 		ddf->lwc = FSL_QDMA_CMD_LWC;
179f1d30e27SJun Yang 
180f1d30e27SJun Yang 		ccdf = &queue->cq[i];
181f1d30e27SJun Yang 		qdma_desc_addr_set64(ccdf, phy_ft);
182f1d30e27SJun Yang 		ccdf->format = FSL_QDMA_COMP_SG_FORMAT;
1837a7bb89eSJun Yang 		if (!fsl_qdma->is_silent)
1847a7bb89eSJun Yang 			ccdf->ser = 1;
185f1d30e27SJun Yang 		ccdf->queue = queue->queue_id;
186453d8273SGagandeep Singh 	}
187f1d30e27SJun Yang 	queue->ci = 0;
188453d8273SGagandeep Singh 
189453d8273SGagandeep Singh 	return 0;
190453d8273SGagandeep Singh 
191453d8273SGagandeep Singh fail:
192f1d30e27SJun Yang 	for (j = 0; j < i; j++)
193f1d30e27SJun Yang 		rte_free(queue->ft[j]);
194453d8273SGagandeep Singh 
195453d8273SGagandeep Singh 	return -ENOMEM;
196453d8273SGagandeep Singh }
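
/*
 * Per-entry layout resulting from the pre-allocation above, as built from the
 * fields used in this file (struct definitions live in dpaa_qdma.h):
 *
 *	queue->cq[i] (ccdf) -------------> queue->ft[i] (compound frame table)
 *	    ft->desc_buf  ---> ft->df (source/destination descriptors sdf/ddf)
 *	    ft->desc_sbuf ---> source buffer, or ft->desc_ssge[] when extended
 *	    ft->desc_dbuf ---> dest buffer, or ft->desc_dsge[] when extended
 */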
197453d8273SGagandeep Singh 
198f1d30e27SJun Yang static int
199f1d30e27SJun Yang fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
200f1d30e27SJun Yang 	int queue_id, int block_id)
2017da29a64SGagandeep Singh {
202f1d30e27SJun Yang 	struct fsl_qdma_queue *cmd_queue;
203f1d30e27SJun Yang 	uint32_t queue_size;
204f1d30e27SJun Yang 	char nm[RTE_MEMZONE_NAMESIZE];
2057da29a64SGagandeep Singh 
206f1d30e27SJun Yang 	cmd_queue = &fsl_qdma->cmd_queues[block_id][queue_id];
207f1d30e27SJun Yang 	cmd_queue->engine = fsl_qdma;
208f1d30e27SJun Yang 
209f1d30e27SJun Yang 	queue_size = sizeof(struct fsl_qdma_comp_cmd_desc) *
210f1d30e27SJun Yang 		QDMA_QUEUE_SIZE;
211f1d30e27SJun Yang 
212f1d30e27SJun Yang 	sprintf(nm, "Command queue_%d_%d",
213f1d30e27SJun Yang 		block_id, queue_id);
214f1d30e27SJun Yang 	cmd_queue->cq = dma_pool_alloc(nm, queue_size,
215f1d30e27SJun Yang 		queue_size, &cmd_queue->bus_addr);
216f1d30e27SJun Yang 	if (!cmd_queue->cq) {
217f1d30e27SJun Yang 		DPAA_QDMA_ERR("%s alloc failed!", nm);
218f1d30e27SJun Yang 		return -ENOMEM;
2197da29a64SGagandeep Singh 	}
2207da29a64SGagandeep Singh 
221f1d30e27SJun Yang 	cmd_queue->block_vir = fsl_qdma->block_base +
222f1d30e27SJun Yang 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
223f1d30e27SJun Yang 	cmd_queue->n_cq = QDMA_QUEUE_SIZE;
224f1d30e27SJun Yang 	cmd_queue->queue_id = queue_id;
225f1d30e27SJun Yang 	cmd_queue->block_id = block_id;
226f1d30e27SJun Yang 	cmd_queue->pending_start = 0;
227f1d30e27SJun Yang 	cmd_queue->pending_num = 0;
228f1d30e27SJun Yang 	cmd_queue->complete_start = 0;
229f1d30e27SJun Yang 
230f1d30e27SJun Yang 	sprintf(nm, "Compound Table_%d_%d",
231f1d30e27SJun Yang 		block_id, queue_id);
232f1d30e27SJun Yang 	cmd_queue->ft = rte_zmalloc(nm,
233f1d30e27SJun Yang 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
234f1d30e27SJun Yang 	if (!cmd_queue->ft) {
235f1d30e27SJun Yang 		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
236f1d30e27SJun Yang 		rte_free(cmd_queue->cq);
237f1d30e27SJun Yang 		return -ENOMEM;
238f1d30e27SJun Yang 	}
239f1d30e27SJun Yang 	sprintf(nm, "Pending_desc_%d_%d",
240f1d30e27SJun Yang 		block_id, queue_id);
241f1d30e27SJun Yang 	cmd_queue->pending_desc = rte_zmalloc(nm,
242f1d30e27SJun Yang 		sizeof(struct fsl_qdma_desc) * FSL_QDMA_MAX_DESC_NUM, 0);
243f1d30e27SJun Yang 	if (!cmd_queue->pending_desc) {
244f1d30e27SJun Yang 		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
245f1d30e27SJun Yang 		rte_free(cmd_queue->ft);
246f1d30e27SJun Yang 		rte_free(cmd_queue->cq);
247f1d30e27SJun Yang 		return -ENOMEM;
248f1d30e27SJun Yang 	}
249f1d30e27SJun Yang 	sprintf(nm, "complete-burst_ring_%d_%d",
250f1d30e27SJun Yang 		block_id, queue_id);
251f1d30e27SJun Yang 	cmd_queue->complete_burst = rte_ring_create(nm,
252f1d30e27SJun Yang 		QDMA_QUEUE_SIZE * 2, 0,
253f1d30e27SJun Yang 		RING_F_SP_ENQ | RING_F_SC_DEQ);
254f1d30e27SJun Yang 	if (!cmd_queue->complete_burst) {
255f1d30e27SJun Yang 		DPAA_QDMA_ERR("%s create failed!", nm);
256f1d30e27SJun Yang 		rte_free(cmd_queue->pending_desc);
257f1d30e27SJun Yang 		rte_free(cmd_queue->ft);
258f1d30e27SJun Yang 		rte_free(cmd_queue->cq);
259f1d30e27SJun Yang 		return -ENOMEM;
260f1d30e27SJun Yang 	}
261f1d30e27SJun Yang 	sprintf(nm, "complete-desc_ring_%d_%d",
262f1d30e27SJun Yang 		block_id, queue_id);
263f1d30e27SJun Yang 	cmd_queue->complete_desc = rte_ring_create(nm,
264f1d30e27SJun Yang 		FSL_QDMA_MAX_DESC_NUM * 2, 0,
265f1d30e27SJun Yang 		RING_F_SP_ENQ | RING_F_SC_DEQ);
266f1d30e27SJun Yang 	if (!cmd_queue->complete_desc) {
267f1d30e27SJun Yang 		DPAA_QDMA_ERR("%s create failed!", nm);
268f1d30e27SJun Yang 		rte_ring_free(cmd_queue->complete_burst);
269f1d30e27SJun Yang 		rte_free(cmd_queue->pending_desc);
270f1d30e27SJun Yang 		rte_free(cmd_queue->ft);
271f1d30e27SJun Yang 		rte_free(cmd_queue->cq);
272f1d30e27SJun Yang 		return -ENOMEM;
273f1d30e27SJun Yang 	}
274f1d30e27SJun Yang 	sprintf(nm, "complete-pool-desc_ring_%d_%d",
275f1d30e27SJun Yang 		block_id, queue_id);
276f1d30e27SJun Yang 	cmd_queue->complete_pool = rte_ring_create(nm,
277f1d30e27SJun Yang 		FSL_QDMA_MAX_DESC_NUM * 2, 0,
278f1d30e27SJun Yang 		RING_F_SP_ENQ | RING_F_SC_DEQ);
279f1d30e27SJun Yang 	if (!cmd_queue->complete_pool) {
280f1d30e27SJun Yang 		DPAA_QDMA_ERR("%s create failed!", nm);
281f1d30e27SJun Yang 		rte_ring_free(cmd_queue->complete_desc);
282f1d30e27SJun Yang 		rte_ring_free(cmd_queue->complete_burst);
283f1d30e27SJun Yang 		rte_free(cmd_queue->pending_desc);
284f1d30e27SJun Yang 		rte_free(cmd_queue->ft);
285f1d30e27SJun Yang 		rte_free(cmd_queue->cq);
286f1d30e27SJun Yang 		return -ENOMEM;
2877da29a64SGagandeep Singh 	}
2887da29a64SGagandeep Singh 
289f1d30e27SJun Yang 	memset(&cmd_queue->stats, 0, sizeof(struct rte_dma_stats));
290f1d30e27SJun Yang 	cmd_queue->pending_max = FSL_QDMA_MAX_DESC_NUM;
291f1d30e27SJun Yang 
292f1d30e27SJun Yang 	return 0;
293f1d30e27SJun Yang }
294f1d30e27SJun Yang 
295f1d30e27SJun Yang static void
296f1d30e27SJun Yang fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue)
297cc166b51SGagandeep Singh {
298f1d30e27SJun Yang 	rte_free(queue->ft);
299f1d30e27SJun Yang 	rte_free(queue->cq);
300f1d30e27SJun Yang 	rte_free(queue->pending_desc);
301f1d30e27SJun Yang 	rte_ring_free(queue->complete_burst);
302f1d30e27SJun Yang 	rte_ring_free(queue->complete_desc);
303f1d30e27SJun Yang 	rte_ring_free(queue->complete_pool);
304cc166b51SGagandeep Singh }
305cc166b51SGagandeep Singh 
306f1d30e27SJun Yang static void
307f1d30e27SJun Yang fsl_qdma_free_stq_res(struct fsl_qdma_status_queue *queue)
308cc166b51SGagandeep Singh {
309f1d30e27SJun Yang 	rte_free(queue->cq);
310cc166b51SGagandeep Singh }
311cc166b51SGagandeep Singh 
312f1d30e27SJun Yang static int
313f1d30e27SJun Yang fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
314f1d30e27SJun Yang 	uint32_t block_id)
315f1d30e27SJun Yang {
316f1d30e27SJun Yang 	struct fsl_qdma_status_queue *status;
317f1d30e27SJun Yang 	uint32_t status_size;
318cc166b51SGagandeep Singh 
319f1d30e27SJun Yang 	status = &fsl_qdma->stat_queues[block_id];
320f1d30e27SJun Yang 	status->engine = fsl_qdma;
321cc166b51SGagandeep Singh 
322f1d30e27SJun Yang 	status_size = QDMA_STATUS_SIZE *
323f1d30e27SJun Yang 		sizeof(struct fsl_qdma_comp_cmd_desc);
324cc166b51SGagandeep Singh 
325f1d30e27SJun Yang 	status->cq = dma_pool_alloc(NULL, status_size,
326f1d30e27SJun Yang 		status_size, &status->bus_addr);
327cc166b51SGagandeep Singh 
328f1d30e27SJun Yang 	if (!status->cq)
329f1d30e27SJun Yang 		return -ENOMEM;
330f1d30e27SJun Yang 
331f1d30e27SJun Yang 	memset(status->cq, 0x0, status_size);
332f1d30e27SJun Yang 	status->n_cq = QDMA_STATUS_SIZE;
333f1d30e27SJun Yang 	status->complete = 0;
334f1d30e27SJun Yang 	status->block_id = block_id;
335f1d30e27SJun Yang 	status->block_vir = fsl_qdma->block_base +
336f1d30e27SJun Yang 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
337f1d30e27SJun Yang 
338f1d30e27SJun Yang 	return 0;
339cc166b51SGagandeep Singh }
340583f3732SGagandeep Singh 
341583f3732SGagandeep Singh static int
342cc166b51SGagandeep Singh fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
343583f3732SGagandeep Singh {
344cc166b51SGagandeep Singh 	void *ctrl = fsl_qdma->ctrl_base;
345cc166b51SGagandeep Singh 	void *block;
346cc166b51SGagandeep Singh 	int i, count = RETRIES;
347cc166b51SGagandeep Singh 	unsigned int j;
348cc166b51SGagandeep Singh 	u32 reg;
349cc166b51SGagandeep Singh 
350cc166b51SGagandeep Singh 	/* Disable the command queue and wait for idle state. */
351cc166b51SGagandeep Singh 	reg = qdma_readl(ctrl + FSL_QDMA_DMR);
352cc166b51SGagandeep Singh 	reg |= FSL_QDMA_DMR_DQD;
353cc166b51SGagandeep Singh 	qdma_writel(reg, ctrl + FSL_QDMA_DMR);
354cc166b51SGagandeep Singh 	for (j = 0; j < fsl_qdma->num_blocks; j++) {
355cc166b51SGagandeep Singh 		block = fsl_qdma->block_base +
356cc166b51SGagandeep Singh 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
357cc166b51SGagandeep Singh 		for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
358cc166b51SGagandeep Singh 			qdma_writel(0, block + FSL_QDMA_BCQMR(i));
359cc166b51SGagandeep Singh 	}
360cc166b51SGagandeep Singh 	while (true) {
361cc166b51SGagandeep Singh 		reg = qdma_readl(ctrl + FSL_QDMA_DSR);
362cc166b51SGagandeep Singh 		if (!(reg & FSL_QDMA_DSR_DB))
363cc166b51SGagandeep Singh 			break;
364cc166b51SGagandeep Singh 		if (count-- < 0)
365cc166b51SGagandeep Singh 			return -EBUSY;
366cc166b51SGagandeep Singh 		rte_delay_us(100);
367cc166b51SGagandeep Singh 	}
368cc166b51SGagandeep Singh 
369cc166b51SGagandeep Singh 	for (j = 0; j < fsl_qdma->num_blocks; j++) {
370cc166b51SGagandeep Singh 		block = fsl_qdma->block_base +
371cc166b51SGagandeep Singh 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
372cc166b51SGagandeep Singh 
373cc166b51SGagandeep Singh 		/* Disable status queue. */
374cc166b51SGagandeep Singh 		qdma_writel(0, block + FSL_QDMA_BSQMR);
375cc166b51SGagandeep Singh 
376cc166b51SGagandeep Singh 		/*
377cc166b51SGagandeep Singh 		 * clear the command queue interrupt detect register for
378cc166b51SGagandeep Singh 		 * all queues.
379cc166b51SGagandeep Singh 		 */
380cc166b51SGagandeep Singh 		qdma_writel(0xffffffff, block + FSL_QDMA_BCQIDR(0));
381cc166b51SGagandeep Singh 	}
382cc166b51SGagandeep Singh 
383583f3732SGagandeep Singh 	return 0;
384583f3732SGagandeep Singh }
385583f3732SGagandeep Singh 
386f1d30e27SJun Yang static void
387f1d30e27SJun Yang fsl_qdma_data_validation(struct fsl_qdma_desc *desc[],
388f1d30e27SJun Yang 	uint8_t num, struct fsl_qdma_queue *fsl_queue)
3897da29a64SGagandeep Singh {
390f1d30e27SJun Yang 	uint32_t i, j;
391f1d30e27SJun Yang 	uint8_t *v_src, *v_dst;
392f1d30e27SJun Yang 	char err_msg[512];
393f1d30e27SJun Yang 	int offset;
3947da29a64SGagandeep Singh 
396f1d30e27SJun Yang 	offset = sprintf(err_msg, "Fatal TC%d/queue%d: ",
397f1d30e27SJun Yang 		fsl_queue->block_id,
398f1d30e27SJun Yang 		fsl_queue->queue_id);
399f1d30e27SJun Yang 	for (i = 0; i < num; i++) {
400f1d30e27SJun Yang 		v_src = rte_mem_iova2virt(desc[i]->src);
401f1d30e27SJun Yang 		v_dst = rte_mem_iova2virt(desc[i]->dst);
402f1d30e27SJun Yang 		for (j = 0; j < desc[i]->len; j++) {
403f1d30e27SJun Yang 			if (v_src[j] != v_dst[j]) {
404f1d30e27SJun Yang 				sprintf(&err_msg[offset],
405f1d30e27SJun Yang 					"job[%"PRIu64"]:src(%p)[%d](%d)!=dst(%p)[%d](%d)",
406f1d30e27SJun Yang 					desc[i]->flag, v_src, j, v_src[j],
407f1d30e27SJun Yang 					v_dst, j, v_dst[j]);
408f1d30e27SJun Yang 				DPAA_QDMA_ERR("%s, stop validating!",
409f1d30e27SJun Yang 					err_msg);
410f1d30e27SJun Yang 				return;
4117da29a64SGagandeep Singh 			}
412f1d30e27SJun Yang 		}
413f1d30e27SJun Yang 	}
4147da29a64SGagandeep Singh }
4157da29a64SGagandeep Singh 
4167da29a64SGagandeep Singh static int
417cc166b51SGagandeep Singh fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
418583f3732SGagandeep Singh {
419cc166b51SGagandeep Singh 	struct fsl_qdma_queue *temp;
420f1d30e27SJun Yang 	struct fsl_qdma_status_queue *temp_stat;
421cc166b51SGagandeep Singh 	void *ctrl = fsl_qdma->ctrl_base;
422cc166b51SGagandeep Singh 	void *block;
423cc166b51SGagandeep Singh 	u32 i, j;
424cc166b51SGagandeep Singh 	u32 reg;
425cc166b51SGagandeep Singh 	int ret, val;
426cc166b51SGagandeep Singh 
427cc166b51SGagandeep Singh 	/* Try to halt the qDMA engine first. */
428cc166b51SGagandeep Singh 	ret = fsl_qdma_halt(fsl_qdma);
429cc166b51SGagandeep Singh 	if (ret) {
430cc166b51SGagandeep Singh 		DPAA_QDMA_ERR("DMA halt failed!");
431cc166b51SGagandeep Singh 		return ret;
432cc166b51SGagandeep Singh 	}
433cc166b51SGagandeep Singh 
434cc166b51SGagandeep Singh 	for (j = 0; j < fsl_qdma->num_blocks; j++) {
435cc166b51SGagandeep Singh 		block = fsl_qdma->block_base +
436cc166b51SGagandeep Singh 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
437f1d30e27SJun Yang 		for (i = 0; i < QDMA_QUEUES; i++) {
438f1d30e27SJun Yang 			temp = &fsl_qdma->cmd_queues[j][i];
439cc166b51SGagandeep Singh 			/*
440cc166b51SGagandeep Singh 			 * Initialize Command Queue registers to
441cc166b51SGagandeep Singh 			 * point to the first
442cc166b51SGagandeep Singh 			 * command descriptor in memory.
443cc166b51SGagandeep Singh 			 * Dequeue Pointer Address Registers
444cc166b51SGagandeep Singh 			 * Enqueue Pointer Address Registers
445cc166b51SGagandeep Singh 			 */
446cc166b51SGagandeep Singh 
447cc166b51SGagandeep Singh 			qdma_writel(lower_32_bits(temp->bus_addr),
448cc166b51SGagandeep Singh 				    block + FSL_QDMA_BCQDPA_SADDR(i));
449cc166b51SGagandeep Singh 			qdma_writel(upper_32_bits(temp->bus_addr),
450cc166b51SGagandeep Singh 				    block + FSL_QDMA_BCQEDPA_SADDR(i));
451cc166b51SGagandeep Singh 			qdma_writel(lower_32_bits(temp->bus_addr),
452cc166b51SGagandeep Singh 				    block + FSL_QDMA_BCQEPA_SADDR(i));
453cc166b51SGagandeep Singh 			qdma_writel(upper_32_bits(temp->bus_addr),
454cc166b51SGagandeep Singh 				    block + FSL_QDMA_BCQEEPA_SADDR(i));
455cc166b51SGagandeep Singh 
456cc166b51SGagandeep Singh 			/* Initialize the queue mode. */
457cc166b51SGagandeep Singh 			reg = FSL_QDMA_BCQMR_EN;
458bdcb782aSJun Yang 			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2_qthld(temp->n_cq));
459bdcb782aSJun Yang 			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2_qsize(temp->n_cq));
460bdcb782aSJun Yang 			temp->le_cqmr = reg;
461cc166b51SGagandeep Singh 			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
462cc166b51SGagandeep Singh 		}
463cc166b51SGagandeep Singh 
464cc166b51SGagandeep Singh 		/*
465cc166b51SGagandeep Singh 		 * Workaround for erratum: ERR010812.
466cc166b51SGagandeep Singh 		 * XOFF must be enabled to avoid enqueue rejections.
467cc166b51SGagandeep Singh 		 * Set SQCCMR ENTER_WM to 0x20.
468cc166b51SGagandeep Singh 		 */
469cc166b51SGagandeep Singh 
470cc166b51SGagandeep Singh 		qdma_writel(FSL_QDMA_SQCCMR_ENTER_WM,
471cc166b51SGagandeep Singh 			    block + FSL_QDMA_SQCCMR);
472cc166b51SGagandeep Singh 
473cc166b51SGagandeep Singh 		/*
474cc166b51SGagandeep Singh 		 * Initialize status queue registers to point to the first
475cc166b51SGagandeep Singh 		 * command descriptor in memory.
476cc166b51SGagandeep Singh 		 * Dequeue Pointer Address Registers
477cc166b51SGagandeep Singh 		 * Enqueue Pointer Address Registers
478cc166b51SGagandeep Singh 		 */
479cc166b51SGagandeep Singh 
480f1d30e27SJun Yang 		temp_stat = &fsl_qdma->stat_queues[j];
481f1d30e27SJun Yang 		qdma_writel(upper_32_bits(temp_stat->bus_addr),
482cc166b51SGagandeep Singh 			block + FSL_QDMA_SQEEPAR);
483f1d30e27SJun Yang 		qdma_writel(lower_32_bits(temp_stat->bus_addr),
484cc166b51SGagandeep Singh 			block + FSL_QDMA_SQEPAR);
485f1d30e27SJun Yang 		qdma_writel(upper_32_bits(temp_stat->bus_addr),
486cc166b51SGagandeep Singh 			block + FSL_QDMA_SQEDPAR);
487f1d30e27SJun Yang 		qdma_writel(lower_32_bits(temp_stat->bus_addr),
488cc166b51SGagandeep Singh 			block + FSL_QDMA_SQDPAR);
489cc166b51SGagandeep Singh 		/* Disable status queue interrupts. */
490cc166b51SGagandeep Singh 
491cc166b51SGagandeep Singh 		qdma_writel(0x0, block + FSL_QDMA_BCQIER(0));
492cc166b51SGagandeep Singh 		qdma_writel(0x0, block + FSL_QDMA_BSQICR);
493cc166b51SGagandeep Singh 		qdma_writel(0x0, block + FSL_QDMA_CQIER);
494cc166b51SGagandeep Singh 
495cc166b51SGagandeep Singh 		/* Initialize the status queue mode. */
496cc166b51SGagandeep Singh 		reg = FSL_QDMA_BSQMR_EN;
497f1d30e27SJun Yang 		val = ilog2_qsize(temp_stat->n_cq);
498cc166b51SGagandeep Singh 		reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
499cc166b51SGagandeep Singh 		qdma_writel(reg, block + FSL_QDMA_BSQMR);
500cc166b51SGagandeep Singh 	}
501cc166b51SGagandeep Singh 
502cc166b51SGagandeep Singh 	reg = qdma_readl(ctrl + FSL_QDMA_DMR);
503cc166b51SGagandeep Singh 	reg &= ~FSL_QDMA_DMR_DQD;
504cc166b51SGagandeep Singh 	qdma_writel(reg, ctrl + FSL_QDMA_DMR);
505cc166b51SGagandeep Singh 
506cc166b51SGagandeep Singh 	return 0;
507cc166b51SGagandeep Singh }
508cc166b51SGagandeep Singh 
509f1d30e27SJun Yang static uint16_t
510f1d30e27SJun Yang dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
511f1d30e27SJun Yang 	uint8_t block_id)
5127da29a64SGagandeep Singh {
513f1d30e27SJun Yang 	struct fsl_qdma_status_queue *stat_queue;
514f1d30e27SJun Yang 	struct fsl_qdma_queue *cmd_queue;
515f1d30e27SJun Yang 	struct fsl_qdma_comp_cmd_desc *cq;
516f1d30e27SJun Yang 	uint16_t start, count = 0;
517f1d30e27SJun Yang 	uint8_t qid = 0;
518f1d30e27SJun Yang 	uint32_t reg;
519453d8273SGagandeep Singh 	int ret;
520f1d30e27SJun Yang 	uint8_t *block;
521f1d30e27SJun Yang 	uint16_t *dq_complete;
522f1d30e27SJun Yang 	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
523453d8273SGagandeep Singh 
524f1d30e27SJun Yang 	stat_queue = &fsl_qdma->stat_queues[block_id];
525f1d30e27SJun Yang 	cq = stat_queue->cq;
526f1d30e27SJun Yang 	start = stat_queue->complete;
527453d8273SGagandeep Singh 
528f1d30e27SJun Yang 	block = fsl_qdma->block_base +
529f1d30e27SJun Yang 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
530453d8273SGagandeep Singh 
531f1d30e27SJun Yang 	do {
532f1d30e27SJun Yang 		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
533f1d30e27SJun Yang 		if (reg & FSL_QDMA_BSQSR_QE_BE)
534f1d30e27SJun Yang 			break;
535f1d30e27SJun Yang 
536f1d30e27SJun Yang 		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
537f1d30e27SJun Yang 		ret = qdma_ccdf_get_queue(&cq[start], &qid);
538f1d30e27SJun Yang 		if (ret == true) {
539f1d30e27SJun Yang 			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
540f1d30e27SJun Yang 
541f1d30e27SJun Yang 			ret = rte_ring_dequeue(cmd_queue->complete_burst,
542f1d30e27SJun Yang 				(void **)&dq_complete);
543453d8273SGagandeep Singh 			if (ret) {
544f1d30e27SJun Yang 				DPAA_QDMA_ERR("DQ desc number failed!");
545f1d30e27SJun Yang 				break;
546453d8273SGagandeep Singh 			}
547453d8273SGagandeep Singh 
548f1d30e27SJun Yang 			ret = rte_ring_dequeue_bulk(cmd_queue->complete_desc,
549f1d30e27SJun Yang 				(void **)desc, *dq_complete, NULL);
550f1d30e27SJun Yang 			if (ret != (*dq_complete)) {
551f1d30e27SJun Yang 				DPAA_QDMA_ERR("DQ %d descs failed!(%d)",
552f1d30e27SJun Yang 					*dq_complete, ret);
553f1d30e27SJun Yang 				break;
554f1d30e27SJun Yang 			}
555453d8273SGagandeep Singh 
556f1d30e27SJun Yang 			fsl_qdma_data_validation(desc, *dq_complete, cmd_queue);
557f1d30e27SJun Yang 
558f1d30e27SJun Yang 			ret = rte_ring_enqueue_bulk(cmd_queue->complete_pool,
559f1d30e27SJun Yang 				(void **)desc, (*dq_complete), NULL);
560f1d30e27SJun Yang 			if (ret != (*dq_complete)) {
561f1d30e27SJun Yang 				DPAA_QDMA_ERR("Failed desc eq %d!=%d to %s",
562f1d30e27SJun Yang 					ret, *dq_complete,
563f1d30e27SJun Yang 					cmd_queue->complete_pool->name);
564f1d30e27SJun Yang 				break;
565f1d30e27SJun Yang 			}
566f1d30e27SJun Yang 
567f1d30e27SJun Yang 			cmd_queue->complete_start =
568f1d30e27SJun Yang 				(cmd_queue->complete_start + (*dq_complete)) &
569f1d30e27SJun Yang 				(cmd_queue->pending_max - 1);
570f1d30e27SJun Yang 			cmd_queue->stats.completed++;
571f1d30e27SJun Yang 
572f1d30e27SJun Yang 			start++;
573f1d30e27SJun Yang 			if (unlikely(start == stat_queue->n_cq))
574f1d30e27SJun Yang 				start = 0;
575f1d30e27SJun Yang 			count++;
576f1d30e27SJun Yang 		} else {
577f1d30e27SJun Yang 			DPAA_QDMA_ERR("Block%d not empty but dq-queue failed!",
578f1d30e27SJun Yang 				block_id);
579f1d30e27SJun Yang 			break;
580f1d30e27SJun Yang 		}
581f1d30e27SJun Yang 	} while (1);
582f1d30e27SJun Yang 	stat_queue->complete = start;
583f1d30e27SJun Yang 
584f1d30e27SJun Yang 	return count;
585453d8273SGagandeep Singh }
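
/*
 * Summary of the drain loop above: each status-queue entry consumed from the
 * block identifies the command queue (qid) that produced it; the number of
 * jobs covered by that entry is popped from complete_burst, the matching
 * descriptors are moved from complete_desc to complete_pool (from which the
 * dequeue ops below hand them back to the caller), and the per-queue
 * completed counter is advanced.
 */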
586453d8273SGagandeep Singh 
587453d8273SGagandeep Singh static int
588f1d30e27SJun Yang fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
589f1d30e27SJun Yang 	uint16_t num)
590453d8273SGagandeep Singh {
5917a7bb89eSJun Yang 	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
592f1d30e27SJun Yang 	uint16_t i, idx, start, dq;
593f1d30e27SJun Yang 	int ret, dq_cnt;
594453d8273SGagandeep Singh 
5957a7bb89eSJun Yang 	if (fsl_qdma->is_silent)
5967a7bb89eSJun Yang 		return 0;
597453d8273SGagandeep Singh 
598f1d30e27SJun Yang 	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
599f1d30e27SJun Yang eq_again:
600f1d30e27SJun Yang 	ret = rte_ring_enqueue(fsl_queue->complete_burst,
601f1d30e27SJun Yang 			&fsl_queue->desc_in_hw[fsl_queue->ci]);
602f1d30e27SJun Yang 	if (ret) {
603f1d30e27SJun Yang 		DPAA_QDMA_DP_DEBUG("%s: Queue is full, try dequeue first",
604f1d30e27SJun Yang 			__func__);
605f1d30e27SJun Yang 		DPAA_QDMA_DP_DEBUG("%s: submitted:%"PRIu64", completed:%"PRIu64"",
606f1d30e27SJun Yang 			__func__, fsl_queue->stats.submitted,
607f1d30e27SJun Yang 			fsl_queue->stats.completed);
608f1d30e27SJun Yang 		dq_cnt = 0;
609f1d30e27SJun Yang dq_again:
610f1d30e27SJun Yang 		dq = dpaa_qdma_block_dequeue(fsl_queue->engine,
611f1d30e27SJun Yang 			fsl_queue->block_id);
612f1d30e27SJun Yang 		dq_cnt++;
613f1d30e27SJun Yang 		if (dq > 0) {
614f1d30e27SJun Yang 			goto eq_again;
615f1d30e27SJun Yang 		} else {
616f1d30e27SJun Yang 			if (dq_cnt < 100)
617f1d30e27SJun Yang 				goto dq_again;
618f1d30e27SJun Yang 			DPAA_QDMA_ERR("%s: Dq block%d failed!",
619f1d30e27SJun Yang 				__func__, fsl_queue->block_id);
620f1d30e27SJun Yang 		}
621f1d30e27SJun Yang 		return ret;
622f1d30e27SJun Yang 	}
623f1d30e27SJun Yang 	start = fsl_queue->pending_start;
624f1d30e27SJun Yang 	for (i = 0; i < num; i++) {
625f1d30e27SJun Yang 		idx = (start + i) & (fsl_queue->pending_max - 1);
626f1d30e27SJun Yang 		ret = rte_ring_enqueue(fsl_queue->complete_desc,
627f1d30e27SJun Yang 				&fsl_queue->pending_desc[idx]);
628f1d30e27SJun Yang 		if (ret) {
629f1d30e27SJun Yang 			DPAA_QDMA_ERR("Descriptors eq failed!");
630f1d30e27SJun Yang 			return ret;
631f1d30e27SJun Yang 		}
632f1d30e27SJun Yang 	}
633453d8273SGagandeep Singh 
634453d8273SGagandeep Singh 	return 0;
635453d8273SGagandeep Singh }
636453d8273SGagandeep Singh 
637453d8273SGagandeep Singh static int
638f1d30e27SJun Yang fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
639453d8273SGagandeep Singh {
640f1d30e27SJun Yang 	int overflow = 0;
6417a7bb89eSJun Yang 	uint32_t reg;
642f1d30e27SJun Yang 	uint16_t blk_drain, check_num, drain_num;
6437a7bb89eSJun Yang 	uint8_t *block = fsl_queue->block_vir;
644f1d30e27SJun Yang 	const struct rte_dma_stats *st = &fsl_queue->stats;
645f1d30e27SJun Yang 	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
646f1d30e27SJun Yang 
647f1d30e27SJun Yang 	check_num = 0;
648f1d30e27SJun Yang overflow_check:
649*a63c6426SJun Yang 	if (fsl_qdma->is_silent || unlikely(s_hw_err_check)) {
6507a7bb89eSJun Yang 		reg = qdma_readl_be(block +
6517a7bb89eSJun Yang 			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
6527a7bb89eSJun Yang 		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
6537a7bb89eSJun Yang 			1 : 0;
6547a7bb89eSJun Yang 	} else {
655f1d30e27SJun Yang 		overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
656f1d30e27SJun Yang 			QDMA_QUEUE_CR_WM) ? 1 : 0;
6577a7bb89eSJun Yang 	}
658f1d30e27SJun Yang 
6597a7bb89eSJun Yang 	if (likely(!overflow)) {
660f1d30e27SJun Yang 		return 0;
6617a7bb89eSJun Yang 	} else if (fsl_qdma->is_silent) {
6627a7bb89eSJun Yang 		check_num++;
6637a7bb89eSJun Yang 		if (check_num >= 10000) {
6647a7bb89eSJun Yang 			DPAA_QDMA_WARN("Waiting for HW complete in silent mode");
6657a7bb89eSJun Yang 			check_num = 0;
6667a7bb89eSJun Yang 		}
6677a7bb89eSJun Yang 		goto overflow_check;
6687a7bb89eSJun Yang 	}
669f1d30e27SJun Yang 
670f1d30e27SJun Yang 	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
671f1d30e27SJun Yang 		fsl_queue->block_id, fsl_queue->queue_id,
672f1d30e27SJun Yang 		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
673f1d30e27SJun Yang 	drain_num = 0;
674f1d30e27SJun Yang 
675f1d30e27SJun Yang drain_again:
676f1d30e27SJun Yang 	blk_drain = dpaa_qdma_block_dequeue(fsl_qdma,
677f1d30e27SJun Yang 		fsl_queue->block_id);
678f1d30e27SJun Yang 	if (!blk_drain) {
679f1d30e27SJun Yang 		drain_num++;
680f1d30e27SJun Yang 		if (drain_num >= 10000) {
681f1d30e27SJun Yang 			DPAA_QDMA_WARN("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
682f1d30e27SJun Yang 				fsl_queue->block_id, fsl_queue->queue_id,
683f1d30e27SJun Yang 				st->submitted - st->completed);
684f1d30e27SJun Yang 			drain_num = 0;
685f1d30e27SJun Yang 		}
686f1d30e27SJun Yang 		goto drain_again;
687f1d30e27SJun Yang 	}
688f1d30e27SJun Yang 	check_num++;
689f1d30e27SJun Yang 	if (check_num >= 1000) {
690f1d30e27SJun Yang 		DPAA_QDMA_WARN("TC%d failed check, Q%d's %"PRIu64" bd in HW.",
691f1d30e27SJun Yang 			fsl_queue->block_id, fsl_queue->queue_id,
692f1d30e27SJun Yang 			st->submitted - st->completed);
693f1d30e27SJun Yang 		check_num = 0;
694f1d30e27SJun Yang 	}
695f1d30e27SJun Yang 	goto overflow_check;
696f1d30e27SJun Yang 
697f1d30e27SJun Yang 	return 0;
698f1d30e27SJun Yang }
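
/*
 * Note on the overflow check above: with silent mode or the dpaa_dma_err_check
 * devarg enabled, the queue-full/XOFF bit of BCQSR is read from hardware;
 * otherwise the cheaper software estimate (submitted - completed compared with
 * QDMA_QUEUE_CR_WM) is used. When the queue is full and not silent,
 * completions are drained from the block until space becomes available.
 */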
699f1d30e27SJun Yang 
700f1d30e27SJun Yang static int
701f1d30e27SJun Yang fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
702f1d30e27SJun Yang 	dma_addr_t dst, dma_addr_t src, size_t len)
703f1d30e27SJun Yang {
704f1d30e27SJun Yang 	uint8_t *block = fsl_queue->block_vir;
705f1d30e27SJun Yang 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
706f1d30e27SJun Yang 	struct fsl_qdma_cmpd_ft *ft;
707453d8273SGagandeep Singh 	int ret;
708bdcb782aSJun Yang #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
709bdcb782aSJun Yang 	struct fsl_qdma_sdf *sdf;
710bdcb782aSJun Yang #endif
711453d8273SGagandeep Singh 
712f1d30e27SJun Yang 	ret = fsl_qdma_enqueue_overflow(fsl_queue);
713f1d30e27SJun Yang 	if (unlikely(ret))
714f1d30e27SJun Yang 		return ret;
715453d8273SGagandeep Singh 
716f1d30e27SJun Yang 	ft = fsl_queue->ft[fsl_queue->ci];
717453d8273SGagandeep Singh 
718bdcb782aSJun Yang #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
719bdcb782aSJun Yang 	sdf = &ft->df.sdf;
720bdcb782aSJun Yang 	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
7218c53b9b7SJun Yang #ifdef RTE_DMA_DPAA_ERRATA_ERR050265
7228c53b9b7SJun Yang 	sdf->prefetch = 1;
7238c53b9b7SJun Yang #endif
724bdcb782aSJun Yang 	if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
725bdcb782aSJun Yang 		sdf->ssen = 1;
726bdcb782aSJun Yang 		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
727bdcb782aSJun Yang 		sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
728bdcb782aSJun Yang 	} else {
729bdcb782aSJun Yang 		sdf->ssen = 0;
730bdcb782aSJun Yang 		sdf->sss = 0;
731bdcb782aSJun Yang 		sdf->ssd = 0;
732bdcb782aSJun Yang 	}
733bdcb782aSJun Yang #endif
734f1d30e27SJun Yang 	csgf_src = &ft->desc_sbuf;
735f1d30e27SJun Yang 	csgf_dest = &ft->desc_dbuf;
736f1d30e27SJun Yang 	qdma_desc_sge_addr_set64(csgf_src, src);
737f1d30e27SJun Yang 	csgf_src->length = len;
738f1d30e27SJun Yang 	csgf_src->extion = 0;
739f1d30e27SJun Yang 	qdma_desc_sge_addr_set64(csgf_dest, dst);
740f1d30e27SJun Yang 	csgf_dest->length = len;
741f1d30e27SJun Yang 	csgf_dest->extion = 0;
742f1d30e27SJun Yang 	/* This entry is the last entry. */
743f1d30e27SJun Yang 	csgf_dest->final = 1;
744f1d30e27SJun Yang 
745f1d30e27SJun Yang 	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
746f1d30e27SJun Yang 	if (ret)
747f1d30e27SJun Yang 		return ret;
748f1d30e27SJun Yang 	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
749f1d30e27SJun Yang 
750f1d30e27SJun Yang 	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
751f1d30e27SJun Yang 		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
752f1d30e27SJun Yang 	fsl_queue->stats.submitted++;
753f1d30e27SJun Yang 
754f1d30e27SJun Yang 	return 0;
755f1d30e27SJun Yang }
756f1d30e27SJun Yang 
757f1d30e27SJun Yang static int
758f1d30e27SJun Yang fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
759f1d30e27SJun Yang {
760f1d30e27SJun Yang 	uint8_t *block = fsl_queue->block_vir;
761f1d30e27SJun Yang 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
762f1d30e27SJun Yang 	struct fsl_qdma_cmpd_ft *ft;
763f1d30e27SJun Yang 	uint32_t total_len;
764f1d30e27SJun Yang 	uint16_t start, idx, num, i, next_idx;
765f1d30e27SJun Yang 	int ret;
766bdcb782aSJun Yang #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
767bdcb782aSJun Yang 	struct fsl_qdma_sdf *sdf;
768bdcb782aSJun Yang #endif
769f1d30e27SJun Yang 
770f1d30e27SJun Yang eq_sg:
771f1d30e27SJun Yang 	total_len = 0;
772f1d30e27SJun Yang 	start = fsl_queue->pending_start;
773f1d30e27SJun Yang 	if (fsl_queue->pending_desc[start].len > s_sg_max_entry_sz ||
774f1d30e27SJun Yang 		fsl_queue->pending_num == 1) {
775f1d30e27SJun Yang 		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
776f1d30e27SJun Yang 			fsl_queue->pending_desc[start].dst,
777f1d30e27SJun Yang 			fsl_queue->pending_desc[start].src,
778f1d30e27SJun Yang 			fsl_queue->pending_desc[start].len);
779f1d30e27SJun Yang 		if (!ret) {
780f1d30e27SJun Yang 			fsl_queue->pending_start =
781f1d30e27SJun Yang 				(start + 1) & (fsl_queue->pending_max - 1);
782f1d30e27SJun Yang 			fsl_queue->pending_num--;
783f1d30e27SJun Yang 		}
784f1d30e27SJun Yang 		if (fsl_queue->pending_num > 0)
785f1d30e27SJun Yang 			goto eq_sg;
786f1d30e27SJun Yang 
787f1d30e27SJun Yang 		return ret;
788f1d30e27SJun Yang 	}
789f1d30e27SJun Yang 
790f1d30e27SJun Yang 	ret = fsl_qdma_enqueue_overflow(fsl_queue);
791f1d30e27SJun Yang 	if (unlikely(ret))
792f1d30e27SJun Yang 		return ret;
793f1d30e27SJun Yang 
794f1d30e27SJun Yang 	if (fsl_queue->pending_num > FSL_QDMA_SG_MAX_ENTRY)
795f1d30e27SJun Yang 		num = FSL_QDMA_SG_MAX_ENTRY;
796f1d30e27SJun Yang 	else
797f1d30e27SJun Yang 		num = fsl_queue->pending_num;
798f1d30e27SJun Yang 
799f1d30e27SJun Yang 	ft = fsl_queue->ft[fsl_queue->ci];
800f1d30e27SJun Yang 	csgf_src = &ft->desc_sbuf;
801f1d30e27SJun Yang 	csgf_dest = &ft->desc_dbuf;
802f1d30e27SJun Yang 
803f1d30e27SJun Yang 	qdma_desc_sge_addr_set64(csgf_src, ft->phy_ssge);
804f1d30e27SJun Yang 	csgf_src->extion = 1;
805f1d30e27SJun Yang 	qdma_desc_sge_addr_set64(csgf_dest, ft->phy_dsge);
806f1d30e27SJun Yang 	csgf_dest->extion = 1;
807f1d30e27SJun Yang 	/* This entry is the last entry. */
808f1d30e27SJun Yang 	csgf_dest->final = 1;
809f1d30e27SJun Yang 	for (i = 0; i < num; i++) {
810f1d30e27SJun Yang 		idx = (start + i) & (fsl_queue->pending_max - 1);
811f1d30e27SJun Yang 		qdma_desc_sge_addr_set64(&ft->desc_ssge[i],
812f1d30e27SJun Yang 			fsl_queue->pending_desc[idx].src);
813f1d30e27SJun Yang 		ft->desc_ssge[i].length = fsl_queue->pending_desc[idx].len;
814f1d30e27SJun Yang 		ft->desc_ssge[i].final = 0;
815f1d30e27SJun Yang 		qdma_desc_sge_addr_set64(&ft->desc_dsge[i],
816f1d30e27SJun Yang 			fsl_queue->pending_desc[idx].dst);
817f1d30e27SJun Yang 		ft->desc_dsge[i].length = fsl_queue->pending_desc[idx].len;
818f1d30e27SJun Yang 		ft->desc_dsge[i].final = 0;
819f1d30e27SJun Yang 		total_len += fsl_queue->pending_desc[idx].len;
820f1d30e27SJun Yang 		if ((i + 1) != num) {
821f1d30e27SJun Yang 			next_idx = (idx + 1) & (fsl_queue->pending_max - 1);
822f1d30e27SJun Yang 			if (fsl_queue->pending_desc[next_idx].len >
823f1d30e27SJun Yang 				s_sg_max_entry_sz) {
824f1d30e27SJun Yang 				num = i + 1;
825f1d30e27SJun Yang 				break;
826f1d30e27SJun Yang 			}
827f1d30e27SJun Yang 		}
828f1d30e27SJun Yang 	}
829f1d30e27SJun Yang 
830f1d30e27SJun Yang 	ft->desc_ssge[num - 1].final = 1;
831f1d30e27SJun Yang 	ft->desc_dsge[num - 1].final = 1;
832f1d30e27SJun Yang 	csgf_src->length = total_len;
833f1d30e27SJun Yang 	csgf_dest->length = total_len;
834bdcb782aSJun Yang #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
835bdcb782aSJun Yang 	sdf = &ft->df.sdf;
836bdcb782aSJun Yang 	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
8378c53b9b7SJun Yang #ifdef RTE_DMA_DPAA_ERRATA_ERR050265
8388c53b9b7SJun Yang 	sdf->prefetch = 1;
8398c53b9b7SJun Yang #endif
840bdcb782aSJun Yang 	if (total_len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
841bdcb782aSJun Yang 		sdf->ssen = 1;
842bdcb782aSJun Yang 		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
843bdcb782aSJun Yang 		sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
844bdcb782aSJun Yang 	} else {
845bdcb782aSJun Yang 		sdf->ssen = 0;
846bdcb782aSJun Yang 		sdf->sss = 0;
847bdcb782aSJun Yang 		sdf->ssd = 0;
848bdcb782aSJun Yang 	}
849bdcb782aSJun Yang #endif
850f1d30e27SJun Yang 	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, num);
851453d8273SGagandeep Singh 	if (ret)
852453d8273SGagandeep Singh 		return ret;
853453d8273SGagandeep Singh 
854f1d30e27SJun Yang 	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
855f1d30e27SJun Yang 
856f1d30e27SJun Yang 	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
857f1d30e27SJun Yang 		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
858f1d30e27SJun Yang 	fsl_queue->stats.submitted++;
859f1d30e27SJun Yang 
860f1d30e27SJun Yang 	fsl_queue->pending_start =
861f1d30e27SJun Yang 		(start + num) & (fsl_queue->pending_max - 1);
862f1d30e27SJun Yang 	fsl_queue->pending_num -= num;
863f1d30e27SJun Yang 	if (fsl_queue->pending_num > 0)
864f1d30e27SJun Yang 		goto eq_sg;
865f1d30e27SJun Yang 
866453d8273SGagandeep Singh 	return 0;
867453d8273SGagandeep Singh }
868453d8273SGagandeep Singh 
869f1d30e27SJun Yang static int
870f1d30e27SJun Yang fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
871cc166b51SGagandeep Singh {
872f1d30e27SJun Yang 	uint16_t start = fsl_queue->pending_start;
873f1d30e27SJun Yang 	int ret;
874f1d30e27SJun Yang 
875f1d30e27SJun Yang 	if (fsl_queue->pending_num == 1) {
876f1d30e27SJun Yang 		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
877f1d30e27SJun Yang 			fsl_queue->pending_desc[start].dst,
878f1d30e27SJun Yang 			fsl_queue->pending_desc[start].src,
879f1d30e27SJun Yang 			fsl_queue->pending_desc[start].len);
880f1d30e27SJun Yang 		if (!ret) {
881f1d30e27SJun Yang 			fsl_queue->pending_start =
882f1d30e27SJun Yang 				(start + 1) & (fsl_queue->pending_max - 1);
883f1d30e27SJun Yang 			fsl_queue->pending_num = 0;
884f1d30e27SJun Yang 		}
885f1d30e27SJun Yang 		return ret;
886f1d30e27SJun Yang 	}
887f1d30e27SJun Yang 
888f1d30e27SJun Yang 	return fsl_qdma_enqueue_desc_sg(fsl_queue);
889f1d30e27SJun Yang }
890f1d30e27SJun Yang 
891f1d30e27SJun Yang static int
892f1d30e27SJun Yang dpaa_qdma_info_get(const struct rte_dma_dev *dev,
893f1d30e27SJun Yang 	struct rte_dma_info *dev_info, __rte_unused uint32_t info_sz)
894f1d30e27SJun Yang {
895f1d30e27SJun Yang 	struct fsl_qdma_engine *fsl_qdma = dev->data->dev_private;
896f1d30e27SJun Yang 
897f1d30e27SJun Yang 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
898f1d30e27SJun Yang 		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY |
899f1d30e27SJun Yang 		RTE_DMA_CAPA_OPS_COPY_SG;
900f1d30e27SJun Yang 	dev_info->dev_capa |= DPAA_QDMA_FLAGS_INDEX;
901f1d30e27SJun Yang 	dev_info->max_vchans = fsl_qdma->n_queues;
902f1d30e27SJun Yang 	dev_info->max_desc = FSL_QDMA_MAX_DESC_NUM;
903f1d30e27SJun Yang 	dev_info->min_desc = QDMA_QUEUE_SIZE;
904f1d30e27SJun Yang 	dev_info->max_sges = FSL_QDMA_SG_MAX_ENTRY;
905f1d30e27SJun Yang 
906f1d30e27SJun Yang 	return 0;
907f1d30e27SJun Yang }
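
/*
 * Application-side sketch (not part of the driver) of exercising the ops in
 * this file through the generic dmadev API; dev_id, the IOVAs and the length
 * are placeholders:
 *
 *	struct rte_dma_conf conf = { .nb_vchans = 1 };
 *	struct rte_dma_vchan_conf vconf = {
 *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
 *		.nb_desc = QDMA_QUEUE_SIZE,
 *	};
 *	uint16_t last_idx;
 *	bool error;
 *
 *	rte_dma_configure(dev_id, &conf);
 *	rte_dma_vchan_setup(dev_id, 0, &vconf);
 *	rte_dma_start(dev_id);
 *	rte_dma_copy(dev_id, 0, src_iova, dst_iova, len, RTE_DMA_OP_FLAG_SUBMIT);
 *	rte_dma_completed(dev_id, 0, 1, &last_idx, &error);
 */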
908f1d30e27SJun Yang 
909f1d30e27SJun Yang static int
910f1d30e27SJun Yang dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
911f1d30e27SJun Yang 	uint16_t vchan)
912f1d30e27SJun Yang {
913f1d30e27SJun Yang 	int ret, i, j, found = 0;
914f1d30e27SJun Yang 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
915f1d30e27SJun Yang 
916f1d30e27SJun Yang 	if (fsl_queue) {
917f1d30e27SJun Yang 		found = 1;
918f1d30e27SJun Yang 		goto queue_found;
919f1d30e27SJun Yang 	}
920f1d30e27SJun Yang 
921f1d30e27SJun Yang 	for (i = 0; i < QDMA_BLOCKS; i++) {
922f1d30e27SJun Yang 		for (j = 0; j < QDMA_QUEUES; j++) {
923f1d30e27SJun Yang 			fsl_queue = &fsl_qdma->cmd_queues[i][j];
924f1d30e27SJun Yang 
925f1d30e27SJun Yang 			if (fsl_queue->channel_id == vchan) {
926f1d30e27SJun Yang 				found = 1;
927f1d30e27SJun Yang 				fsl_qdma->chan[vchan] = fsl_queue;
928f1d30e27SJun Yang 				goto queue_found;
929f1d30e27SJun Yang 			}
930f1d30e27SJun Yang 		}
931f1d30e27SJun Yang 	}
932f1d30e27SJun Yang 
933f1d30e27SJun Yang queue_found:
934f1d30e27SJun Yang 	if (!found)
935f1d30e27SJun Yang 		return -ENXIO;
936f1d30e27SJun Yang 
937f1d30e27SJun Yang 	if (fsl_queue->used)
938f1d30e27SJun Yang 		return 0;
939f1d30e27SJun Yang 
940f1d30e27SJun Yang 	ret = fsl_qdma_pre_comp_sd_desc(fsl_queue);
941f1d30e27SJun Yang 	if (ret)
942f1d30e27SJun Yang 		return ret;
943f1d30e27SJun Yang 
944f1d30e27SJun Yang 	fsl_queue->used = 1;
945f1d30e27SJun Yang 	fsl_qdma->block_queues[fsl_queue->block_id]++;
946f1d30e27SJun Yang 
947f1d30e27SJun Yang 	return 0;
948cc166b51SGagandeep Singh }
949cc166b51SGagandeep Singh 
950cc166b51SGagandeep Singh static int
9517a7bb89eSJun Yang dpaa_qdma_configure(struct rte_dma_dev *dmadev,
9527a7bb89eSJun Yang 	const struct rte_dma_conf *dev_conf,
953453d8273SGagandeep Singh 	__rte_unused uint32_t conf_sz)
954453d8273SGagandeep Singh {
9557a7bb89eSJun Yang 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
9567a7bb89eSJun Yang 
9577a7bb89eSJun Yang 	fsl_qdma->is_silent = dev_conf->enable_silent;
958453d8273SGagandeep Singh 	return 0;
959453d8273SGagandeep Singh }
960453d8273SGagandeep Singh 
961453d8273SGagandeep Singh static int
962453d8273SGagandeep Singh dpaa_qdma_start(__rte_unused struct rte_dma_dev *dev)
963453d8273SGagandeep Singh {
964453d8273SGagandeep Singh 	return 0;
965453d8273SGagandeep Singh }
966453d8273SGagandeep Singh 
967453d8273SGagandeep Singh static int
968453d8273SGagandeep Singh dpaa_qdma_close(__rte_unused struct rte_dma_dev *dev)
969453d8273SGagandeep Singh {
970453d8273SGagandeep Singh 	return 0;
971453d8273SGagandeep Singh }
972453d8273SGagandeep Singh 
973453d8273SGagandeep Singh static int
974453d8273SGagandeep Singh dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
975453d8273SGagandeep Singh 		      uint16_t vchan,
976453d8273SGagandeep Singh 		      __rte_unused const struct rte_dma_vchan_conf *conf,
977453d8273SGagandeep Singh 		      __rte_unused uint32_t conf_sz)
978453d8273SGagandeep Singh {
979453d8273SGagandeep Singh 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
980453d8273SGagandeep Singh 
981453d8273SGagandeep Singh 	return dpaa_get_channel(fsl_qdma, vchan);
982453d8273SGagandeep Singh }
983453d8273SGagandeep Singh 
9847da29a64SGagandeep Singh static int
9857da29a64SGagandeep Singh dpaa_qdma_submit(void *dev_private, uint16_t vchan)
9867da29a64SGagandeep Singh {
987f1d30e27SJun Yang 	struct fsl_qdma_engine *fsl_qdma = dev_private;
988f1d30e27SJun Yang 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
9897da29a64SGagandeep Singh 
990f1d30e27SJun Yang 	if (!fsl_queue->pending_num)
9917da29a64SGagandeep Singh 		return 0;
992f1d30e27SJun Yang 
993f1d30e27SJun Yang 	return fsl_qdma_enqueue_desc(fsl_queue);
9947da29a64SGagandeep Singh }
9957da29a64SGagandeep Singh 
9967da29a64SGagandeep Singh static int
9977da29a64SGagandeep Singh dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
9987da29a64SGagandeep Singh 	rte_iova_t src, rte_iova_t dst,
9997da29a64SGagandeep Singh 	uint32_t length, uint64_t flags)
10007da29a64SGagandeep Singh {
1001f1d30e27SJun Yang 	struct fsl_qdma_engine *fsl_qdma = dev_private;
1002f1d30e27SJun Yang 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
1003f1d30e27SJun Yang 	uint16_t start = fsl_queue->pending_start;
1004f1d30e27SJun Yang 	uint8_t pending = fsl_queue->pending_num;
1005f1d30e27SJun Yang 	uint16_t idx;
10067da29a64SGagandeep Singh 	int ret;
10077da29a64SGagandeep Singh 
1008f1d30e27SJun Yang 	if (pending >= fsl_queue->pending_max) {
1009f1d30e27SJun Yang 		DPAA_QDMA_ERR("Too many pending jobs(%d) on queue%d",
1010f1d30e27SJun Yang 			pending, vchan);
1011f1d30e27SJun Yang 		return -ENOSPC;
10127da29a64SGagandeep Singh 	}
1013f1d30e27SJun Yang 	idx = (start + pending) & (fsl_queue->pending_max - 1);
1014f1d30e27SJun Yang 
1015f1d30e27SJun Yang 	fsl_queue->pending_desc[idx].src = src;
1016f1d30e27SJun Yang 	fsl_queue->pending_desc[idx].dst = dst;
1017f1d30e27SJun Yang 	fsl_queue->pending_desc[idx].flag =
1018f1d30e27SJun Yang 		DPAA_QDMA_IDX_FROM_FLAG(flags);
1019f1d30e27SJun Yang 	fsl_queue->pending_desc[idx].len = length;
1020f1d30e27SJun Yang 	fsl_queue->pending_num++;
1021f1d30e27SJun Yang 
1022f1d30e27SJun Yang 	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
1023f1d30e27SJun Yang 		return idx;
1024f1d30e27SJun Yang 
1025f1d30e27SJun Yang 	ret = fsl_qdma_enqueue_desc(fsl_queue);
1026f1d30e27SJun Yang 	if (!ret)
1027f1d30e27SJun Yang 		return fsl_queue->pending_start;
10287da29a64SGagandeep Singh 
10297da29a64SGagandeep Singh 	return ret;
10307da29a64SGagandeep Singh }
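
/*
 * Note: the DPAA_QDMA_FLAGS_INDEX capability advertised in dpaa_qdma_info_get()
 * lets the caller encode its own job index in the op flags; it is extracted
 * with DPAA_QDMA_IDX_FROM_FLAG() above (and DPAA_QDMA_IDXADDR_FROM_SG_FLAG()
 * for SG copies below), stored in desc->flag, and is the value the dequeue
 * paths report back through last_idx.
 */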
10317da29a64SGagandeep Singh 
1032a77261f6SJun Yang static int
1033a77261f6SJun Yang dpaa_qdma_copy_sg(void *dev_private,
1034a77261f6SJun Yang 	uint16_t vchan,
1035a77261f6SJun Yang 	const struct rte_dma_sge *src,
1036a77261f6SJun Yang 	const struct rte_dma_sge *dst,
1037a77261f6SJun Yang 	uint16_t nb_src, uint16_t nb_dst,
1038a77261f6SJun Yang 	uint64_t flags)
1039a77261f6SJun Yang {
1040a77261f6SJun Yang 	int ret;
1041a77261f6SJun Yang 	uint16_t i, start, idx;
1042a77261f6SJun Yang 	struct fsl_qdma_engine *fsl_qdma = dev_private;
1043a77261f6SJun Yang 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
1044a77261f6SJun Yang 	const uint16_t *idx_addr = NULL;
1045a77261f6SJun Yang 
1046a77261f6SJun Yang 	if (unlikely(nb_src != nb_dst)) {
1047a77261f6SJun Yang 		DPAA_QDMA_ERR("%s: nb_src(%d) != nb_dst(%d) on queue%d",
1048a77261f6SJun Yang 			__func__, nb_src, nb_dst, vchan);
1049a77261f6SJun Yang 		return -EINVAL;
1050a77261f6SJun Yang 	}
1051a77261f6SJun Yang 
1052a77261f6SJun Yang 	if ((fsl_queue->pending_num + nb_src) > FSL_QDMA_SG_MAX_ENTRY) {
1053a77261f6SJun Yang 		DPAA_QDMA_ERR("Too many pending jobs on queue%d",
1054a77261f6SJun Yang 			vchan);
1055a77261f6SJun Yang 		return -ENOSPC;
1056a77261f6SJun Yang 	}
1057a77261f6SJun Yang 	start = fsl_queue->pending_start + fsl_queue->pending_num;
1058a77261f6SJun Yang 	start = start & (fsl_queue->pending_max - 1);
1059a77261f6SJun Yang 	idx = start;
1060a77261f6SJun Yang 
1061a77261f6SJun Yang 	idx_addr = DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flags);
1062a77261f6SJun Yang 
1063a77261f6SJun Yang 	for (i = 0; i < nb_src; i++) {
1064a77261f6SJun Yang 		if (unlikely(src[i].length != dst[i].length)) {
1065a77261f6SJun Yang 			DPAA_QDMA_ERR("src.len(%d) != dst.len(%d)",
1066a77261f6SJun Yang 				src[i].length, dst[i].length);
1067a77261f6SJun Yang 			return -EINVAL;
1068a77261f6SJun Yang 		}
1069a77261f6SJun Yang 		idx = (start + i) & (fsl_queue->pending_max - 1);
1070a77261f6SJun Yang 		fsl_queue->pending_desc[idx].src = src[i].addr;
1071a77261f6SJun Yang 		fsl_queue->pending_desc[idx].dst = dst[i].addr;
1072a77261f6SJun Yang 		fsl_queue->pending_desc[idx].len = dst[i].length;
1073a77261f6SJun Yang 		fsl_queue->pending_desc[idx].flag = idx_addr[i];
1074a77261f6SJun Yang 	}
1075a77261f6SJun Yang 	fsl_queue->pending_num += nb_src;
1076a77261f6SJun Yang 
1077a77261f6SJun Yang 	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
1078a77261f6SJun Yang 		return idx;
1079a77261f6SJun Yang 
1080a77261f6SJun Yang 	ret = fsl_qdma_enqueue_desc(fsl_queue);
1081a77261f6SJun Yang 	if (!ret)
1082a77261f6SJun Yang 		return fsl_queue->pending_start;
1083a77261f6SJun Yang 
1084a77261f6SJun Yang 	return ret;
1085a77261f6SJun Yang }
1086f1d30e27SJun Yang 
1087*a63c6426SJun Yang static int
1088*a63c6426SJun Yang dpaa_qdma_err_handle(struct fsl_qdma_err_reg *reg)
1089*a63c6426SJun Yang {
1090*a63c6426SJun Yang 	struct fsl_qdma_err_reg local;
1091*a63c6426SJun Yang 	size_t i, offset = 0;
1092*a63c6426SJun Yang 	char err_msg[512];
1093*a63c6426SJun Yang 
1094*a63c6426SJun Yang 	local.dedr_be = rte_read32(&reg->dedr_be);
1095*a63c6426SJun Yang 	if (!local.dedr_be)
1096*a63c6426SJun Yang 		return 0;
1097*a63c6426SJun Yang 	offset = sprintf(err_msg, "ERR detected:");
1098*a63c6426SJun Yang 	if (local.dedr.ere) {
1099*a63c6426SJun Yang 		offset += sprintf(&err_msg[offset],
1100*a63c6426SJun Yang 			" ere(Enqueue rejection error)");
1101*a63c6426SJun Yang 	}
1102*a63c6426SJun Yang 	if (local.dedr.dde) {
1103*a63c6426SJun Yang 		offset += sprintf(&err_msg[offset],
1104*a63c6426SJun Yang 			" dde(Destination descriptor error)");
1105*a63c6426SJun Yang 	}
1106*a63c6426SJun Yang 	if (local.dedr.sde) {
1107*a63c6426SJun Yang 		offset += sprintf(&err_msg[offset],
1108*a63c6426SJun Yang 			" sde(Source descriptor error)");
1109*a63c6426SJun Yang 	}
1110*a63c6426SJun Yang 	if (local.dedr.cde) {
1111*a63c6426SJun Yang 		offset += sprintf(&err_msg[offset],
1112*a63c6426SJun Yang 			" cde(Command descriptor error)");
1113*a63c6426SJun Yang 	}
1114*a63c6426SJun Yang 	if (local.dedr.wte) {
1115*a63c6426SJun Yang 		offset += sprintf(&err_msg[offset],
1116*a63c6426SJun Yang 			" wte(Write transaction error)");
1117*a63c6426SJun Yang 	}
1118*a63c6426SJun Yang 	if (local.dedr.rte) {
1119*a63c6426SJun Yang 		offset += sprintf(&err_msg[offset],
1120*a63c6426SJun Yang 			" rte(Read transaction error)");
1121*a63c6426SJun Yang 	}
1122*a63c6426SJun Yang 	if (local.dedr.me) {
1123*a63c6426SJun Yang 		offset += sprintf(&err_msg[offset],
1124*a63c6426SJun Yang 			" me(Multiple errors of the same type)");
1125*a63c6426SJun Yang 	}
1126*a63c6426SJun Yang 	DPAA_QDMA_ERR("%s", err_msg);
1127*a63c6426SJun Yang 	for (i = 0; i < FSL_QDMA_DECCD_ERR_NUM; i++) {
1128*a63c6426SJun Yang 		local.deccd_le[FSL_QDMA_DECCD_ERR_NUM - 1 - i] =
1129*a63c6426SJun Yang 			QDMA_IN(&reg->deccd_le[i]);
1130*a63c6426SJun Yang 	}
1131*a63c6426SJun Yang 	local.deccqidr_be = rte_read32(&reg->deccqidr_be);
1132*a63c6426SJun Yang 	local.decbr = rte_read32(&reg->decbr);
1133*a63c6426SJun Yang 
1134*a63c6426SJun Yang 	offset = sprintf(err_msg, "ERR command:");
1135*a63c6426SJun Yang 	offset += sprintf(&err_msg[offset],
1136*a63c6426SJun Yang 		" status: %02x, ser: %d, offset:%d, fmt: %02x",
1137*a63c6426SJun Yang 		local.err_cmd.status, local.err_cmd.ser,
1138*a63c6426SJun Yang 		local.err_cmd.offset, local.err_cmd.format);
1139*a63c6426SJun Yang 	offset += sprintf(&err_msg[offset],
1140*a63c6426SJun Yang 		" address: 0x%"PRIx64", queue: %d, dd: %02x",
1141*a63c6426SJun Yang 		(uint64_t)local.err_cmd.addr_hi << 32 |
1142*a63c6426SJun Yang 		local.err_cmd.addr_lo,
1143*a63c6426SJun Yang 		local.err_cmd.queue, local.err_cmd.dd);
1144*a63c6426SJun Yang 	DPAA_QDMA_ERR("%s", err_msg);
1145*a63c6426SJun Yang 	DPAA_QDMA_ERR("ERR command block: %d, queue: %d",
1146*a63c6426SJun Yang 		local.deccqidr.block, local.deccqidr.queue);
1147*a63c6426SJun Yang 
1148*a63c6426SJun Yang 	rte_write32(local.dedr_be, &reg->dedr_be);
1149*a63c6426SJun Yang 
1150*a63c6426SJun Yang 	return -EIO;
1151*a63c6426SJun Yang }
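
/*
 * The error decoder above runs only when the dpaa_dma_err_check devarg set
 * s_hw_err_check: it snapshots DEDR and the captured error command descriptor,
 * logs them, writes the DEDR value back (presumably write-one-to-clear), and
 * lets the dequeue paths below bump stats.errors and flag has_error.
 */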
1152*a63c6426SJun Yang 
11537da29a64SGagandeep Singh static uint16_t
11547da29a64SGagandeep Singh dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
11557da29a64SGagandeep Singh 	const uint16_t nb_cpls, uint16_t *last_idx,
11567da29a64SGagandeep Singh 	enum rte_dma_status_code *st)
11577da29a64SGagandeep Singh {
1158f1d30e27SJun Yang 	struct fsl_qdma_engine *fsl_qdma = dev_private;
1159*a63c6426SJun Yang 	int err;
1160f1d30e27SJun Yang 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
1161*a63c6426SJun Yang 	void *status = fsl_qdma->status_base;
1162f1d30e27SJun Yang 	struct fsl_qdma_desc *desc_complete[nb_cpls];
1163f1d30e27SJun Yang 	uint16_t i, dq_num;
11647da29a64SGagandeep Singh 
11657a7bb89eSJun Yang 	if (unlikely(fsl_qdma->is_silent)) {
11667a7bb89eSJun Yang 		DPAA_QDMA_WARN("Can't dq in silent mode");
11677a7bb89eSJun Yang 
11687a7bb89eSJun Yang 		return 0;
11697a7bb89eSJun Yang 	}
11707a7bb89eSJun Yang 
1171f1d30e27SJun Yang 	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
1172f1d30e27SJun Yang 			fsl_queue->block_id);
1173f1d30e27SJun Yang 	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)",
1174f1d30e27SJun Yang 		__func__, dq_num);
1175f1d30e27SJun Yang 
1176f1d30e27SJun Yang 	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
1177f1d30e27SJun Yang 			(void **)desc_complete, nb_cpls, NULL);
1178f1d30e27SJun Yang 	for (i = 0; i < dq_num; i++)
1179f1d30e27SJun Yang 		last_idx[i] = desc_complete[i]->flag;
1180f1d30e27SJun Yang 
1181f1d30e27SJun Yang 	if (st) {
1182f1d30e27SJun Yang 		for (i = 0; i < dq_num; i++)
1183f1d30e27SJun Yang 			st[i] = RTE_DMA_STATUS_SUCCESSFUL;
11847da29a64SGagandeep Singh 	}
11857da29a64SGagandeep Singh 
1186*a63c6426SJun Yang 	if (s_hw_err_check) {
1187*a63c6426SJun Yang 		err = dpaa_qdma_err_handle(status +
1188*a63c6426SJun Yang 			FSL_QDMA_ERR_REG_STATUS_OFFSET);
1189*a63c6426SJun Yang 		if (err)
1190*a63c6426SJun Yang 			fsl_queue->stats.errors++;
1191*a63c6426SJun Yang 	}
11927da29a64SGagandeep Singh 
1193f1d30e27SJun Yang 	return dq_num;
11947da29a64SGagandeep Singh }
11957da29a64SGagandeep Singh 
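/*
 * Return completed jobs on a virtual channel without per-job status.
 * When hardware error checking is enabled, *has_error is set if the
 * error status registers report a problem.
 */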
11967da29a64SGagandeep Singh static uint16_t
11977da29a64SGagandeep Singh dpaa_qdma_dequeue(void *dev_private,
11987da29a64SGagandeep Singh 	uint16_t vchan, const uint16_t nb_cpls,
11997da29a64SGagandeep Singh 	uint16_t *last_idx, bool *has_error)
12007da29a64SGagandeep Singh {
1201f1d30e27SJun Yang 	struct fsl_qdma_engine *fsl_qdma = dev_private;
1202*a63c6426SJun Yang 	int err;
1203f1d30e27SJun Yang 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
1204*a63c6426SJun Yang 	void *status = fsl_qdma->status_base;
1205f1d30e27SJun Yang 	struct fsl_qdma_desc *desc_complete[nb_cpls];
1206f1d30e27SJun Yang 	uint16_t i, dq_num;
12077da29a64SGagandeep Singh 
12087a7bb89eSJun Yang 	if (unlikely(fsl_qdma->is_silent)) {
12097a7bb89eSJun Yang 		DPAA_QDMA_WARN("Can't dq in silent mode");
12107a7bb89eSJun Yang 
12117a7bb89eSJun Yang 		return 0;
12127a7bb89eSJun Yang 	}
12137da29a64SGagandeep Singh 
1214f1d30e27SJun Yang 	*has_error = false;
1215f1d30e27SJun Yang 	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
1216f1d30e27SJun Yang 		fsl_queue->block_id);
1217f1d30e27SJun Yang 	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)",
1218f1d30e27SJun Yang 		__func__, dq_num);
12197da29a64SGagandeep Singh 
1220f1d30e27SJun Yang 	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
1221f1d30e27SJun Yang 			(void **)desc_complete, nb_cpls, NULL);
1222f1d30e27SJun Yang 	for (i = 0; i < dq_num; i++)
1223f1d30e27SJun Yang 		last_idx[i] = desc_complete[i]->flag;
12247da29a64SGagandeep Singh 
1225*a63c6426SJun Yang 	if (s_hw_err_check) {
1226*a63c6426SJun Yang 		err = dpaa_qdma_err_handle(status +
1227*a63c6426SJun Yang 			FSL_QDMA_ERR_REG_STATUS_OFFSET);
1228*a63c6426SJun Yang 		if (err) {
1229*a63c6426SJun Yang 			if (has_error)
1230*a63c6426SJun Yang 				*has_error = true;
1231*a63c6426SJun Yang 			fsl_queue->stats.errors++;
1232*a63c6426SJun Yang 		}
1233*a63c6426SJun Yang 	}
1234*a63c6426SJun Yang 
1235f1d30e27SJun Yang 	return dq_num;
12367da29a64SGagandeep Singh }
12377da29a64SGagandeep Singh 
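/*
 * Copy out the per-queue submitted/completed/errors counters. The caller's
 * buffer must be at least sizeof(struct rte_dma_stats) bytes.
 */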
123833441726SGagandeep Singh static int
1239f1d30e27SJun Yang dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev,
1240f1d30e27SJun Yang 	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
124133441726SGagandeep Singh {
124233441726SGagandeep Singh 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
1243f1d30e27SJun Yang 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
124433441726SGagandeep Singh 	struct rte_dma_stats *stats = &fsl_queue->stats;
124533441726SGagandeep Singh 
124633441726SGagandeep Singh 	if (size < sizeof(struct rte_dma_stats))
124733441726SGagandeep Singh 		return -EINVAL;
124833441726SGagandeep Singh 	if (rte_stats == NULL)
124933441726SGagandeep Singh 		return -EINVAL;
125033441726SGagandeep Singh 
125133441726SGagandeep Singh 	*rte_stats = *stats;
125233441726SGagandeep Singh 
125333441726SGagandeep Singh 	return 0;
125433441726SGagandeep Singh }
125533441726SGagandeep Singh 
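/* Clear the per-queue statistics counters. */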
125633441726SGagandeep Singh static int
125733441726SGagandeep Singh dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
125833441726SGagandeep Singh {
125933441726SGagandeep Singh 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
1260f1d30e27SJun Yang 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
126133441726SGagandeep Singh 
1262f1d30e27SJun Yang 	memset(&fsl_queue->stats, 0, sizeof(struct rte_dma_stats));
126333441726SGagandeep Singh 
126433441726SGagandeep Singh 	return 0;
126533441726SGagandeep Singh }
126633441726SGagandeep Singh 
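/* Remaining space in the virtual channel's pending descriptor list. */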
12671686d809SJun Yang static uint16_t
12681686d809SJun Yang dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
12691686d809SJun Yang {
12701686d809SJun Yang 	const struct fsl_qdma_engine *fsl_qdma = dev_private;
12711686d809SJun Yang 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
12721686d809SJun Yang 
12731686d809SJun Yang 	return fsl_queue->pending_max - fsl_queue->pending_num;
12741686d809SJun Yang }
12751686d809SJun Yang 
1276453d8273SGagandeep Singh static struct rte_dma_dev_ops dpaa_qdma_ops = {
1277f1d30e27SJun Yang 	.dev_info_get		  = dpaa_qdma_info_get,
1278453d8273SGagandeep Singh 	.dev_configure            = dpaa_qdma_configure,
1279453d8273SGagandeep Singh 	.dev_start                = dpaa_qdma_start,
1280453d8273SGagandeep Singh 	.dev_close                = dpaa_qdma_close,
1281453d8273SGagandeep Singh 	.vchan_setup		  = dpaa_qdma_queue_setup,
128233441726SGagandeep Singh 	.stats_get		  = dpaa_qdma_stats_get,
128333441726SGagandeep Singh 	.stats_reset		  = dpaa_qdma_stats_reset,
1284453d8273SGagandeep Singh };
1285453d8273SGagandeep Singh 
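/* kvargs handler: only the value "1" enables the option; anything else rejects it. */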
1286453d8273SGagandeep Singh static int
1287*a63c6426SJun Yang check_devargs_handler(__rte_unused const char *key, const char *value,
1288*a63c6426SJun Yang 		      __rte_unused void *opaque)
1289*a63c6426SJun Yang {
1290*a63c6426SJun Yang 	if (strcmp(value, "1"))
1291*a63c6426SJun Yang 		return -1;
1292*a63c6426SJun Yang 
1293*a63c6426SJun Yang 	return 0;
1294*a63c6426SJun Yang }
1295*a63c6426SJun Yang 
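/* Return 1 if @key is present in the device arguments and set to "1", 0 otherwise. */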
1296*a63c6426SJun Yang static int
1297*a63c6426SJun Yang dpaa_get_devargs(struct rte_devargs *devargs, const char *key)
1298*a63c6426SJun Yang {
1299*a63c6426SJun Yang 	struct rte_kvargs *kvlist;
1300*a63c6426SJun Yang 
1301*a63c6426SJun Yang 	if (!devargs)
1302*a63c6426SJun Yang 		return 0;
1303*a63c6426SJun Yang 
1304*a63c6426SJun Yang 	kvlist = rte_kvargs_parse(devargs->args, NULL);
1305*a63c6426SJun Yang 	if (!kvlist)
1306*a63c6426SJun Yang 		return 0;
1307*a63c6426SJun Yang 
1308*a63c6426SJun Yang 	if (!rte_kvargs_count(kvlist, key)) {
1309*a63c6426SJun Yang 		rte_kvargs_free(kvlist);
1310*a63c6426SJun Yang 		return 0;
1311*a63c6426SJun Yang 	}
1312*a63c6426SJun Yang 
1313*a63c6426SJun Yang 	if (rte_kvargs_process(kvlist, key,
1314*a63c6426SJun Yang 			       check_devargs_handler, NULL) < 0) {
1315*a63c6426SJun Yang 		rte_kvargs_free(kvlist);
1316*a63c6426SJun Yang 		return 0;
1317*a63c6426SJun Yang 	}
1318*a63c6426SJun Yang 	rte_kvargs_free(kvlist);
1319*a63c6426SJun Yang 
1320*a63c6426SJun Yang 	return 1;
1321*a63c6426SJun Yang }
1322*a63c6426SJun Yang 
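/*
 * One-time engine initialization: map the qDMA CCSR register space from
 * /dev/mem, prepare one status queue per block and one command queue per
 * (queue, block) pair, then program the hardware registers.
 */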
1323*a63c6426SJun Yang static int
1324cc166b51SGagandeep Singh dpaa_qdma_init(struct rte_dma_dev *dmadev)
1325cc166b51SGagandeep Singh {
1326cc166b51SGagandeep Singh 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
1327cc166b51SGagandeep Singh 	uint64_t phys_addr;
1328cc166b51SGagandeep Singh 	int ccsr_qdma_fd;
1329cc166b51SGagandeep Singh 	int regs_size;
1330cc166b51SGagandeep Singh 	int ret;
1331f1d30e27SJun Yang 	uint32_t i, j, k;
1332cc166b51SGagandeep Singh 
1333*a63c6426SJun Yang 	if (dpaa_get_devargs(dmadev->device->devargs, DPAA_DMA_ERROR_CHECK)) {
1334*a63c6426SJun Yang 		s_hw_err_check = true;
1335*a63c6426SJun Yang 		DPAA_QDMA_INFO("Enable DMA error checks");
1336*a63c6426SJun Yang 	}
1337*a63c6426SJun Yang 
1338f1d30e27SJun Yang 	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
1339cc166b51SGagandeep Singh 	fsl_qdma->num_blocks = QDMA_BLOCKS;
1340cc166b51SGagandeep Singh 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
1341cc166b51SGagandeep Singh 
1342cc166b51SGagandeep Singh 	ccsr_qdma_fd = open("/dev/mem", O_RDWR);
1343cc166b51SGagandeep Singh 	if (unlikely(ccsr_qdma_fd < 0)) {
1344cc166b51SGagandeep Singh 		DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map");
1345f1d30e27SJun Yang 		return ccsr_qdma_fd;
1346cc166b51SGagandeep Singh 	}
1347cc166b51SGagandeep Singh 
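	/* Map the complete qDMA CCSR space: control and status regions plus the per-block register windows. */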
1348f1d30e27SJun Yang 	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
1349f1d30e27SJun Yang 	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
1350cc166b51SGagandeep Singh 	phys_addr = QDMA_CCSR_BASE;
1351f1d30e27SJun Yang 	fsl_qdma->reg_base = mmap(NULL, regs_size,
1352f1d30e27SJun Yang 		PROT_READ | PROT_WRITE, MAP_SHARED,
1353cc166b51SGagandeep Singh 		ccsr_qdma_fd, phys_addr);
1354cc166b51SGagandeep Singh 
1355cc166b51SGagandeep Singh 	close(ccsr_qdma_fd);
1356f1d30e27SJun Yang 	if (fsl_qdma->reg_base == MAP_FAILED) {
1357f1d30e27SJun Yang 		DPAA_QDMA_ERR("Map qdma reg: Phys(0x%"PRIx64"), size(%d)",
1358f1d30e27SJun Yang 			phys_addr, regs_size);
1359f1d30e27SJun Yang 		return -ENOMEM;
1360cc166b51SGagandeep Singh 	}
1361cc166b51SGagandeep Singh 
1362f1d30e27SJun Yang 	fsl_qdma->ctrl_base =
1363f1d30e27SJun Yang 		fsl_qdma->reg_base + QDMA_CTRL_REGION_OFFSET;
1364f1d30e27SJun Yang 	fsl_qdma->status_base =
1365f1d30e27SJun Yang 		fsl_qdma->reg_base + QDMA_STATUS_REGION_OFFSET;
1366f1d30e27SJun Yang 	fsl_qdma->block_base =
1367f1d30e27SJun Yang 		fsl_qdma->status_base + QDMA_STATUS_REGION_SIZE;
1368cc166b51SGagandeep Singh 
1369f1d30e27SJun Yang 	for (i = 0; i < QDMA_BLOCKS; i++) {
1370f1d30e27SJun Yang 		ret = fsl_qdma_prep_status_queue(fsl_qdma, i);
1371f1d30e27SJun Yang 		if (ret)
1372f1d30e27SJun Yang 			goto mem_free;
1373cc166b51SGagandeep Singh 	}
1374cc166b51SGagandeep Singh 
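	/* Allocate command-queue resources for every (queue, block) pair; channel ids are assigned sequentially. */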
1375f1d30e27SJun Yang 	k = 0;
1376f1d30e27SJun Yang 	for (i = 0; i < QDMA_QUEUES; i++) {
1377f1d30e27SJun Yang 		for (j = 0; j < QDMA_BLOCKS; j++) {
1378f1d30e27SJun Yang 			ret = fsl_qdma_alloc_queue_resources(fsl_qdma, i, j);
1379f1d30e27SJun Yang 			if (ret)
1380f1d30e27SJun Yang 				goto mem_free;
1381f1d30e27SJun Yang 			fsl_qdma->cmd_queues[j][i].channel_id = k;
1382f1d30e27SJun Yang 			k++;
1383f1d30e27SJun Yang 		}
1384cc166b51SGagandeep Singh 	}
1385cc166b51SGagandeep Singh 
1386cc166b51SGagandeep Singh 	ret = fsl_qdma_reg_init(fsl_qdma);
1387cc166b51SGagandeep Singh 	if (ret) {
1388f665790aSDavid Marchand 		DPAA_QDMA_ERR("Can't initialize the qDMA engine.");
1389f1d30e27SJun Yang 		goto mem_free;
1390cc166b51SGagandeep Singh 	}
1391cc166b51SGagandeep Singh 
1392cc166b51SGagandeep Singh 	return 0;
1393cc166b51SGagandeep Singh 
1394f1d30e27SJun Yang mem_free:
1395f1d30e27SJun Yang 	for (i = 0; i < fsl_qdma->num_blocks; i++)
1396f1d30e27SJun Yang 		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
1397cc166b51SGagandeep Singh 
1398f1d30e27SJun Yang 	for (i = 0; i < fsl_qdma->num_blocks; i++) {
1399f1d30e27SJun Yang 		for (j = 0; j < QDMA_QUEUES; j++)
1400f1d30e27SJun Yang 			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
1401f1d30e27SJun Yang 	}
1402f1d30e27SJun Yang 
1403f1d30e27SJun Yang 	munmap(fsl_qdma->reg_base, regs_size);
1404f1d30e27SJun Yang 
1405f1d30e27SJun Yang 	return ret;
1406cc166b51SGagandeep Singh }
1407cc166b51SGagandeep Singh 
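/* Probe: allocate a dmadev for the DPAA device, hook up ops and fast-path callbacks, then initialize the engine. */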
1408cc166b51SGagandeep Singh static int
1409cc166b51SGagandeep Singh dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
1410cc166b51SGagandeep Singh 		struct rte_dpaa_device *dpaa_dev)
1411cc166b51SGagandeep Singh {
1412cc166b51SGagandeep Singh 	struct rte_dma_dev *dmadev;
1413cc166b51SGagandeep Singh 	int ret;
1414cc166b51SGagandeep Singh 
1415cc166b51SGagandeep Singh 	dmadev = rte_dma_pmd_allocate(dpaa_dev->device.name,
1416cc166b51SGagandeep Singh 				      rte_socket_id(),
1417cc166b51SGagandeep Singh 				      sizeof(struct fsl_qdma_engine));
1418cc166b51SGagandeep Singh 	if (!dmadev) {
1419cc166b51SGagandeep Singh 		DPAA_QDMA_ERR("Unable to allocate dmadevice");
1420cc166b51SGagandeep Singh 		return -EINVAL;
1421cc166b51SGagandeep Singh 	}
1422cc166b51SGagandeep Singh 
1423cc166b51SGagandeep Singh 	dpaa_dev->dmadev = dmadev;
1424453d8273SGagandeep Singh 	dmadev->dev_ops = &dpaa_qdma_ops;
1425453d8273SGagandeep Singh 	dmadev->device = &dpaa_dev->device;
1426453d8273SGagandeep Singh 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
14277da29a64SGagandeep Singh 	dmadev->fp_obj->copy = dpaa_qdma_enqueue;
1428a77261f6SJun Yang 	dmadev->fp_obj->copy_sg = dpaa_qdma_copy_sg;
14297da29a64SGagandeep Singh 	dmadev->fp_obj->submit = dpaa_qdma_submit;
14307da29a64SGagandeep Singh 	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
14317da29a64SGagandeep Singh 	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;
14321686d809SJun Yang 	dmadev->fp_obj->burst_capacity = dpaa_qdma_burst_capacity;
1433cc166b51SGagandeep Singh 
1434cc166b51SGagandeep Singh 	/* Invoke PMD device initialization function */
1435cc166b51SGagandeep Singh 	ret = dpaa_qdma_init(dmadev);
1436cc166b51SGagandeep Singh 	if (ret) {
1437cc166b51SGagandeep Singh 		(void)rte_dma_pmd_release(dpaa_dev->device.name);
1438cc166b51SGagandeep Singh 		return ret;
1439cc166b51SGagandeep Singh 	}
1440cc166b51SGagandeep Singh 
1441cc166b51SGagandeep Singh 	dmadev->state = RTE_DMA_DEV_READY;
1442cc166b51SGagandeep Singh 	return 0;
1443cc166b51SGagandeep Singh }
1444cc166b51SGagandeep Singh 
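/* Remove: free status and command queue resources, unmap the CCSR space and release the dmadev. */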
1445cc166b51SGagandeep Singh static int
1446cc166b51SGagandeep Singh dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
1447cc166b51SGagandeep Singh {
1448cc166b51SGagandeep Singh 	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
1449cc166b51SGagandeep Singh 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
1450f1d30e27SJun Yang 	uint32_t i, j, regs_size;
1451cc166b51SGagandeep Singh 
1452f1d30e27SJun Yang 	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
1453f1d30e27SJun Yang 	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
1454cc166b51SGagandeep Singh 
1455f1d30e27SJun Yang 	for (i = 0; i < QDMA_BLOCKS; i++)
1456f1d30e27SJun Yang 		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
1457f1d30e27SJun Yang 
1458f1d30e27SJun Yang 	for (i = 0; i < QDMA_BLOCKS; i++) {
1459f1d30e27SJun Yang 		for (j = 0; j < QDMA_QUEUES; j++)
1460f1d30e27SJun Yang 			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
1461cc166b51SGagandeep Singh 	}
1462cc166b51SGagandeep Singh 
1463f1d30e27SJun Yang 	munmap(fsl_qdma->reg_base, regs_size);
1464cc166b51SGagandeep Singh 
1465cc166b51SGagandeep Singh 	(void)rte_dma_pmd_release(dpaa_dev->device.name);
1466cc166b51SGagandeep Singh 
1467583f3732SGagandeep Singh 	return 0;
1468583f3732SGagandeep Singh }
1469583f3732SGagandeep Singh 
1470583f3732SGagandeep Singh static struct rte_dpaa_driver rte_dpaa_qdma_pmd;
1471583f3732SGagandeep Singh 
1472583f3732SGagandeep Singh static struct rte_dpaa_driver rte_dpaa_qdma_pmd = {
1473583f3732SGagandeep Singh 	.drv_type = FSL_DPAA_QDMA,
1474583f3732SGagandeep Singh 	.probe = dpaa_qdma_probe,
1475583f3732SGagandeep Singh 	.remove = dpaa_qdma_remove,
1476583f3732SGagandeep Singh };
1477583f3732SGagandeep Singh 
1478583f3732SGagandeep Singh RTE_PMD_REGISTER_DPAA(dpaa_qdma, rte_dpaa_qdma_pmd);
1479*a63c6426SJun Yang RTE_PMD_REGISTER_PARAM_STRING(dpaa_qdma, DPAA_DMA_ERROR_CHECK "=<int>");
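/*
 * Usage sketch (illustrative, not taken from this file): hardware error
 * checking is enabled by appending the devarg to the qDMA device's
 * argument string when the device is allowed, e.g.
 *   <dpaa-qdma-device-name>,dpaa_dma_err_check=1
 * The device name depends on the platform and bus scan.
 */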
1480583f3732SGagandeep Singh RTE_LOG_REGISTER_DEFAULT(dpaa_qdma_logtype, INFO);
1481