xref: /dpdk/drivers/dma/dpaa/dpaa_qdma.c (revision cc166b51c352039e07d2b93692ee362a4e29b0c4)
1583f3732SGagandeep Singh /* SPDX-License-Identifier: BSD-3-Clause
2583f3732SGagandeep Singh  * Copyright 2021 NXP
3583f3732SGagandeep Singh  */
4583f3732SGagandeep Singh 
5583f3732SGagandeep Singh #include <rte_dpaa_bus.h>
6*cc166b51SGagandeep Singh #include <rte_dmadev_pmd.h>
7*cc166b51SGagandeep Singh 
8*cc166b51SGagandeep Singh #include "dpaa_qdma.h"
9*cc166b51SGagandeep Singh #include "dpaa_qdma_logs.h"
10*cc166b51SGagandeep Singh 
/* Integer base-2 logarithm: returns floor(log2(x)) for x >= 1.
 * Used to encode ring sizes into the qDMA mode-register fields,
 * which take log2-coded values.
 */
static inline int
ilog2(int x)
{
	int log;

	for (log = 0, x >>= 1; x != 0; x >>= 1)
		log++;

	return log;
}
24*cc166b51SGagandeep Singh 
/* Read a 32-bit qDMA register; QDMA_IN() hides the endianness handling. */
static u32
qdma_readl(void *addr)
{
	return QDMA_IN(addr);
}
30*cc166b51SGagandeep Singh 
/* Write a 32-bit qDMA register; QDMA_OUT() hides the endianness handling. */
static void
qdma_writel(u32 val, void *addr)
{
	QDMA_OUT(addr, val);
}
36*cc166b51SGagandeep Singh 
37*cc166b51SGagandeep Singh static void
38*cc166b51SGagandeep Singh *dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
39*cc166b51SGagandeep Singh {
40*cc166b51SGagandeep Singh 	void *virt_addr;
41*cc166b51SGagandeep Singh 
42*cc166b51SGagandeep Singh 	virt_addr = rte_malloc("dma pool alloc", size, aligned);
43*cc166b51SGagandeep Singh 	if (!virt_addr)
44*cc166b51SGagandeep Singh 		return NULL;
45*cc166b51SGagandeep Singh 
46*cc166b51SGagandeep Singh 	*phy_addr = rte_mem_virt2iova(virt_addr);
47*cc166b51SGagandeep Singh 
48*cc166b51SGagandeep Singh 	return virt_addr;
49*cc166b51SGagandeep Singh }
50*cc166b51SGagandeep Singh 
/* Release a buffer obtained from dma_pool_alloc(). NULL is a no-op. */
static void
dma_pool_free(void *addr)
{
	rte_free(addr);
}
56*cc166b51SGagandeep Singh 
57*cc166b51SGagandeep Singh static void
58*cc166b51SGagandeep Singh fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan)
59*cc166b51SGagandeep Singh {
60*cc166b51SGagandeep Singh 	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
61*cc166b51SGagandeep Singh 	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
62*cc166b51SGagandeep Singh 	struct fsl_qdma_comp *comp_temp, *_comp_temp;
63*cc166b51SGagandeep Singh 	int id;
64*cc166b51SGagandeep Singh 
65*cc166b51SGagandeep Singh 	if (--fsl_queue->count)
66*cc166b51SGagandeep Singh 		goto finally;
67*cc166b51SGagandeep Singh 
68*cc166b51SGagandeep Singh 	id = (fsl_qdma->block_base - fsl_queue->block_base) /
69*cc166b51SGagandeep Singh 	      fsl_qdma->block_offset;
70*cc166b51SGagandeep Singh 
71*cc166b51SGagandeep Singh 	while (rte_atomic32_read(&wait_task[id]) == 1)
72*cc166b51SGagandeep Singh 		rte_delay_us(QDMA_DELAY);
73*cc166b51SGagandeep Singh 
74*cc166b51SGagandeep Singh 	list_for_each_entry_safe(comp_temp, _comp_temp,
75*cc166b51SGagandeep Singh 				 &fsl_queue->comp_used,	list) {
76*cc166b51SGagandeep Singh 		list_del(&comp_temp->list);
77*cc166b51SGagandeep Singh 		dma_pool_free(comp_temp->virt_addr);
78*cc166b51SGagandeep Singh 		dma_pool_free(comp_temp->desc_virt_addr);
79*cc166b51SGagandeep Singh 		rte_free(comp_temp);
80*cc166b51SGagandeep Singh 	}
81*cc166b51SGagandeep Singh 
82*cc166b51SGagandeep Singh 	list_for_each_entry_safe(comp_temp, _comp_temp,
83*cc166b51SGagandeep Singh 				 &fsl_queue->comp_free, list) {
84*cc166b51SGagandeep Singh 		list_del(&comp_temp->list);
85*cc166b51SGagandeep Singh 		dma_pool_free(comp_temp->virt_addr);
86*cc166b51SGagandeep Singh 		dma_pool_free(comp_temp->desc_virt_addr);
87*cc166b51SGagandeep Singh 		rte_free(comp_temp);
88*cc166b51SGagandeep Singh 	}
89*cc166b51SGagandeep Singh 
90*cc166b51SGagandeep Singh finally:
91*cc166b51SGagandeep Singh 	fsl_qdma->desc_allocated--;
92*cc166b51SGagandeep Singh }
93*cc166b51SGagandeep Singh 
94*cc166b51SGagandeep Singh static struct fsl_qdma_queue
95*cc166b51SGagandeep Singh *fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma)
96*cc166b51SGagandeep Singh {
97*cc166b51SGagandeep Singh 	struct fsl_qdma_queue *queue_head, *queue_temp;
98*cc166b51SGagandeep Singh 	int len, i, j;
99*cc166b51SGagandeep Singh 	int queue_num;
100*cc166b51SGagandeep Singh 	int blocks;
101*cc166b51SGagandeep Singh 	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
102*cc166b51SGagandeep Singh 
103*cc166b51SGagandeep Singh 	queue_num = fsl_qdma->n_queues;
104*cc166b51SGagandeep Singh 	blocks = fsl_qdma->num_blocks;
105*cc166b51SGagandeep Singh 
106*cc166b51SGagandeep Singh 	len = sizeof(*queue_head) * queue_num * blocks;
107*cc166b51SGagandeep Singh 	queue_head = rte_zmalloc("qdma: queue head", len, 0);
108*cc166b51SGagandeep Singh 	if (!queue_head)
109*cc166b51SGagandeep Singh 		return NULL;
110*cc166b51SGagandeep Singh 
111*cc166b51SGagandeep Singh 	for (i = 0; i < FSL_QDMA_QUEUE_MAX; i++)
112*cc166b51SGagandeep Singh 		queue_size[i] = QDMA_QUEUE_SIZE;
113*cc166b51SGagandeep Singh 
114*cc166b51SGagandeep Singh 	for (j = 0; j < blocks; j++) {
115*cc166b51SGagandeep Singh 		for (i = 0; i < queue_num; i++) {
116*cc166b51SGagandeep Singh 			if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
117*cc166b51SGagandeep Singh 			    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
118*cc166b51SGagandeep Singh 				DPAA_QDMA_ERR("Get wrong queue-sizes.\n");
119*cc166b51SGagandeep Singh 				goto fail;
120*cc166b51SGagandeep Singh 			}
121*cc166b51SGagandeep Singh 			queue_temp = queue_head + i + (j * queue_num);
122*cc166b51SGagandeep Singh 
123*cc166b51SGagandeep Singh 			queue_temp->cq =
124*cc166b51SGagandeep Singh 			dma_pool_alloc(sizeof(struct fsl_qdma_format) *
125*cc166b51SGagandeep Singh 				       queue_size[i],
126*cc166b51SGagandeep Singh 				       sizeof(struct fsl_qdma_format) *
127*cc166b51SGagandeep Singh 				       queue_size[i], &queue_temp->bus_addr);
128*cc166b51SGagandeep Singh 
129*cc166b51SGagandeep Singh 			if (!queue_temp->cq)
130*cc166b51SGagandeep Singh 				goto fail;
131*cc166b51SGagandeep Singh 
132*cc166b51SGagandeep Singh 			memset(queue_temp->cq, 0x0, queue_size[i] *
133*cc166b51SGagandeep Singh 			       sizeof(struct fsl_qdma_format));
134*cc166b51SGagandeep Singh 
135*cc166b51SGagandeep Singh 			queue_temp->block_base = fsl_qdma->block_base +
136*cc166b51SGagandeep Singh 				FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
137*cc166b51SGagandeep Singh 			queue_temp->n_cq = queue_size[i];
138*cc166b51SGagandeep Singh 			queue_temp->id = i;
139*cc166b51SGagandeep Singh 			queue_temp->count = 0;
140*cc166b51SGagandeep Singh 			queue_temp->pending = 0;
141*cc166b51SGagandeep Singh 			queue_temp->virt_head = queue_temp->cq;
142*cc166b51SGagandeep Singh 
143*cc166b51SGagandeep Singh 		}
144*cc166b51SGagandeep Singh 	}
145*cc166b51SGagandeep Singh 	return queue_head;
146*cc166b51SGagandeep Singh 
147*cc166b51SGagandeep Singh fail:
148*cc166b51SGagandeep Singh 	for (j = 0; j < blocks; j++) {
149*cc166b51SGagandeep Singh 		for (i = 0; i < queue_num; i++) {
150*cc166b51SGagandeep Singh 			queue_temp = queue_head + i + (j * queue_num);
151*cc166b51SGagandeep Singh 			dma_pool_free(queue_temp->cq);
152*cc166b51SGagandeep Singh 		}
153*cc166b51SGagandeep Singh 	}
154*cc166b51SGagandeep Singh 	rte_free(queue_head);
155*cc166b51SGagandeep Singh 
156*cc166b51SGagandeep Singh 	return NULL;
157*cc166b51SGagandeep Singh }
158*cc166b51SGagandeep Singh 
159*cc166b51SGagandeep Singh static struct
160*cc166b51SGagandeep Singh fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
161*cc166b51SGagandeep Singh {
162*cc166b51SGagandeep Singh 	struct fsl_qdma_queue *status_head;
163*cc166b51SGagandeep Singh 	unsigned int status_size;
164*cc166b51SGagandeep Singh 
165*cc166b51SGagandeep Singh 	status_size = QDMA_STATUS_SIZE;
166*cc166b51SGagandeep Singh 	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
167*cc166b51SGagandeep Singh 	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
168*cc166b51SGagandeep Singh 		DPAA_QDMA_ERR("Get wrong status_size.\n");
169*cc166b51SGagandeep Singh 		return NULL;
170*cc166b51SGagandeep Singh 	}
171*cc166b51SGagandeep Singh 
172*cc166b51SGagandeep Singh 	status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
173*cc166b51SGagandeep Singh 	if (!status_head)
174*cc166b51SGagandeep Singh 		return NULL;
175*cc166b51SGagandeep Singh 
176*cc166b51SGagandeep Singh 	/*
177*cc166b51SGagandeep Singh 	 * Buffer for queue command
178*cc166b51SGagandeep Singh 	 */
179*cc166b51SGagandeep Singh 	status_head->cq = dma_pool_alloc(sizeof(struct fsl_qdma_format) *
180*cc166b51SGagandeep Singh 					 status_size,
181*cc166b51SGagandeep Singh 					 sizeof(struct fsl_qdma_format) *
182*cc166b51SGagandeep Singh 					 status_size,
183*cc166b51SGagandeep Singh 					 &status_head->bus_addr);
184*cc166b51SGagandeep Singh 
185*cc166b51SGagandeep Singh 	if (!status_head->cq) {
186*cc166b51SGagandeep Singh 		rte_free(status_head);
187*cc166b51SGagandeep Singh 		return NULL;
188*cc166b51SGagandeep Singh 	}
189*cc166b51SGagandeep Singh 
190*cc166b51SGagandeep Singh 	memset(status_head->cq, 0x0, status_size *
191*cc166b51SGagandeep Singh 	       sizeof(struct fsl_qdma_format));
192*cc166b51SGagandeep Singh 	status_head->n_cq = status_size;
193*cc166b51SGagandeep Singh 	status_head->virt_head = status_head->cq;
194*cc166b51SGagandeep Singh 
195*cc166b51SGagandeep Singh 	return status_head;
196*cc166b51SGagandeep Singh }
197583f3732SGagandeep Singh 
198583f3732SGagandeep Singh static int
199*cc166b51SGagandeep Singh fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
200583f3732SGagandeep Singh {
201*cc166b51SGagandeep Singh 	void *ctrl = fsl_qdma->ctrl_base;
202*cc166b51SGagandeep Singh 	void *block;
203*cc166b51SGagandeep Singh 	int i, count = RETRIES;
204*cc166b51SGagandeep Singh 	unsigned int j;
205*cc166b51SGagandeep Singh 	u32 reg;
206*cc166b51SGagandeep Singh 
207*cc166b51SGagandeep Singh 	/* Disable the command queue and wait for idle state. */
208*cc166b51SGagandeep Singh 	reg = qdma_readl(ctrl + FSL_QDMA_DMR);
209*cc166b51SGagandeep Singh 	reg |= FSL_QDMA_DMR_DQD;
210*cc166b51SGagandeep Singh 	qdma_writel(reg, ctrl + FSL_QDMA_DMR);
211*cc166b51SGagandeep Singh 	for (j = 0; j < fsl_qdma->num_blocks; j++) {
212*cc166b51SGagandeep Singh 		block = fsl_qdma->block_base +
213*cc166b51SGagandeep Singh 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
214*cc166b51SGagandeep Singh 		for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
215*cc166b51SGagandeep Singh 			qdma_writel(0, block + FSL_QDMA_BCQMR(i));
216*cc166b51SGagandeep Singh 	}
217*cc166b51SGagandeep Singh 	while (true) {
218*cc166b51SGagandeep Singh 		reg = qdma_readl(ctrl + FSL_QDMA_DSR);
219*cc166b51SGagandeep Singh 		if (!(reg & FSL_QDMA_DSR_DB))
220*cc166b51SGagandeep Singh 			break;
221*cc166b51SGagandeep Singh 		if (count-- < 0)
222*cc166b51SGagandeep Singh 			return -EBUSY;
223*cc166b51SGagandeep Singh 		rte_delay_us(100);
224*cc166b51SGagandeep Singh 	}
225*cc166b51SGagandeep Singh 
226*cc166b51SGagandeep Singh 	for (j = 0; j < fsl_qdma->num_blocks; j++) {
227*cc166b51SGagandeep Singh 		block = fsl_qdma->block_base +
228*cc166b51SGagandeep Singh 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
229*cc166b51SGagandeep Singh 
230*cc166b51SGagandeep Singh 		/* Disable status queue. */
231*cc166b51SGagandeep Singh 		qdma_writel(0, block + FSL_QDMA_BSQMR);
232*cc166b51SGagandeep Singh 
233*cc166b51SGagandeep Singh 		/*
234*cc166b51SGagandeep Singh 		 * clear the command queue interrupt detect register for
235*cc166b51SGagandeep Singh 		 * all queues.
236*cc166b51SGagandeep Singh 		 */
237*cc166b51SGagandeep Singh 		qdma_writel(0xffffffff, block + FSL_QDMA_BCQIDR(0));
238*cc166b51SGagandeep Singh 	}
239*cc166b51SGagandeep Singh 
240583f3732SGagandeep Singh 	return 0;
241583f3732SGagandeep Singh }
242583f3732SGagandeep Singh 
/* Program the qDMA hardware registers: halt the engine, point every
 * command queue and status queue at its in-memory descriptor ring,
 * configure ring sizes/thresholds (log2-coded), apply the ERR010812
 * XOFF workaround, and finally re-enable dequeueing.
 * Returns 0 on success or the negative error from fsl_qdma_halt().
 * NOTE(review): register write order follows the hardware bring-up
 * sequence — do not reorder.
 */
static int
fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
{
	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
	struct fsl_qdma_queue *temp;
	void *ctrl = fsl_qdma->ctrl_base;
	void *block;
	u32 i, j;
	u32 reg;
	int ret, val;

	/* Try to halt the qDMA engine first. */
	ret = fsl_qdma_halt(fsl_qdma);
	if (ret) {
		DPAA_QDMA_ERR("DMA halt failed!");
		return ret;
	}

	for (j = 0; j < fsl_qdma->num_blocks; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
		for (i = 0; i < fsl_qdma->n_queues; i++) {
			temp = fsl_queue + i + (j * fsl_qdma->n_queues);
			/*
			 * Initialize Command Queue registers to
			 * point to the first
			 * command descriptor in memory.
			 * Dequeue Pointer Address Registers
			 * Enqueue Pointer Address Registers
			 */

			qdma_writel(lower_32_bits(temp->bus_addr),
				    block + FSL_QDMA_BCQDPA_SADDR(i));
			qdma_writel(upper_32_bits(temp->bus_addr),
				    block + FSL_QDMA_BCQEDPA_SADDR(i));
			qdma_writel(lower_32_bits(temp->bus_addr),
				    block + FSL_QDMA_BCQEPA_SADDR(i));
			qdma_writel(upper_32_bits(temp->bus_addr),
				    block + FSL_QDMA_BCQEEPA_SADDR(i));

			/* Initialize the queue mode: enable the queue and
			 * encode dequeue threshold and ring size as log2
			 * values (hardware field offsets of 4 and 6).
			 */
			reg = FSL_QDMA_BCQMR_EN;
			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
		}

		/*
		 * Workaround for erratum: ERR010812.
		 * We must enable XOFF to avoid the enqueue rejection occurs.
		 * Setting SQCCMR ENTER_WM to 0x20.
		 */

		qdma_writel(FSL_QDMA_SQCCMR_ENTER_WM,
			    block + FSL_QDMA_SQCCMR);

		/*
		 * Initialize status queue registers to point to the first
		 * command descriptor in memory.
		 * Dequeue Pointer Address Registers
		 * Enqueue Pointer Address Registers
		 */

		qdma_writel(
			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
			    block + FSL_QDMA_SQEEPAR);
		qdma_writel(
			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
			    block + FSL_QDMA_SQEPAR);
		qdma_writel(
			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
			    block + FSL_QDMA_SQEDPAR);
		qdma_writel(
			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
			    block + FSL_QDMA_SQDPAR);
		/* Disable command and status queue interrupts. */

		qdma_writel(0x0, block + FSL_QDMA_BCQIER(0));
		qdma_writel(0x0, block + FSL_QDMA_BSQICR);
		qdma_writel(0x0, block + FSL_QDMA_CQIER);

		/* Initialize the status queue mode. */
		reg = FSL_QDMA_BSQMR_EN;
		val = ilog2(fsl_qdma->status[j]->n_cq) - 6;
		reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
		qdma_writel(reg, block + FSL_QDMA_BSQMR);
	}

	/* Clear DQD: allow the engine to dequeue commands again. */
	reg = qdma_readl(ctrl + FSL_QDMA_DMR);
	reg &= ~FSL_QDMA_DMR_DQD;
	qdma_writel(reg, ctrl + FSL_QDMA_DMR);

	return 0;
}
337*cc166b51SGagandeep Singh 
338*cc166b51SGagandeep Singh static void
339*cc166b51SGagandeep Singh dma_release(void *fsl_chan)
340*cc166b51SGagandeep Singh {
341*cc166b51SGagandeep Singh 	((struct fsl_qdma_chan *)fsl_chan)->free = true;
342*cc166b51SGagandeep Singh 	fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan);
343*cc166b51SGagandeep Singh }
344*cc166b51SGagandeep Singh 
345*cc166b51SGagandeep Singh static int
346*cc166b51SGagandeep Singh dpaa_qdma_init(struct rte_dma_dev *dmadev)
347*cc166b51SGagandeep Singh {
348*cc166b51SGagandeep Singh 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
349*cc166b51SGagandeep Singh 	struct fsl_qdma_chan *fsl_chan;
350*cc166b51SGagandeep Singh 	uint64_t phys_addr;
351*cc166b51SGagandeep Singh 	unsigned int len;
352*cc166b51SGagandeep Singh 	int ccsr_qdma_fd;
353*cc166b51SGagandeep Singh 	int regs_size;
354*cc166b51SGagandeep Singh 	int ret;
355*cc166b51SGagandeep Singh 	u32 i;
356*cc166b51SGagandeep Singh 
357*cc166b51SGagandeep Singh 	fsl_qdma->desc_allocated = 0;
358*cc166b51SGagandeep Singh 	fsl_qdma->n_chans = VIRT_CHANNELS;
359*cc166b51SGagandeep Singh 	fsl_qdma->n_queues = QDMA_QUEUES;
360*cc166b51SGagandeep Singh 	fsl_qdma->num_blocks = QDMA_BLOCKS;
361*cc166b51SGagandeep Singh 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
362*cc166b51SGagandeep Singh 
363*cc166b51SGagandeep Singh 	len = sizeof(*fsl_chan) * fsl_qdma->n_chans;
364*cc166b51SGagandeep Singh 	fsl_qdma->chans = rte_zmalloc("qdma: fsl chans", len, 0);
365*cc166b51SGagandeep Singh 	if (!fsl_qdma->chans)
366*cc166b51SGagandeep Singh 		return -1;
367*cc166b51SGagandeep Singh 
368*cc166b51SGagandeep Singh 	len = sizeof(struct fsl_qdma_queue *) * fsl_qdma->num_blocks;
369*cc166b51SGagandeep Singh 	fsl_qdma->status = rte_zmalloc("qdma: fsl status", len, 0);
370*cc166b51SGagandeep Singh 	if (!fsl_qdma->status) {
371*cc166b51SGagandeep Singh 		rte_free(fsl_qdma->chans);
372*cc166b51SGagandeep Singh 		return -1;
373*cc166b51SGagandeep Singh 	}
374*cc166b51SGagandeep Singh 
375*cc166b51SGagandeep Singh 	for (i = 0; i < fsl_qdma->num_blocks; i++) {
376*cc166b51SGagandeep Singh 		rte_atomic32_init(&wait_task[i]);
377*cc166b51SGagandeep Singh 		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
378*cc166b51SGagandeep Singh 		if (!fsl_qdma->status[i])
379*cc166b51SGagandeep Singh 			goto err;
380*cc166b51SGagandeep Singh 	}
381*cc166b51SGagandeep Singh 
382*cc166b51SGagandeep Singh 	ccsr_qdma_fd = open("/dev/mem", O_RDWR);
383*cc166b51SGagandeep Singh 	if (unlikely(ccsr_qdma_fd < 0)) {
384*cc166b51SGagandeep Singh 		DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map");
385*cc166b51SGagandeep Singh 		goto err;
386*cc166b51SGagandeep Singh 	}
387*cc166b51SGagandeep Singh 
388*cc166b51SGagandeep Singh 	regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
389*cc166b51SGagandeep Singh 	phys_addr = QDMA_CCSR_BASE;
390*cc166b51SGagandeep Singh 	fsl_qdma->ctrl_base = mmap(NULL, regs_size, PROT_READ |
391*cc166b51SGagandeep Singh 					 PROT_WRITE, MAP_SHARED,
392*cc166b51SGagandeep Singh 					 ccsr_qdma_fd, phys_addr);
393*cc166b51SGagandeep Singh 
394*cc166b51SGagandeep Singh 	close(ccsr_qdma_fd);
395*cc166b51SGagandeep Singh 	if (fsl_qdma->ctrl_base == MAP_FAILED) {
396*cc166b51SGagandeep Singh 		DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64
397*cc166b51SGagandeep Singh 		       "size %d\n", phys_addr, regs_size);
398*cc166b51SGagandeep Singh 		goto err;
399*cc166b51SGagandeep Singh 	}
400*cc166b51SGagandeep Singh 
401*cc166b51SGagandeep Singh 	fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
402*cc166b51SGagandeep Singh 	fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;
403*cc166b51SGagandeep Singh 
404*cc166b51SGagandeep Singh 	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(fsl_qdma);
405*cc166b51SGagandeep Singh 	if (!fsl_qdma->queue) {
406*cc166b51SGagandeep Singh 		munmap(fsl_qdma->ctrl_base, regs_size);
407*cc166b51SGagandeep Singh 		goto err;
408*cc166b51SGagandeep Singh 	}
409*cc166b51SGagandeep Singh 
410*cc166b51SGagandeep Singh 	for (i = 0; i < fsl_qdma->n_chans; i++) {
411*cc166b51SGagandeep Singh 		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
412*cc166b51SGagandeep Singh 
413*cc166b51SGagandeep Singh 		fsl_chan->qdma = fsl_qdma;
414*cc166b51SGagandeep Singh 		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
415*cc166b51SGagandeep Singh 							fsl_qdma->num_blocks);
416*cc166b51SGagandeep Singh 		fsl_chan->free = true;
417*cc166b51SGagandeep Singh 	}
418*cc166b51SGagandeep Singh 
419*cc166b51SGagandeep Singh 	ret = fsl_qdma_reg_init(fsl_qdma);
420*cc166b51SGagandeep Singh 	if (ret) {
421*cc166b51SGagandeep Singh 		DPAA_QDMA_ERR("Can't Initialize the qDMA engine.\n");
422*cc166b51SGagandeep Singh 		munmap(fsl_qdma->ctrl_base, regs_size);
423*cc166b51SGagandeep Singh 		goto err;
424*cc166b51SGagandeep Singh 	}
425*cc166b51SGagandeep Singh 
426*cc166b51SGagandeep Singh 	return 0;
427*cc166b51SGagandeep Singh 
428*cc166b51SGagandeep Singh err:
429*cc166b51SGagandeep Singh 	rte_free(fsl_qdma->chans);
430*cc166b51SGagandeep Singh 	rte_free(fsl_qdma->status);
431*cc166b51SGagandeep Singh 
432*cc166b51SGagandeep Singh 	return -1;
433*cc166b51SGagandeep Singh }
434*cc166b51SGagandeep Singh 
435*cc166b51SGagandeep Singh static int
436*cc166b51SGagandeep Singh dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
437*cc166b51SGagandeep Singh 		struct rte_dpaa_device *dpaa_dev)
438*cc166b51SGagandeep Singh {
439*cc166b51SGagandeep Singh 	struct rte_dma_dev *dmadev;
440*cc166b51SGagandeep Singh 	int ret;
441*cc166b51SGagandeep Singh 
442*cc166b51SGagandeep Singh 	dmadev = rte_dma_pmd_allocate(dpaa_dev->device.name,
443*cc166b51SGagandeep Singh 				      rte_socket_id(),
444*cc166b51SGagandeep Singh 				      sizeof(struct fsl_qdma_engine));
445*cc166b51SGagandeep Singh 	if (!dmadev) {
446*cc166b51SGagandeep Singh 		DPAA_QDMA_ERR("Unable to allocate dmadevice");
447*cc166b51SGagandeep Singh 		return -EINVAL;
448*cc166b51SGagandeep Singh 	}
449*cc166b51SGagandeep Singh 
450*cc166b51SGagandeep Singh 	dpaa_dev->dmadev = dmadev;
451*cc166b51SGagandeep Singh 
452*cc166b51SGagandeep Singh 	/* Invoke PMD device initialization function */
453*cc166b51SGagandeep Singh 	ret = dpaa_qdma_init(dmadev);
454*cc166b51SGagandeep Singh 	if (ret) {
455*cc166b51SGagandeep Singh 		(void)rte_dma_pmd_release(dpaa_dev->device.name);
456*cc166b51SGagandeep Singh 		return ret;
457*cc166b51SGagandeep Singh 	}
458*cc166b51SGagandeep Singh 
459*cc166b51SGagandeep Singh 	dmadev->state = RTE_DMA_DEV_READY;
460*cc166b51SGagandeep Singh 	return 0;
461*cc166b51SGagandeep Singh }
462*cc166b51SGagandeep Singh 
463*cc166b51SGagandeep Singh static int
464*cc166b51SGagandeep Singh dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
465*cc166b51SGagandeep Singh {
466*cc166b51SGagandeep Singh 	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
467*cc166b51SGagandeep Singh 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
468*cc166b51SGagandeep Singh 	int i = 0, max = QDMA_QUEUES * QDMA_BLOCKS;
469*cc166b51SGagandeep Singh 
470*cc166b51SGagandeep Singh 	for (i = 0; i < max; i++) {
471*cc166b51SGagandeep Singh 		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
472*cc166b51SGagandeep Singh 
473*cc166b51SGagandeep Singh 		if (fsl_chan->free == false)
474*cc166b51SGagandeep Singh 			dma_release(fsl_chan);
475*cc166b51SGagandeep Singh 	}
476*cc166b51SGagandeep Singh 
477*cc166b51SGagandeep Singh 	rte_free(fsl_qdma->status);
478*cc166b51SGagandeep Singh 	rte_free(fsl_qdma->chans);
479*cc166b51SGagandeep Singh 
480*cc166b51SGagandeep Singh 	(void)rte_dma_pmd_release(dpaa_dev->device.name);
481*cc166b51SGagandeep Singh 
482583f3732SGagandeep Singh 	return 0;
483583f3732SGagandeep Singh }
484583f3732SGagandeep Singh 
static struct rte_dpaa_driver rte_dpaa_qdma_pmd;

/* DPAA bus driver hooks for the qDMA DMA device. */
static struct rte_dpaa_driver rte_dpaa_qdma_pmd = {
	.drv_type = FSL_DPAA_QDMA,
	.probe = dpaa_qdma_probe,
	.remove = dpaa_qdma_remove,
};

/* Register the driver with the DPAA bus and create the default logtype. */
RTE_PMD_REGISTER_DPAA(dpaa_qdma, rte_dpaa_qdma_pmd);
RTE_LOG_REGISTER_DEFAULT(dpaa_qdma_logtype, INFO);
495