xref: /netbsd-src/sys/arch/arm/sunxi/sun4i_dma.c (revision 1580a27b92f58fcdcb23fdfbc04a7c2b54a0b7c8)
/* $NetBSD: sun4i_dma.c,v 1.1 2017/08/27 16:05:26 jmcneill Exp $ */

/*-
 * Copyright (c) 2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ddb.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sun4i_dma.c,v 1.1 2017/08/27 16:05:26 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/bitops.h>
#include <sys/kmem.h>

#include <dev/fdt/fdtvar.h>

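/*
 * Register layout: the controller exposes shared interrupt enable and
 * pending registers, followed by eight "normal" DMA (NDMA) channels at
 * 0x100 + n*0x20 and eight "dedicated" DMA (DDMA) channels at
 * 0x300 + n*0x20.
 */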
#define	DMA_MAX_TYPES		2
#define	 DMA_TYPE_NORMAL	0
#define	 DMA_TYPE_DEDICATED	1
#define	DMA_MAX_CHANNELS	8
#define	DMA_MAX_DRQS		32

#define	DRQ_TYPE_SDRAM		0x16

#define	DMA_IRQ_EN_REG		0x00
#define	DMA_IRQ_PEND_STAS_REG	0x04
#define	 DMA_IRQ_PEND_STAS_END_MASK	0xaaaaaaaa
#define	NDMA_CTRL_REG(n)	(0x100 + (n) * 0x20)
#define	 NDMA_CTRL_LOAD			__BIT(31)
#define	 NDMA_CTRL_CONTI_EN		__BIT(30)
#define	 NDMA_CTRL_WAIT_STATE		__BITS(29,27)
#define	 NDMA_CTRL_DST_DATA_WIDTH	__BITS(26,25)
#define	 NDMA_CTRL_DST_BST_LEN		__BITS(24,23)
#define	 NDMA_CTRL_DST_ADDR_TYPE	__BIT(21)
#define	 NDMA_CTRL_DST_DRQ_TYPE		__BITS(20,16)
#define	 NDMA_CTRL_BC_MODE_SEL		__BIT(15)
#define	 NDMA_CTRL_SRC_DATA_WIDTH	__BITS(10,9)
#define	 NDMA_CTRL_SRC_BST_LEN		__BITS(8,7)
#define	 NDMA_CTRL_SRC_ADDR_TYPE	__BIT(5)
#define	 NDMA_CTRL_SRC_DRQ_TYPE		__BITS(4,0)
#define	NDMA_SRC_ADDR_REG(n)	(0x100 + (n) * 0x20 + 0x4)
#define	NDMA_DEST_ADDR_REG(n)	(0x100 + (n) * 0x20 + 0x8)
#define	NDMA_BC_REG(n)		(0x100 + (n) * 0x20 + 0xc)
#define	DDMA_CTRL_REG(n)	(0x300 + (n) * 0x20)
#define	 DDMA_CTRL_LOAD			__BIT(31)
#define	 DDMA_CTRL_BSY_STA		__BIT(30)
#define	 DDMA_CTRL_CONTI_EN		__BIT(29)
#define	 DDMA_CTRL_DST_DATA_WIDTH	__BITS(26,25)
#define	 DDMA_CTRL_DST_BST_LEN		__BITS(24,23)
#define	 DDMA_CTRL_DST_ADDR_MODE	__BITS(22,21)
#define	 DDMA_CTRL_DST_DRQ_TYPE		__BITS(20,16)
#define	 DDMA_CTRL_BC_MODE_SEL		__BIT(15)
#define	 DDMA_CTRL_SRC_DATA_WIDTH	__BITS(10,9)
#define	 DDMA_CTRL_SRC_BST_LEN		__BITS(8,7)
#define	 DDMA_CTRL_SRC_ADDR_MODE	__BITS(6,5)
#define	 DDMA_CTRL_SRC_DRQ_TYPE		__BITS(4,0)
#define	DDMA_SRC_ADDR_REG(n)	(0x300 + (n) * 0x20 + 0x4)
#define	DDMA_DEST_ADDR_REG(n)	(0x300 + (n) * 0x20 + 0x8)
#define	DDMA_BC_REG(n)		(0x300 + (n) * 0x20 + 0xc)
#define	DDMA_PARA_REG(n)	(0x300 + (n) * 0x20 + 0x18)
#define	 DDMA_PARA_DST_DATA_BLK_SIZE	__BITS(31,24)
#define	 DDMA_PARA_DST_WAIT_CLK_CYC	__BITS(23,16)
#define	 DDMA_PARA_SRC_DATA_BLK_SIZE	__BITS(15,8)
#define	 DDMA_PARA_SRC_WAIT_CLK_CYC	__BITS(7,0)

static const struct of_compat_data compat_data[] = {
	{ "allwinner,sun4i-a10-dma",		1 },
	{ NULL }
};

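/*
 * Per-channel software state.  A channel is free when ch_callback is
 * NULL; ch_irqmask selects the channel's end-of-transfer bit in the
 * interrupt enable/pending registers.
 */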
struct sun4idma_channel {
	uint8_t			ch_type;
	uint8_t			ch_index;
	uint32_t		ch_irqmask;
	void			(*ch_callback)(void *);
	void			*ch_callbackarg;
	u_int			ch_drq;
};

struct sun4idma_softc {
	device_t		sc_dev;
	bus_space_tag_t		sc_bst;
	bus_space_handle_t	sc_bsh;
	bus_dma_tag_t		sc_dmat;
	int			sc_phandle;
	void			*sc_ih;

	kmutex_t		sc_lock;

	struct sun4idma_channel	sc_chan[DMA_MAX_TYPES][DMA_MAX_CHANNELS];
};

#define DMA_READ(sc, reg)		\
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define DMA_WRITE(sc, reg, val)		\
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))

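/*
 * Allocate a free channel of the requested type.  The devicetree
 * specifier is two cells, <type drq>: type selects NDMA (0) or DDMA (1)
 * and drq is the peripheral DRQ number.  The channel's end-of-transfer
 * interrupt stays enabled for as long as the channel is held.
 */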
static void *
sun4idma_acquire(device_t dev, const void *data, size_t len,
    void (*cb)(void *), void *cbarg)
{
	struct sun4idma_softc *sc = device_private(dev);
	struct sun4idma_channel *ch = NULL;
	const uint32_t *specifier = data;
	uint32_t irqen;
	uint8_t index;

	if (len != 8)
		return NULL;

	const u_int type = be32toh(specifier[0]);
	const u_int drq = be32toh(specifier[1]);

	if (type >= DMA_MAX_TYPES || drq >= DMA_MAX_DRQS)
		return NULL;

	mutex_enter(&sc->sc_lock);

	for (index = 0; index < DMA_MAX_CHANNELS; index++) {
		if (sc->sc_chan[type][index].ch_callback == NULL) {
			ch = &sc->sc_chan[type][index];
			ch->ch_callback = cb;
			ch->ch_callbackarg = cbarg;
			ch->ch_drq = drq;

			irqen = DMA_READ(sc, DMA_IRQ_EN_REG);
			irqen |= ch->ch_irqmask;
			DMA_WRITE(sc, DMA_IRQ_EN_REG, irqen);

			break;
		}
	}

	mutex_exit(&sc->sc_lock);

	return ch;
}

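/*
 * Return a channel to the free pool and disable its interrupt.
 */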
static void
sun4idma_release(device_t dev, void *priv)
{
	struct sun4idma_softc *sc = device_private(dev);
	struct sun4idma_channel *ch = priv;
	uint32_t irqen;

	mutex_enter(&sc->sc_lock);

	irqen = DMA_READ(sc, DMA_IRQ_EN_REG);
	irqen &= ~ch->ch_irqmask;
	DMA_WRITE(sc, DMA_IRQ_EN_REG, irqen);

	ch->ch_callback = NULL;
	ch->ch_callbackarg = NULL;

	mutex_exit(&sc->sc_lock);
}

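/*
 * Program a single-segment transfer on a normal DMA (NDMA) channel.
 * Both the memory and the device halves of the channel configuration
 * are assembled in the SRC_* bit positions; whichever half ends up on
 * the destination side is shifted left by 16 bits, since the DST_*
 * fields mirror the SRC_* fields 16 bits higher in the control
 * register.
 */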
static int
sun4idma_transfer_ndma(struct sun4idma_softc *sc, struct sun4idma_channel *ch,
   struct fdtbus_dma_req *req)
{
	uint32_t cfg, mem_cfg, dev_cfg, src, dst;
	uint32_t mem_width, dev_width, mem_burst, dev_burst;

	mem_width = req->dreq_mem_opt.opt_bus_width >> 4;
	dev_width = req->dreq_dev_opt.opt_bus_width >> 4;
	mem_burst = req->dreq_mem_opt.opt_burst_len == 1 ? 0 :
		    (req->dreq_mem_opt.opt_burst_len >> 3) + 1;
	dev_burst = req->dreq_dev_opt.opt_burst_len == 1 ? 0 :
		    (req->dreq_dev_opt.opt_burst_len >> 3) + 1;

	mem_cfg = __SHIFTIN(mem_width, NDMA_CTRL_SRC_DATA_WIDTH) |
	    __SHIFTIN(mem_burst, NDMA_CTRL_SRC_BST_LEN) |
	    __SHIFTIN(DRQ_TYPE_SDRAM, NDMA_CTRL_SRC_DRQ_TYPE);
	dev_cfg = __SHIFTIN(dev_width, NDMA_CTRL_SRC_DATA_WIDTH) |
	    __SHIFTIN(dev_burst, NDMA_CTRL_SRC_BST_LEN) |
	    __SHIFTIN(ch->ch_drq, NDMA_CTRL_SRC_DRQ_TYPE) |
	    NDMA_CTRL_SRC_ADDR_TYPE;

	if (req->dreq_dir == FDT_DMA_READ) {
		src = req->dreq_dev_phys;
		dst = req->dreq_segs[0].ds_addr;
		cfg = mem_cfg << 16 | dev_cfg;
	} else {
		src = req->dreq_segs[0].ds_addr;
		dst = req->dreq_dev_phys;
		cfg = dev_cfg << 16 | mem_cfg;
	}

	DMA_WRITE(sc, NDMA_SRC_ADDR_REG(ch->ch_index), src);
	DMA_WRITE(sc, NDMA_DEST_ADDR_REG(ch->ch_index), dst);
	DMA_WRITE(sc, NDMA_BC_REG(ch->ch_index), req->dreq_segs[0].ds_len);
	DMA_WRITE(sc, NDMA_CTRL_REG(ch->ch_index), cfg | NDMA_CTRL_LOAD);

	return 0;
}

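/*
 * Program a single-segment transfer on a dedicated DMA (DDMA) channel.
 * The same SRC-to-DST shifting trick as for NDMA applies.  The memory
 * side uses address mode 0 and the device side mode 1 (linear vs. I/O
 * addressing); the PARA (block size / wait cycle) register is simply
 * cleared.
 */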
static int
sun4idma_transfer_ddma(struct sun4idma_softc *sc, struct sun4idma_channel *ch,
   struct fdtbus_dma_req *req)
{
	uint32_t cfg, mem_cfg, dev_cfg, src, dst;
	uint32_t mem_width, dev_width, mem_burst, dev_burst;

	mem_width = req->dreq_mem_opt.opt_bus_width >> 4;
	dev_width = req->dreq_dev_opt.opt_bus_width >> 4;
	mem_burst = req->dreq_mem_opt.opt_burst_len == 1 ? 0 :
		    (req->dreq_mem_opt.opt_burst_len >> 3) + 1;
	dev_burst = req->dreq_dev_opt.opt_burst_len == 1 ? 0 :
		    (req->dreq_dev_opt.opt_burst_len >> 3) + 1;

	mem_cfg = __SHIFTIN(mem_width, DDMA_CTRL_SRC_DATA_WIDTH) |
	    __SHIFTIN(mem_burst, DDMA_CTRL_SRC_BST_LEN) |
	    __SHIFTIN(DRQ_TYPE_SDRAM, DDMA_CTRL_SRC_DRQ_TYPE) |
	    __SHIFTIN(0, DDMA_CTRL_SRC_ADDR_MODE);
	dev_cfg = __SHIFTIN(dev_width, DDMA_CTRL_SRC_DATA_WIDTH) |
	    __SHIFTIN(dev_burst, DDMA_CTRL_SRC_BST_LEN) |
	    __SHIFTIN(ch->ch_drq, DDMA_CTRL_SRC_DRQ_TYPE) |
	    __SHIFTIN(1, DDMA_CTRL_SRC_ADDR_MODE);

	if (req->dreq_dir == FDT_DMA_READ) {
		src = req->dreq_dev_phys;
		dst = req->dreq_segs[0].ds_addr;
		cfg = mem_cfg << 16 | dev_cfg;
	} else {
		src = req->dreq_segs[0].ds_addr;
		dst = req->dreq_dev_phys;
		cfg = dev_cfg << 16 | mem_cfg;
	}

	DMA_WRITE(sc, DDMA_SRC_ADDR_REG(ch->ch_index), src);
	DMA_WRITE(sc, DDMA_DEST_ADDR_REG(ch->ch_index), dst);
	DMA_WRITE(sc, DDMA_BC_REG(ch->ch_index), req->dreq_segs[0].ds_len);
	DMA_WRITE(sc, DDMA_PARA_REG(ch->ch_index), 0);
	DMA_WRITE(sc, DDMA_CTRL_REG(ch->ch_index), cfg | DDMA_CTRL_LOAD);

	return 0;
}

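/*
 * Start a transfer on behalf of a client.  Only single-segment requests
 * are supported; dispatch to the normal or dedicated engine depending on
 * the type of channel that was acquired.
 */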
static int
sun4idma_transfer(device_t dev, void *priv, struct fdtbus_dma_req *req)
{
	struct sun4idma_softc *sc = device_private(dev);
	struct sun4idma_channel *ch = priv;

	if (req->dreq_nsegs != 1)
		return EINVAL;

	if (ch->ch_type == DMA_TYPE_NORMAL)
		return sun4idma_transfer_ndma(sc, ch, req);
	else
		return sun4idma_transfer_ddma(sc, ch, req);
}

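/*
 * Halt a channel by clearing its control register (this drops the LOAD
 * bit along with the rest of the configuration).
 */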
static void
sun4idma_halt(device_t dev, void *priv)
{
	struct sun4idma_softc *sc = device_private(dev);
	struct sun4idma_channel *ch = priv;

	if (ch->ch_type == DMA_TYPE_NORMAL)
		DMA_WRITE(sc, NDMA_CTRL_REG(ch->ch_index), 0);
	else
		DMA_WRITE(sc, DDMA_CTRL_REG(ch->ch_index), 0);
}

static const struct fdtbus_dma_controller_func sun4idma_funcs = {
	.acquire = sun4idma_acquire,
	.release = sun4idma_release,
	.transfer = sun4idma_transfer,
	.halt = sun4idma_halt
};

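/*
 * Interrupt handler.  Each channel owns two bits in the pending
 * register, normal channels in the low half and dedicated channels in
 * the high half.  All pending bits are acknowledged, but only the
 * end-of-transfer bits (the odd bit of each pair) are dispatched to the
 * channel callbacks.
 */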
static int
sun4idma_intr(void *priv)
{
	struct sun4idma_softc *sc = priv;
	uint32_t pend, mask, bit;
	uint8_t type, index;

	pend = DMA_READ(sc, DMA_IRQ_PEND_STAS_REG);
	if (pend == 0)
		return 0;

	DMA_WRITE(sc, DMA_IRQ_PEND_STAS_REG, pend);

	pend &= DMA_IRQ_PEND_STAS_END_MASK;

	while ((bit = ffs32(pend)) != 0) {
		mask = __BIT(bit - 1);
		pend &= ~mask;
		type = ((bit - 1) / 2) / 8;
		index = ((bit - 1) / 2) % 8;

		if (sc->sc_chan[type][index].ch_callback == NULL)
			continue;
		sc->sc_chan[type][index].ch_callback(
		    sc->sc_chan[type][index].ch_callbackarg);
	}

	return 1;
}

static int
sun4idma_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_match_compat_data(faa->faa_phandle, compat_data);
}

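/*
 * Attach: enable the module clock, map the registers, mask and
 * acknowledge all interrupts, reset every channel, establish the
 * MP-safe IPL_SCHED interrupt handler and register as an FDT DMA
 * controller.
 */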
static void
sun4idma_attach(device_t parent, device_t self, void *aux)
{
	struct sun4idma_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	struct clk *clk;
	char intrstr[128];
	bus_addr_t addr;
	bus_size_t size;
	u_int index, type;

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	if ((clk = fdtbus_clock_get_index(phandle, 0)) == NULL ||
	    clk_enable(clk) != 0) {
		aprint_error(": couldn't enable clock\n");
		return;
	}

	sc->sc_dev = self;
	sc->sc_phandle = phandle;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) {
		aprint_error(": couldn't map registers\n");
		return;
	}
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SCHED);

	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error(": failed to decode interrupt\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": DMA controller\n");

	DMA_WRITE(sc, DMA_IRQ_EN_REG, 0);
	DMA_WRITE(sc, DMA_IRQ_PEND_STAS_REG, ~0);

	for (type = 0; type < DMA_MAX_TYPES; type++) {
		for (index = 0; index < DMA_MAX_CHANNELS; index++) {
			struct sun4idma_channel *ch = &sc->sc_chan[type][index];
			ch->ch_type = type;
			ch->ch_index = index;
			ch->ch_irqmask = __BIT((type * 16) + (index * 2) + 1);
			ch->ch_callback = NULL;
			ch->ch_callbackarg = NULL;

			if (type == DMA_TYPE_NORMAL)
				DMA_WRITE(sc, NDMA_CTRL_REG(index), 0);
			else
				DMA_WRITE(sc, DDMA_CTRL_REG(index), 0);
		}
	}

	sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_SCHED,
	    FDT_INTR_MPSAFE, sun4idma_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish interrupt on %s\n", intrstr);
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting on %s\n", intrstr);

	fdtbus_register_dma_controller(self, phandle, &sun4idma_funcs);
}

CFATTACH_DECL_NEW(sun4i_dma, sizeof(struct sun4idma_softc),
	sun4idma_match, sun4idma_attach, NULL, NULL);