/* $NetBSD: sun4i_dma.c,v 1.8 2021/01/27 03:10:20 thorpej Exp $ */

/*-
 * Copyright (c) 2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ddb.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sun4i_dma.c,v 1.8 2021/01/27 03:10:20 thorpej Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/bitops.h>
#include <sys/kmem.h>

#include <dev/fdt/fdtvar.h>

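/*
 * Register definitions for the Allwinner A10 (sun4i) DMA controller.
 * The controller has two kinds of channels, "normal" (NDMA) and
 * "dedicated" (DDMA), with eight channels of each type.  NDMA channel
 * registers start at offset 0x100, DDMA channel registers at 0x300.
 */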
#define	DMA_MAX_TYPES		2
#define	 DMA_TYPE_NORMAL	0
#define	 DMA_TYPE_DEDICATED	1
#define	DMA_MAX_CHANNELS	8
#define	DMA_MAX_DRQS		32

#define	DRQ_TYPE_SDRAM		0x16

#define	DMA_IRQ_EN_REG		0x00
#define	DMA_IRQ_PEND_STAS_REG	0x04
#define	 DMA_IRQ_PEND_STAS_END_MASK	0xaaaaaaaa
#define	NDMA_CTRL_REG(n)	(0x100 + (n) * 0x20)
#define	 NDMA_CTRL_LOAD			__BIT(31)
#define	 NDMA_CTRL_CONTI_EN		__BIT(30)
#define	 NDMA_CTRL_WAIT_STATE		__BITS(29,27)
#define	 NDMA_CTRL_DST_DATA_WIDTH	__BITS(26,25)
#define	 NDMA_CTRL_DST_BST_LEN		__BITS(24,23)
#define	 NDMA_CTRL_DST_ADDR_TYPE	__BIT(21)
#define	 NDMA_CTRL_DST_DRQ_TYPE		__BITS(20,16)
#define	 NDMA_CTRL_BC_MODE_SEL		__BIT(15)
#define	 NDMA_CTRL_SRC_DATA_WIDTH	__BITS(10,9)
#define	 NDMA_CTRL_SRC_BST_LEN		__BITS(8,7)
#define	 NDMA_CTRL_SRC_ADDR_TYPE	__BIT(5)
#define	 NDMA_CTRL_SRC_DRQ_TYPE		__BITS(4,0)
#define	NDMA_SRC_ADDR_REG(n)	(0x100 + (n) * 0x20 + 0x4)
#define	NDMA_DEST_ADDR_REG(n)	(0x100 + (n) * 0x20 + 0x8)
#define	NDMA_BC_REG(n)		(0x100 + (n) * 0x20 + 0xc)
#define	DDMA_CTRL_REG(n)	(0x300 + (n) * 0x20)
#define	 DDMA_CTRL_LOAD			__BIT(31)
#define	 DDMA_CTRL_BSY_STA		__BIT(30)
#define	 DDMA_CTRL_CONTI_EN		__BIT(29)
#define	 DDMA_CTRL_DST_DATA_WIDTH	__BITS(26,25)
#define	 DDMA_CTRL_DST_BST_LEN		__BITS(24,23)
#define	 DDMA_CTRL_DST_ADDR_MODE	__BITS(22,21)
#define	 DDMA_CTRL_DST_DRQ_TYPE		__BITS(20,16)
#define	 DDMA_CTRL_BC_MODE_SEL		__BIT(15)
#define	 DDMA_CTRL_SRC_DATA_WIDTH	__BITS(10,9)
#define	 DDMA_CTRL_SRC_BST_LEN		__BITS(8,7)
#define	 DDMA_CTRL_SRC_ADDR_MODE	__BITS(6,5)
#define	 DDMA_CTRL_SRC_DRQ_TYPE		__BITS(4,0)
#define	DDMA_SRC_ADDR_REG(n)	(0x300 + (n) * 0x20 + 0x4)
#define	DDMA_DEST_ADDR_REG(n)	(0x300 + (n) * 0x20 + 0x8)
#define	DDMA_BC_REG(n)		(0x300 + (n) * 0x20 + 0xc)
#define	DDMA_PARA_REG(n)	(0x300 + (n) * 0x20 + 0x18)
#define	 DDMA_PARA_DST_DATA_BLK_SIZE	__BITS(31,24)
#define	 DDMA_PARA_DST_WAIT_CLK_CYC	__BITS(23,16)
#define	 DDMA_PARA_SRC_DATA_BLK_SIZE	__BITS(15,8)
#define	 DDMA_PARA_SRC_WAIT_CLK_CYC	__BITS(7,0)
#define	 DDMA_PARA_VALUE				\
	  (__SHIFTIN(1, DDMA_PARA_DST_DATA_BLK_SIZE) |	\
	   __SHIFTIN(1, DDMA_PARA_SRC_DATA_BLK_SIZE) |	\
	   __SHIFTIN(2, DDMA_PARA_DST_WAIT_CLK_CYC) |	\
	   __SHIFTIN(2, DDMA_PARA_SRC_WAIT_CLK_CYC))

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "allwinner,sun4i-a10-dma" },
	DEVICE_COMPAT_EOL
};

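/*
 * Per-channel state: the channel type and index, the bit in the IRQ
 * enable/pending registers for its end-of-transfer interrupt, the
 * completion callback installed by the client, and the DRQ number the
 * channel was acquired for.
 */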
struct sun4idma_channel {
	uint8_t			ch_type;
	uint8_t			ch_index;
	uint32_t		ch_irqmask;
	void			(*ch_callback)(void *);
	void			*ch_callbackarg;
	u_int			ch_drq;
};

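/*
 * Per-controller state.  sc_chan is indexed by channel type and
 * channel number; sc_lock serializes channel allocation and release.
 */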
struct sun4idma_softc {
	device_t		sc_dev;
	bus_space_tag_t		sc_bst;
	bus_space_handle_t	sc_bsh;
	bus_dma_tag_t		sc_dmat;
	int			sc_phandle;
	void			*sc_ih;

	kmutex_t		sc_lock;

	struct sun4idma_channel	sc_chan[DMA_MAX_TYPES][DMA_MAX_CHANNELS];
};

#define DMA_READ(sc, reg)		\
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define DMA_WRITE(sc, reg, val)		\
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))

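/*
 * fdtbus acquire callback.  The two-cell FDT specifier selects the
 * channel type (normal or dedicated) and the DRQ number.  Find a free
 * channel of the requested type, install the completion callback and
 * enable that channel's end-of-transfer interrupt.
 */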
static void *
sun4idma_acquire(device_t dev, const void *data, size_t len,
    void (*cb)(void *), void *cbarg)
{
	struct sun4idma_softc *sc = device_private(dev);
	struct sun4idma_channel *ch = NULL;
	const uint32_t *specifier = data;
	uint32_t irqen;
	uint8_t index;

	if (len != 8)
		return NULL;

	const u_int type = be32toh(specifier[0]);
	const u_int drq = be32toh(specifier[1]);

	if (type >= DMA_MAX_TYPES || drq >= DMA_MAX_DRQS)
		return NULL;

	mutex_enter(&sc->sc_lock);

	for (index = 0; index < DMA_MAX_CHANNELS; index++) {
		if (sc->sc_chan[type][index].ch_callback == NULL) {
			ch = &sc->sc_chan[type][index];
			ch->ch_callback = cb;
			ch->ch_callbackarg = cbarg;
			ch->ch_drq = drq;

			irqen = DMA_READ(sc, DMA_IRQ_EN_REG);
			irqen |= ch->ch_irqmask;
			DMA_WRITE(sc, DMA_IRQ_EN_REG, irqen);

			break;
		}
	}

	mutex_exit(&sc->sc_lock);

	return ch;
}

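/*
 * fdtbus release callback: disable the channel's interrupt and mark
 * the channel free by clearing its callback.
 */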
static void
sun4idma_release(device_t dev, void *priv)
{
	struct sun4idma_softc *sc = device_private(dev);
	struct sun4idma_channel *ch = priv;
	uint32_t irqen;

	mutex_enter(&sc->sc_lock);

	irqen = DMA_READ(sc, DMA_IRQ_EN_REG);
	irqen &= ~ch->ch_irqmask;
	DMA_WRITE(sc, DMA_IRQ_EN_REG, irqen);

	ch->ch_callback = NULL;
	ch->ch_callbackarg = NULL;

	mutex_exit(&sc->sc_lock);
}

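/*
 * Program a single-segment transfer on a "normal" (NDMA) channel.
 * Both halves of the control word are built with the SRC_* field
 * definitions; each DST_* field sits exactly 16 bits above its SRC_*
 * counterpart, so the half describing the destination is shifted left
 * by 16.  Setting NDMA_CTRL_LOAD starts the transfer.
 */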
static int
sun4idma_transfer_ndma(struct sun4idma_softc *sc, struct sun4idma_channel *ch,
   struct fdtbus_dma_req *req)
{
	uint32_t cfg, mem_cfg, dev_cfg, src, dst;
	uint32_t mem_width, dev_width, mem_burst, dev_burst;

	mem_width = req->dreq_mem_opt.opt_bus_width >> 4;
	dev_width = req->dreq_dev_opt.opt_bus_width >> 4;
	mem_burst = req->dreq_mem_opt.opt_burst_len == 1 ? 0 :
		    (req->dreq_mem_opt.opt_burst_len >> 3) + 1;
	dev_burst = req->dreq_dev_opt.opt_burst_len == 1 ? 0 :
		    (req->dreq_dev_opt.opt_burst_len >> 3) + 1;

	mem_cfg = __SHIFTIN(mem_width, NDMA_CTRL_SRC_DATA_WIDTH) |
	    __SHIFTIN(mem_burst, NDMA_CTRL_SRC_BST_LEN) |
	    __SHIFTIN(DRQ_TYPE_SDRAM, NDMA_CTRL_SRC_DRQ_TYPE);
	dev_cfg = __SHIFTIN(dev_width, NDMA_CTRL_SRC_DATA_WIDTH) |
	    __SHIFTIN(dev_burst, NDMA_CTRL_SRC_BST_LEN) |
	    __SHIFTIN(ch->ch_drq, NDMA_CTRL_SRC_DRQ_TYPE) |
	    NDMA_CTRL_SRC_ADDR_TYPE;

	if (req->dreq_dir == FDT_DMA_READ) {
		src = req->dreq_dev_phys;
		dst = req->dreq_segs[0].ds_addr;
		cfg = mem_cfg << 16 | dev_cfg;
	} else {
		src = req->dreq_segs[0].ds_addr;
		dst = req->dreq_dev_phys;
		cfg = dev_cfg << 16 | mem_cfg;
	}

	DMA_WRITE(sc, NDMA_SRC_ADDR_REG(ch->ch_index), src);
	DMA_WRITE(sc, NDMA_DEST_ADDR_REG(ch->ch_index), dst);
	DMA_WRITE(sc, NDMA_BC_REG(ch->ch_index), req->dreq_segs[0].ds_len);
	DMA_WRITE(sc, NDMA_CTRL_REG(ch->ch_index), cfg | NDMA_CTRL_LOAD);

	return 0;
}

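/*
 * Program a single-segment transfer on a "dedicated" (DDMA) channel.
 * Same layout trick as the normal channels; address mode 0 (linear) is
 * selected for the memory side and mode 1 (I/O) for the device side.
 */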
static int
sun4idma_transfer_ddma(struct sun4idma_softc *sc, struct sun4idma_channel *ch,
   struct fdtbus_dma_req *req)
{
	uint32_t cfg, mem_cfg, dev_cfg, src, dst;
	uint32_t mem_width, dev_width, mem_burst, dev_burst;

	mem_width = req->dreq_mem_opt.opt_bus_width >> 4;
	dev_width = req->dreq_dev_opt.opt_bus_width >> 4;
	mem_burst = req->dreq_mem_opt.opt_burst_len == 1 ? 0 :
		    (req->dreq_mem_opt.opt_burst_len >> 3) + 1;
	dev_burst = req->dreq_dev_opt.opt_burst_len == 1 ? 0 :
		    (req->dreq_dev_opt.opt_burst_len >> 3) + 1;

	mem_cfg = __SHIFTIN(mem_width, DDMA_CTRL_SRC_DATA_WIDTH) |
	    __SHIFTIN(mem_burst, DDMA_CTRL_SRC_BST_LEN) |
	    __SHIFTIN(DRQ_TYPE_SDRAM, DDMA_CTRL_SRC_DRQ_TYPE) |
	    __SHIFTIN(0, DDMA_CTRL_SRC_ADDR_MODE);
	dev_cfg = __SHIFTIN(dev_width, DDMA_CTRL_SRC_DATA_WIDTH) |
	    __SHIFTIN(dev_burst, DDMA_CTRL_SRC_BST_LEN) |
	    __SHIFTIN(ch->ch_drq, DDMA_CTRL_SRC_DRQ_TYPE) |
	    __SHIFTIN(1, DDMA_CTRL_SRC_ADDR_MODE);

	if (req->dreq_dir == FDT_DMA_READ) {
		src = req->dreq_dev_phys;
		dst = req->dreq_segs[0].ds_addr;
		cfg = mem_cfg << 16 | dev_cfg;
	} else {
		src = req->dreq_segs[0].ds_addr;
		dst = req->dreq_dev_phys;
		cfg = dev_cfg << 16 | mem_cfg;
	}

	DMA_WRITE(sc, DDMA_SRC_ADDR_REG(ch->ch_index), src);
	DMA_WRITE(sc, DDMA_DEST_ADDR_REG(ch->ch_index), dst);
	DMA_WRITE(sc, DDMA_BC_REG(ch->ch_index), req->dreq_segs[0].ds_len);
	DMA_WRITE(sc, DDMA_PARA_REG(ch->ch_index), DDMA_PARA_VALUE);
	DMA_WRITE(sc, DDMA_CTRL_REG(ch->ch_index), cfg | DDMA_CTRL_LOAD);

	return 0;
}

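/*
 * fdtbus transfer callback: only single-segment requests are
 * supported; dispatch to the normal or dedicated channel routine.
 */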
static int
sun4idma_transfer(device_t dev, void *priv, struct fdtbus_dma_req *req)
{
	struct sun4idma_softc *sc = device_private(dev);
	struct sun4idma_channel *ch = priv;

	if (req->dreq_nsegs != 1)
		return EINVAL;

	if (ch->ch_type == DMA_TYPE_NORMAL)
		return sun4idma_transfer_ndma(sc, ch, req);
	else
		return sun4idma_transfer_ddma(sc, ch, req);
}

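/*
 * fdtbus halt callback: clearing the LOAD bit in the channel control
 * register stops the current transfer.
 */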
static void
sun4idma_halt(device_t dev, void *priv)
{
	struct sun4idma_softc *sc = device_private(dev);
	struct sun4idma_channel *ch = priv;
	uint32_t val;

	if (ch->ch_type == DMA_TYPE_NORMAL) {
		val = DMA_READ(sc, NDMA_CTRL_REG(ch->ch_index));
		val &= ~NDMA_CTRL_LOAD;
		DMA_WRITE(sc, NDMA_CTRL_REG(ch->ch_index), val);
	} else {
		val = DMA_READ(sc, DDMA_CTRL_REG(ch->ch_index));
		val &= ~DDMA_CTRL_LOAD;
		DMA_WRITE(sc, DDMA_CTRL_REG(ch->ch_index), val);
	}
}

static const struct fdtbus_dma_controller_func sun4idma_funcs = {
	.acquire = sun4idma_acquire,
	.release = sun4idma_release,
	.transfer = sun4idma_transfer,
	.halt = sun4idma_halt
};

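/*
 * Interrupt handler: acknowledge all pending bits, then run the
 * completion callback of every channel whose end-of-transfer bit was
 * set.
 */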
static int
sun4idma_intr(void *priv)
{
	struct sun4idma_softc *sc = priv;
	uint32_t pend, mask, bit;
	uint8_t type, index;

	pend = DMA_READ(sc, DMA_IRQ_PEND_STAS_REG);
	if (pend == 0)
		return 0;

	DMA_WRITE(sc, DMA_IRQ_PEND_STAS_REG, pend);

	pend &= DMA_IRQ_PEND_STAS_END_MASK;

	while ((bit = ffs32(pend)) != 0) {
		mask = __BIT(bit - 1);
		pend &= ~mask;
		type = ((bit - 1) / 2) / 8;
		index = ((bit - 1) / 2) % 8;

		if (sc->sc_chan[type][index].ch_callback == NULL)
			continue;
		sc->sc_chan[type][index].ch_callback(
		    sc->sc_chan[type][index].ch_callbackarg);
	}

	return 1;
}

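/*
 * Autoconf match: accept nodes compatible with "allwinner,sun4i-a10-dma".
 */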
static int
sun4idma_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

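/*
 * Autoconf attach: enable the module clock, map the registers, mask
 * and acknowledge all DMA interrupts, reset every channel's control
 * register, establish the interrupt handler and register the
 * controller with fdtbus.
 */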
static void
sun4idma_attach(device_t parent, device_t self, void *aux)
{
	struct sun4idma_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	struct clk *clk;
	char intrstr[128];
	bus_addr_t addr;
	bus_size_t size;
	u_int index, type;

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	if ((clk = fdtbus_clock_get_index(phandle, 0)) == NULL ||
	    clk_enable(clk) != 0) {
		aprint_error(": couldn't enable clock\n");
		return;
	}

	sc->sc_dev = self;
	sc->sc_phandle = phandle;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) {
		aprint_error(": couldn't map registers\n");
		return;
	}
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SCHED);

	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error(": failed to decode interrupt\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": DMA controller\n");

	DMA_WRITE(sc, DMA_IRQ_EN_REG, 0);
	DMA_WRITE(sc, DMA_IRQ_PEND_STAS_REG, ~0);

	for (type = 0; type < DMA_MAX_TYPES; type++) {
		for (index = 0; index < DMA_MAX_CHANNELS; index++) {
			struct sun4idma_channel *ch = &sc->sc_chan[type][index];
			ch->ch_type = type;
			ch->ch_index = index;
			ch->ch_irqmask = __BIT((type * 16) + (index * 2) + 1);
			ch->ch_callback = NULL;
			ch->ch_callbackarg = NULL;

			if (type == DMA_TYPE_NORMAL)
				DMA_WRITE(sc, NDMA_CTRL_REG(index), 0);
			else
				DMA_WRITE(sc, DDMA_CTRL_REG(index), 0);
		}
	}

	sc->sc_ih = fdtbus_intr_establish_xname(phandle, 0, IPL_SCHED,
	    FDT_INTR_MPSAFE, sun4idma_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish interrupt on %s\n", intrstr);
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting on %s\n", intrstr);

	fdtbus_register_dma_controller(self, phandle, &sun4idma_funcs);
}

CFATTACH_DECL_NEW(sun4i_dma, sizeof(struct sun4idma_softc),
	sun4idma_match, sun4idma_attach, NULL, NULL);