/* $NetBSD: sun6i_dma.c,v 1.15 2021/05/05 10:24:04 jmcneill Exp $ */

/*-
 * Copyright (c) 2014-2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ddb.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sun6i_dma.c,v 1.15 2021/05/05 10:24:04 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/bitops.h>
#include <sys/kmem.h>

#include <dev/fdt/fdtvar.h>

#define DMA_IRQ_EN_REG0_REG		0x0000
#define DMA_IRQ_EN_REG1_REG		0x0004
#define  DMA_IRQ_EN_REG0_QUEUE_IRQ_EN(n)	__BIT(n * 4 + 2)
#define  DMA_IRQ_EN_REG0_PKG_IRQ_EN(n)		__BIT(n * 4 + 1)
#define  DMA_IRQ_EN_REG0_HLAF_IRQ_EN(n)		__BIT(n * 4 + 0)
#define  DMA_IRQ_EN_REG1_QUEUE_IRQ_EN(n)	__BIT((n - 8) * 4 + 2)
#define  DMA_IRQ_EN_REG1_PKG_IRQ_EN(n)		__BIT((n - 8) * 4 + 1)
#define  DMA_IRQ_EN_REG1_HLAF_IRQ_EN(n)		__BIT((n - 8) * 4 + 0)
#define DMA_IRQ_PEND_REG0_REG		0x0010
#define DMA_IRQ_PEND_REG1_REG		0x0014
#define  DMA_IRQ_QUEUE_MASK			0x4444444444444444ULL
#define  DMA_IRQ_PKG_MASK			0x2222222222222222ULL
#define  DMA_IRQ_HF_MASK			0x1111111111111111ULL
#define DMA_STA_REG			0x0030
#define DMA_EN_REG(n)			(0x0100 + (n) * 0x40 + 0x00)
#define  DMA_EN_EN				__BIT(0)
#define DMA_PAU_REG(n)			(0x0100 + (n) * 0x40 + 0x04)
#define  DMA_PAU_PAUSE				__BIT(0)
#define DMA_START_ADDR_REG(n)		(0x0100 + (n) * 0x40 + 0x08)
#define DMA_CFG_REG(n)			(0x0100 + (n) * 0x40 + 0x0C)
#define  DMA_CFG_DEST_DATA_WIDTH		__BITS(26,25)
#define   DMA_CFG_DATA_WIDTH(n)			((n) >> 4)
#define	  DMA_CFG_BST_LEN(n)			((n) == 1 ? 0 : (((n) >> 3) + 1))
#define  DMA_CFG_DEST_ADDR_MODE			__BITS(22,21)
#define   DMA_CFG_ADDR_MODE_LINEAR		0
#define   DMA_CFG_ADDR_MODE_IO			1
#define  DMA_CFG_DEST_DRQ_TYPE			__BITS(20,16)
#define	  DMA_CFG_DRQ_TYPE_SDRAM		1
#define  DMA_CFG_SRC_DATA_WIDTH			__BITS(10,9)
#define  DMA_CFG_SRC_ADDR_MODE			__BITS(6,5)
#define  DMA_CFG_SRC_DRQ_TYPE			__BITS(4,0)
#define DMA_CUR_SRC_REG(n)		(0x0100 + (n) * 0x40 + 0x10)
#define DMA_CUR_DEST_REG(n)		(0x0100 + (n) * 0x40 + 0x14)
#define DMA_BCNT_LEFT_REG(n)		(0x0100 + (n) * 0x40 + 0x18)
#define DMA_PARA_REG(n)			(0x0100 + (n) * 0x40 + 0x1C)
#define  DMA_PARA_DATA_BLK_SIZE			__BITS(15,8)
#define  DMA_PARA_WAIT_CYC			__BITS(7,0)
#define DMA_MODE_REG(n)			(0x0100 + (n) * 0x40 + 0x28)
#define  MODE_WAIT				0b0
#define  MODE_HANDSHAKE				0b1
#define  DMA_MODE_DST(m)			__SHIFTIN((m), __BIT(3))
#define  DMA_MODE_SRC(m)			__SHIFTIN((m), __BIT(2))
#define DMA_FDESC_ADDR_REG(n)		(0x0100 + (n) * 0x40 + 0x2C)
#define DMA_PKG_NUM_REG(n)		(0x0100 + (n) * 0x40 + 0x30)

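/*
 * In-memory transfer descriptor, read by the DMA engine. Descriptors are
 * chained through the physical address in dma_next; the value DMA_NULL
 * terminates the chain. All fields are little-endian.
 */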
struct sun6idma_desc {
	uint32_t	dma_config;
	uint32_t	dma_srcaddr;
	uint32_t	dma_dstaddr;
	uint32_t	dma_bcnt;
	uint32_t	dma_para;
	uint32_t	dma_next;
#define DMA_NULL	0xfffff800
};

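/*
 * Per-SoC configuration: channel count, location of the burst length field
 * in DMA_CFG_REG, supported burst lengths and bus widths, and (where
 * present) the register used to enable automatic clock gating.
 */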
struct sun6idma_config {
	u_int		num_channels;
	bool		autogate;
	uint8_t		bursts;
	uint8_t		widths;
	bus_size_t	autogate_reg;
	uint32_t	autogate_mask;
	uint32_t	burst_mask;
};

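/*
 * Supported bus widths (in bytes) and burst lengths are encoded as bitmasks
 * indexed by log2 of the value, e.g. WIDTHS_1_2_4 sets bits 0..2.
 */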
#define IL2B(x)			__BIT(ilog2(x))
#define IL2B_RANGE(x, y)	__BITS(ilog2(x), ilog2(y))
#define WIDTHS_1_2_4		IL2B_RANGE(4, 1)
#define WIDTHS_1_2_4_8		IL2B_RANGE(8, 1)
#define BURSTS_1_8		(IL2B(8)|IL2B(1))
#define BURSTS_1_4_8_16		(IL2B(16)|IL2B(8)|IL2B(4)|IL2B(1))

static const struct sun6idma_config sun6i_a31_dma_config = {
	.num_channels = 16,
	.burst_mask = __BITS(8,7),
	.bursts = BURSTS_1_8,
	.widths = WIDTHS_1_2_4,
};

static const struct sun6idma_config sun8i_a83t_dma_config = {
	.num_channels = 8,
	.autogate = true,
	.autogate_reg = 0x20,
	.autogate_mask = 0x4,
	.burst_mask = __BITS(8,7),
	.bursts = BURSTS_1_8,
	.widths = WIDTHS_1_2_4,
};

static const struct sun6idma_config sun8i_h3_dma_config = {
	.num_channels = 12,
	.autogate = true,
	.autogate_reg = 0x28,
	.autogate_mask = 0x4,
	.burst_mask = __BITS(7,6),
	.bursts = BURSTS_1_4_8_16,
	.widths = WIDTHS_1_2_4_8,
};

static const struct sun6idma_config sun8i_v3s_dma_config = {
	.num_channels = 8,
	.autogate = true,
	.autogate_reg = 0x20,
	.autogate_mask = 0x4,
	.burst_mask = __BITS(8,7),
	.bursts = BURSTS_1_8,
	.widths = WIDTHS_1_2_4,
};

static const struct sun6idma_config sun50i_a64_dma_config = {
	.num_channels = 8,
	.autogate = true,
	.autogate_reg = 0x28,
	.autogate_mask = 0x4,
	.burst_mask = __BITS(7,6),
	.bursts = BURSTS_1_4_8_16,
	.widths = WIDTHS_1_2_4_8,
};

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "allwinner,sun6i-a31-dma",
	  .data = &sun6i_a31_dma_config },
	{ .compat = "allwinner,sun8i-a83t-dma",
	  .data = &sun8i_a83t_dma_config },
	{ .compat = "allwinner,sun8i-h3-dma",
	  .data = &sun8i_h3_dma_config },
	{ .compat = "allwinner,sun8i-v3s-dma",
	  .data = &sun8i_v3s_dma_config },
	{ .compat = "allwinner,sun50i-a64-dma",
	  .data = &sun50i_a64_dma_config },

	DEVICE_COMPAT_EOL
};

struct sun6idma_channel {
	uint8_t			ch_index;
	void			(*ch_callback)(void *);
	void			*ch_callbackarg;
	u_int			ch_portid;
	void			*ch_dmadesc;
};

struct sun6idma_softc {
	device_t		sc_dev;
	bus_space_tag_t		sc_bst;
	bus_space_handle_t	sc_bsh;
	bus_dma_tag_t		sc_dmat;
	int			sc_phandle;
	void			*sc_ih;

	uint32_t		sc_burst_mask;

	kmutex_t		sc_lock;

	struct sun6idma_channel	*sc_chan;
	u_int			sc_nchan;
	u_int			sc_ndesc_ch;
	uint8_t			sc_widths;
	uint8_t			sc_bursts;

	bus_dma_segment_t	sc_dmasegs[1];
	bus_dmamap_t		sc_dmamap;
	void			*sc_dmadescs;
};

#define DMA_READ(sc, reg)		\
    bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define DMA_WRITE(sc, reg, val)		\
    bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))

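/*
 * Per-channel descriptor pool layout within the single shared DMA buffer:
 * DESC_NUM descriptors per channel (enough to cover a MAXPHYS transfer
 * split on page boundaries), each channel's pool rounded up to
 * COHERENCY_UNIT. DESC_ADDR yields the bus address of a given descriptor.
 */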
#define DESC_NUM			((MAXPHYS / MIN_PAGE_SIZE + 1) + 1)
#define DESC_LEN(n)			\
    (sizeof(struct sun6idma_desc) * (n))
#define DESC_OFFS(ch, n)		\
    ((ch) * roundup2(DESC_LEN(DESC_NUM), COHERENCY_UNIT) + DESC_LEN(n))
#define DESC_ADDR(sc, chp, n)		\
    ((sc)->sc_dmamap->dm_segs[0].ds_addr + DESC_OFFS((chp)->ch_index, (n)))

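/*
 * fdtbus "acquire" callback. The specifier is a single 32-bit cell holding
 * the device's DRQ port id. Claim the first free channel, record the
 * completion callback, and enable that channel's packet-complete interrupt.
 * Returns NULL if the specifier is invalid or no channel is free.
 */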
static void *
sun6idma_acquire(device_t dev, const void *data, size_t len,
    void (*cb)(void *), void *cbarg)
{
	struct sun6idma_softc *sc = device_private(dev);
	struct sun6idma_channel *ch = NULL;
	uint32_t irqen;
	uint8_t index;

	if (len != 4)
		return NULL;

	const u_int portid = be32dec(data);
	if (portid > __SHIFTOUT_MASK(DMA_CFG_SRC_DRQ_TYPE))
		return NULL;

	mutex_enter(&sc->sc_lock);

	for (index = 0; index < sc->sc_nchan; index++) {
		if (sc->sc_chan[index].ch_callback == NULL) {
			ch = &sc->sc_chan[index];
			ch->ch_callback = cb;
			ch->ch_callbackarg = cbarg;
			ch->ch_portid = portid;

			irqen = DMA_READ(sc, index < 8 ?
			    DMA_IRQ_EN_REG0_REG :
			    DMA_IRQ_EN_REG1_REG);
			irqen |= (index < 8 ?
			    DMA_IRQ_EN_REG0_PKG_IRQ_EN(index) :
			    DMA_IRQ_EN_REG1_PKG_IRQ_EN(index));
			DMA_WRITE(sc, index < 8 ?
			    DMA_IRQ_EN_REG0_REG :
			    DMA_IRQ_EN_REG1_REG, irqen);

			break;
		}
	}

	mutex_exit(&sc->sc_lock);

	return ch;
}

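/*
 * fdtbus "release" callback: disable the channel's packet-complete
 * interrupt and mark the channel free again.
 */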
static void
sun6idma_release(device_t dev, void *priv)
{
	struct sun6idma_softc *sc = device_private(dev);
	struct sun6idma_channel *ch = priv;
	uint32_t irqen;
	uint8_t index = ch->ch_index;

	mutex_enter(&sc->sc_lock);

	irqen = DMA_READ(sc, index < 8 ?
	    DMA_IRQ_EN_REG0_REG :
	    DMA_IRQ_EN_REG1_REG);
	irqen &= ~(index < 8 ?
	    DMA_IRQ_EN_REG0_PKG_IRQ_EN(index) :
	    DMA_IRQ_EN_REG1_PKG_IRQ_EN(index));
	DMA_WRITE(sc, index < 8 ?
	    DMA_IRQ_EN_REG0_REG :
	    DMA_IRQ_EN_REG1_REG, irqen);

	ch->ch_callback = NULL;
	ch->ch_callbackarg = NULL;

	mutex_exit(&sc->sc_lock);
}

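/*
 * fdtbus "transfer" callback: validate the requested bus widths and burst
 * lengths, build one descriptor per segment in the channel's pool (the
 * device is the source for FDT_DMA_READ, the destination otherwise; the
 * destination configuration occupies the upper half of dma_config), link
 * the chain, sync it, then point the channel at the first descriptor and
 * enable it.
 */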
static int
sun6idma_transfer(device_t dev, void *priv, struct fdtbus_dma_req *req)
{
	struct sun6idma_softc *sc = device_private(dev);
	struct sun6idma_channel *ch = priv;
	struct sun6idma_desc *desc = ch->ch_dmadesc;
	uint32_t src, dst, len, cfg, mem_cfg, dev_cfg;
	uint32_t mem_width, dev_width, mem_burst, dev_burst;

	if (req->dreq_nsegs > sc->sc_ndesc_ch)
		return EINVAL;

	if ((sc->sc_widths &
	    IL2B(req->dreq_mem_opt.opt_bus_width/NBBY)) == 0)
		return EINVAL;
	if ((sc->sc_widths &
	    IL2B(req->dreq_dev_opt.opt_bus_width/NBBY)) == 0)
		return EINVAL;
	if ((sc->sc_bursts &
	    IL2B(req->dreq_mem_opt.opt_burst_len)) == 0)
		return EINVAL;
	if ((sc->sc_bursts &
	    IL2B(req->dreq_dev_opt.opt_burst_len)) == 0)
		return EINVAL;

	mem_width = DMA_CFG_DATA_WIDTH(req->dreq_mem_opt.opt_bus_width);
	dev_width = DMA_CFG_DATA_WIDTH(req->dreq_dev_opt.opt_bus_width);
	mem_burst = DMA_CFG_BST_LEN(req->dreq_mem_opt.opt_burst_len);
	dev_burst = DMA_CFG_BST_LEN(req->dreq_dev_opt.opt_burst_len);

	mem_cfg = __SHIFTIN(mem_width, DMA_CFG_SRC_DATA_WIDTH) |
	    __SHIFTIN(mem_burst, sc->sc_burst_mask) |
	    __SHIFTIN(DMA_CFG_ADDR_MODE_LINEAR, DMA_CFG_SRC_ADDR_MODE) |
	    __SHIFTIN(DMA_CFG_DRQ_TYPE_SDRAM, DMA_CFG_SRC_DRQ_TYPE);
	dev_cfg = __SHIFTIN(dev_width, DMA_CFG_SRC_DATA_WIDTH) |
	    __SHIFTIN(dev_burst, sc->sc_burst_mask) |
	    __SHIFTIN(DMA_CFG_ADDR_MODE_IO, DMA_CFG_SRC_ADDR_MODE) |
	    __SHIFTIN(ch->ch_portid, DMA_CFG_SRC_DRQ_TYPE);

	for (size_t j = 0; j < req->dreq_nsegs; j++) {
		if (req->dreq_dir == FDT_DMA_READ) {
			src = req->dreq_dev_phys;
			dst = req->dreq_segs[j].ds_addr;
			cfg = mem_cfg << 16 | dev_cfg;
		} else {
			src = req->dreq_segs[j].ds_addr;
			dst = req->dreq_dev_phys;
			cfg = dev_cfg << 16 | mem_cfg;
		}
		len = req->dreq_segs[j].ds_len;

		desc[j].dma_config = htole32(cfg);
		desc[j].dma_srcaddr = htole32(src);
		desc[j].dma_dstaddr = htole32(dst);
		desc[j].dma_bcnt = htole32(len);
		desc[j].dma_para = htole32(0);
		if (j < req->dreq_nsegs - 1)
			desc[j].dma_next = htole32(DESC_ADDR(sc, ch, j + 1));
		else
			desc[j].dma_next = htole32(DMA_NULL);
	}

#if notyet && maybenever
	DMA_WRITE(sc, DMA_MODE_REG(ch->ch_index),
	    DMA_MODE_DST(MODE_HANDSHAKE)|DMA_MODE_SRC(MODE_HANDSHAKE));
#endif

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, DESC_OFFS(ch->ch_index, 0),
	    DESC_LEN(req->dreq_nsegs), BUS_DMASYNC_PREWRITE);

	DMA_WRITE(sc, DMA_START_ADDR_REG(ch->ch_index),
	    DESC_ADDR(sc, ch, 0));
	DMA_WRITE(sc, DMA_EN_REG(ch->ch_index), DMA_EN_EN);

	if ((DMA_READ(sc, DMA_EN_REG(ch->ch_index)) & DMA_EN_EN) == 0) {
		aprint_error_dev(sc->sc_dev,
		    "DMA Channel %u failed to start\n", ch->ch_index);
		return EIO;
	}

	return 0;
}

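/*
 * fdtbus "halt" callback: stop any transfer in progress on the channel.
 */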
static void
sun6idma_halt(device_t dev, void *priv)
{
	struct sun6idma_softc *sc = device_private(dev);
	struct sun6idma_channel *ch = priv;

	DMA_WRITE(sc, DMA_EN_REG(ch->ch_index), 0);
}

static const struct fdtbus_dma_controller_func sun6idma_funcs = {
	.acquire = sun6idma_acquire,
	.release = sun6idma_release,
	.transfer = sun6idma_transfer,
	.halt = sun6idma_halt
};

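/*
 * Interrupt handler: read and acknowledge both pending registers, then
 * dispatch the callback of every channel whose packet-complete bit is set
 * (each channel owns a nibble of the combined 64-bit pending mask).
 */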
static int
sun6idma_intr(void *priv)
{
	struct sun6idma_softc *sc = priv;
	uint32_t pend0, pend1, bit;
	uint64_t pend, mask;
	uint8_t index;

	pend0 = DMA_READ(sc, DMA_IRQ_PEND_REG0_REG);
	pend1 = DMA_READ(sc, DMA_IRQ_PEND_REG1_REG);
	if (!pend0 && !pend1)
		return 0;

	DMA_WRITE(sc, DMA_IRQ_PEND_REG0_REG, pend0);
	DMA_WRITE(sc, DMA_IRQ_PEND_REG1_REG, pend1);

	pend = pend0 | ((uint64_t)pend1 << 32);

	while ((bit = ffs64(pend & DMA_IRQ_PKG_MASK)) != 0) {
		mask = __BIT(bit - 1);
		pend &= ~mask;
		index = (bit - 1) / 4;

		if (sc->sc_chan[index].ch_callback == NULL)
			continue;
		sc->sc_chan[index].ch_callback(
		    sc->sc_chan[index].ch_callbackarg);
	}

	return 1;
}

static int
sun6idma_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

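/*
 * Attach: enable the module clock, de-assert the reset, map the registers,
 * mask and clear all interrupts, allocate one DMA-safe block holding the
 * per-channel descriptor pools, initialize the channels, optionally enable
 * automatic clock gating, hook the interrupt, and register as an fdtbus
 * DMA controller.
 */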
static void
sun6idma_attach(device_t parent, device_t self, void *aux)
{
	struct sun6idma_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	size_t desclen;
	const struct sun6idma_config *conf;
	struct fdtbus_reset *rst;
	struct clk *clk;
	char intrstr[128];
	bus_addr_t addr;
	bus_size_t size;
	int error, nsegs;
	u_int index;

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	if ((clk = fdtbus_clock_get_index(phandle, 0)) == NULL ||
	    clk_enable(clk) != 0) {
		aprint_error(": couldn't enable clock\n");
		return;
	}
	if ((rst = fdtbus_reset_get_index(phandle, 0)) == NULL ||
	    fdtbus_reset_deassert(rst) != 0) {
		aprint_error(": couldn't de-assert reset\n");
		return;
	}

	sc->sc_dev = self;
	sc->sc_phandle = phandle;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) {
		aprint_error(": couldn't map registers\n");
		return;
	}
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SCHED);

	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error(": failed to decode interrupt\n");
		return;
	}

	conf = of_compatible_lookup(phandle, compat_data)->data;

	sc->sc_burst_mask = conf->burst_mask;
	sc->sc_nchan = conf->num_channels;
	sc->sc_widths = conf->widths;
	sc->sc_bursts = conf->bursts;
	sc->sc_chan = kmem_alloc(sizeof(*sc->sc_chan) * sc->sc_nchan, KM_SLEEP);
	desclen = DESC_OFFS(sc->sc_nchan, 0);
	sc->sc_ndesc_ch = DESC_OFFS(1, 0) / sizeof(struct sun6idma_desc);

	aprint_naive("\n");
	aprint_normal(": DMA controller (%u channels)\n", sc->sc_nchan);

	DMA_WRITE(sc, DMA_IRQ_EN_REG0_REG, 0);
	DMA_WRITE(sc, DMA_IRQ_EN_REG1_REG, 0);
	DMA_WRITE(sc, DMA_IRQ_PEND_REG0_REG, ~0);
	DMA_WRITE(sc, DMA_IRQ_PEND_REG1_REG, ~0);

	error = bus_dmamem_alloc(sc->sc_dmat, desclen, 0, 0,
	    sc->sc_dmasegs, 1, &nsegs, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamem_alloc failed: %d", error);
	error = bus_dmamem_map(sc->sc_dmat, sc->sc_dmasegs, nsegs,
	    desclen, (void **)&sc->sc_dmadescs, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamem_map failed: %d", error);
	error = bus_dmamap_create(sc->sc_dmat, desclen, 1, desclen, 0,
	    BUS_DMA_WAITOK, &sc->sc_dmamap);
	if (error)
		panic("bus_dmamap_create failed: %d", error);
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
	    sc->sc_dmadescs, desclen, NULL, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamap_load failed: %d", error);

	for (index = 0; index < sc->sc_nchan; index++) {
		struct sun6idma_channel *ch = &sc->sc_chan[index];
		ch->ch_index = index;
		ch->ch_dmadesc = (void *)((uintptr_t)sc->sc_dmadescs + DESC_OFFS(index, 0));
		ch->ch_callback = NULL;
		ch->ch_callbackarg = NULL;

		DMA_WRITE(sc, DMA_EN_REG(index), 0);
	}

	if (conf->autogate)
		DMA_WRITE(sc, conf->autogate_reg, conf->autogate_mask);

	sc->sc_ih = fdtbus_intr_establish_xname(phandle, 0, IPL_SCHED,
	    FDT_INTR_MPSAFE, sun6idma_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish interrupt on %s\n", intrstr);
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting on %s\n", intrstr);

	fdtbus_register_dma_controller(self, phandle, &sun6idma_funcs);
}

CFATTACH_DECL_NEW(sun6i_dma, sizeof(struct sun6idma_softc),
        sun6idma_match, sun6idma_attach, NULL, NULL);

#ifdef DDB
void sun6idma_dump(void);

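/*
 * DDB helper: dump the global interrupt/status registers and the
 * per-channel registers of every channel that is currently acquired.
 */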
void
sun6idma_dump(void)
{
	struct sun6idma_softc *sc;
	device_t dev;
	u_int index;

	dev = device_find_by_driver_unit("sun6idma", 0);
	if (dev == NULL)
		return;
	sc = device_private(dev);

	device_printf(dev, "DMA_IRQ_EN_REG0_REG:   %08x\n", DMA_READ(sc, DMA_IRQ_EN_REG0_REG));
	device_printf(dev, "DMA_IRQ_EN_REG1_REG:   %08x\n", DMA_READ(sc, DMA_IRQ_EN_REG1_REG));
	device_printf(dev, "DMA_IRQ_PEND_REG0_REG: %08x\n", DMA_READ(sc, DMA_IRQ_PEND_REG0_REG));
	device_printf(dev, "DMA_IRQ_PEND_REG1_REG: %08x\n", DMA_READ(sc, DMA_IRQ_PEND_REG1_REG));
	device_printf(dev, "DMA_STA_REG:           %08x\n", DMA_READ(sc, DMA_STA_REG));

	for (index = 0; index < sc->sc_nchan; index++) {
		struct sun6idma_channel *ch = &sc->sc_chan[index];
		if (ch->ch_callback == NULL)
			continue;
		device_printf(dev, " %2d: DMA_EN_REG:         %08x\n", index, DMA_READ(sc, DMA_EN_REG(index)));
		device_printf(dev, " %2d: DMA_PAU_REG:        %08x\n", index, DMA_READ(sc, DMA_PAU_REG(index)));
		device_printf(dev, " %2d: DMA_START_ADDR_REG: %08x\n", index, DMA_READ(sc, DMA_START_ADDR_REG(index)));
		device_printf(dev, " %2d: DMA_CFG_REG:        %08x\n", index, DMA_READ(sc, DMA_CFG_REG(index)));
		device_printf(dev, " %2d: DMA_CUR_SRC_REG:    %08x\n", index, DMA_READ(sc, DMA_CUR_SRC_REG(index)));
		device_printf(dev, " %2d: DMA_CUR_DEST_REG:   %08x\n", index, DMA_READ(sc, DMA_CUR_DEST_REG(index)));
		device_printf(dev, " %2d: DMA_BCNT_LEFT_REG:  %08x\n", index, DMA_READ(sc, DMA_BCNT_LEFT_REG(index)));
		device_printf(dev, " %2d: DMA_PARA_REG:       %08x\n", index, DMA_READ(sc, DMA_PARA_REG(index)));
		device_printf(dev, " %2d: DMA_MODE_REG:       %08x\n", index, DMA_READ(sc, DMA_MODE_REG(index)));
		device_printf(dev, " %2d: DMA_FDESC_ADDR_REG: %08x\n", index, DMA_READ(sc, DMA_FDESC_ADDR_REG(index)));
		device_printf(dev, " %2d: DMA_PKG_NUM_REG:    %08x\n", index, DMA_READ(sc, DMA_PKG_NUM_REG(index)));
	}
}
#endif
581