/* $Id: imx23_apbdma.c,v 1.4 2015/01/10 12:13:00 jmcneill Exp $ */

/*
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Petri Laakso.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/mutex.h>
#include <sys/kmem.h>
#include <sys/systm.h>

#include <arm/imx/imx23_apbdma.h>
#include <arm/imx/imx23_apbdmareg.h>
#include <arm/imx/imx23_apbdmavar.h>
#include <arm/imx/imx23_apbhdmareg.h>
#include <arm/imx/imx23_apbxdmareg.h>
#include <arm/imx/imx23var.h>

static int	apbdma_match(device_t, cfdata_t, void *);
static void	apbdma_attach(device_t, device_t, void *);
static int	apbdma_activate(device_t, enum devact);

CFATTACH_DECL3_NEW(apbdma,
	sizeof(struct apbdma_softc),
	apbdma_match,
	apbdma_attach,
	NULL,
	apbdma_activate,
	NULL,
	NULL,
	0);

static void	apbdma_reset(struct apbdma_softc *);
static void	apbdma_init(struct apbdma_softc *);

#define DMA_RD(sc, reg)							\
		bus_space_read_4(sc->sc_iot, sc->sc_ioh, (reg))
#define DMA_WR(sc, reg, val)						\
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, (reg), (val))

#define APBDMA_SOFT_RST_LOOP 455 /* At least 1 us ... */

static int
apbdma_match(device_t parent, cfdata_t match, void *aux)
{
	struct apb_attach_args *aa = aux;

	if (aa->aa_addr == HW_APBHDMA_BASE && aa->aa_size == HW_APBHDMA_SIZE)
		return 1;

	if (aa->aa_addr == HW_APBXDMA_BASE && aa->aa_size == HW_APBXDMA_SIZE)
		return 1;

	return 0;
}

static void
apbdma_attach(device_t parent, device_t self, void *aux)
{
	struct apb_attach_args *aa = aux;
	struct apbdma_softc *sc = device_private(self);
	struct apb_softc *sc_parent = device_private(parent);
	static u_int apbdma_attached = 0;

	if ((strncmp(device_xname(parent), "apbh", 4) == 0) &&
	    (apbdma_attached & F_APBH_DMA))
		return;
	if ((strncmp(device_xname(parent), "apbx", 4) == 0) &&
	    (apbdma_attached & F_APBX_DMA))
		return;

	sc->sc_dev = self;
	sc->sc_iot = aa->aa_iot;
	sc->sc_dmat = aa->aa_dmat;

	if (bus_space_map(sc->sc_iot,
	    aa->aa_addr, aa->aa_size, 0, &sc->sc_ioh)) {
		aprint_error_dev(sc->sc_dev, "unable to map bus space\n");
		return;
	}

	if (strncmp(device_xname(parent), "apbh", 4) == 0)
		sc->flags = F_APBH_DMA;

	if (strncmp(device_xname(parent), "apbx", 4) == 0)
		sc->flags = F_APBX_DMA;

	apbdma_reset(sc);
	apbdma_init(sc);

	if (sc->flags & F_APBH_DMA)
		apbdma_attached |= F_APBH_DMA;
	if (sc->flags & F_APBX_DMA)
		apbdma_attached |= F_APBX_DMA;

	sc_parent->dmac = self;

	/* Initialize mutex to control concurrent access from the drivers. */
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_HIGH);

	if (sc->flags & F_APBH_DMA)
		aprint_normal(": APBH DMA\n");
	else if (sc->flags & F_APBX_DMA)
		aprint_normal(": APBX DMA\n");
	else
		panic("dma flag missing!\n");

	return;
}

static int
apbdma_activate(device_t self, enum devact act)
{
	return EOPNOTSUPP;
}

/*
 * Reset the APB{H,X}DMA block.
 *
 * Inspired by i.MX23 RM "39.3.10 Correct Way to Soft Reset a Block"
 */
static void
apbdma_reset(struct apbdma_softc *sc)
{
	unsigned int loop;

	/*
	 * Prepare for soft-reset by making sure that SFTRST is not currently
	 * asserted. Also clear CLKGATE so we can wait for its assertion below.
	 */
	DMA_WR(sc, HW_APB_CTRL0_CLR, HW_APB_CTRL0_SFTRST);

	/* Wait at least a microsecond for SFTRST to deassert. */
	loop = 0;
	while ((DMA_RD(sc, HW_APB_CTRL0) & HW_APB_CTRL0_SFTRST) ||
	    (loop < APBDMA_SOFT_RST_LOOP))
		loop++;

	/* Clear CLKGATE so we can wait for its assertion below. */
	DMA_WR(sc, HW_APB_CTRL0_CLR, HW_APB_CTRL0_CLKGATE);

	/* Soft-reset the block. */
	DMA_WR(sc, HW_APB_CTRL0_SET, HW_APB_CTRL0_SFTRST);

	/* Wait until clock is in the gated state. */
	while (!(DMA_RD(sc, HW_APB_CTRL0) & HW_APB_CTRL0_CLKGATE));

	/* Bring block out of reset. */
	DMA_WR(sc, HW_APB_CTRL0_CLR, HW_APB_CTRL0_SFTRST);

	loop = 0;
	while ((DMA_RD(sc, HW_APB_CTRL0) & HW_APB_CTRL0_SFTRST) ||
	    (loop < APBDMA_SOFT_RST_LOOP))
		loop++;

	DMA_WR(sc, HW_APB_CTRL0_CLR, HW_APB_CTRL0_CLKGATE);

	/* Wait until clock is in the NON-gated state. */
	while (DMA_RD(sc, HW_APB_CTRL0) & HW_APB_CTRL0_CLKGATE);

	return;
}

/*
 * Initialize APB{H,X}DMA block.
 */
static void
apbdma_init(struct apbdma_softc *sc)
{

	if (sc->flags & F_APBH_DMA) {
		DMA_WR(sc, HW_APBH_CTRL0_SET, HW_APBH_CTRL0_AHB_BURST8_EN);
		DMA_WR(sc, HW_APBH_CTRL0_SET, HW_APBH_CTRL0_APB_BURST4_EN);
	}
	return;
}

/*
 * Chain DMA commands together.
 *
 * Set src->next to point to trg's DMA-mapped physical address.
 */
void
apbdma_cmd_chain(apbdma_command_t src, apbdma_command_t trg, void *buf,
    bus_dmamap_t dmap)
{
	int i;
	bus_size_t daddr;
	bus_addr_t trg_offset;

	trg_offset = (bus_addr_t)trg - (bus_addr_t)buf;
	daddr = 0;

	for (i = 0; i < dmap->dm_nsegs; i++) {
		daddr += dmap->dm_segs[i].ds_len;
		if (trg_offset < daddr) {
			src->next = (void *)(dmap->dm_segs[i].ds_addr +
			    (trg_offset - (daddr - dmap->dm_segs[i].ds_len)));
			break;
		}
	}

	return;
}

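/*
 * Illustrative sketch (not compiled into the driver): chaining two
 * adjacent commands that live in the same DMA-safe buffer.  "cmdbuf" is
 * assumed to be the kernel virtual address of that buffer and "cmdmap"
 * its loaded DMA map; both names are placeholders, not driver symbols.
 *
 *	apbdma_command_t cmd0 = (apbdma_command_t)cmdbuf;
 *	apbdma_command_t cmd1 = cmd0 + 1;
 *
 *	apbdma_cmd_chain(cmd0, cmd1, cmdbuf, cmdmap);
 *
 * After the call, cmd0->next holds cmd1's DMA (bus) address, so the
 * controller can fetch cmd1 once cmd0 completes.
 */
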
/*
 * Set DMA command buffer.
 *
 * Set cmd->buffer to point to the DMA-mapped physical address at offset in
 * the DMA map.
 */
void
apbdma_cmd_buf(apbdma_command_t cmd, bus_addr_t offset, bus_dmamap_t dmap)
{
	int i;
	bus_size_t daddr;

	daddr = 0;

	for (i = 0; i < dmap->dm_nsegs; i++) {
		daddr += dmap->dm_segs[i].ds_len;
		if (offset < daddr) {
			cmd->buffer = (void *)(dmap->dm_segs[i].ds_addr +
			    (offset - (daddr - dmap->dm_segs[i].ds_len)));
			break;
		}
	}

	return;
}

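/*
 * Illustrative sketch (not compiled into the driver): pointing a command
 * at its data.  "cmd" is a command in the loaded map "cmdmap" and
 * "data_off" the byte offset of the data within that map; all three names
 * are placeholders.
 *
 *	apbdma_cmd_buf(cmd, data_off, cmdmap);
 *
 * cmd->buffer then holds the DMA address of that offset, resolved across
 * the map's segments just as apbdma_cmd_chain() resolves cmd->next.
 */
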
/*
 * Initialize DMA channel.
 */
void
apbdma_chan_init(struct apbdma_softc *sc, unsigned int channel)
{

	mutex_enter(&sc->sc_lock);

	/* Enable CMDCMPLT_IRQ. */
	DMA_WR(sc, HW_APB_CTRL1_SET, (1<<channel)<<16);

	mutex_exit(&sc->sc_lock);

	return;
}

/*
 * Set command chain for DMA channel.
 */
#define HW_APB_CHN_NXTCMDAR(base, channel)	((base) + (0x70 * (channel)))
void
apbdma_chan_set_chain(struct apbdma_softc *sc, unsigned int channel,
	bus_dmamap_t dmap)
{
	uint32_t reg;

	if (sc->flags & F_APBH_DMA)
		reg = HW_APB_CHN_NXTCMDAR(HW_APBH_CH0_NXTCMDAR, channel);
	else
		reg = HW_APB_CHN_NXTCMDAR(HW_APBX_CH0_NXTCMDAR, channel);

	mutex_enter(&sc->sc_lock);
	DMA_WR(sc, reg, dmap->dm_segs[0].ds_addr);
	mutex_exit(&sc->sc_lock);

	return;
}

/*
 * Initiate DMA transfer.
 */
#define HW_APB_CHN_SEMA(base, channel)	((base) + (0x70 * (channel)))
void
apbdma_run(struct apbdma_softc *sc, unsigned int channel)
{
	uint32_t reg;
	uint8_t val;

	if (sc->flags & F_APBH_DMA) {
		reg = HW_APB_CHN_SEMA(HW_APBH_CH0_SEMA, channel);
		val = __SHIFTIN(1, HW_APBH_CH0_SEMA_INCREMENT_SEMA);
	} else {
		reg = HW_APB_CHN_SEMA(HW_APBX_CH0_SEMA, channel);
		val = __SHIFTIN(1, HW_APBX_CH0_SEMA_INCREMENT_SEMA);
	}

	mutex_enter(&sc->sc_lock);
	DMA_WR(sc, reg, val);
	mutex_exit(&sc->sc_lock);

	return;
}

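/*
 * Illustrative start-up sequence (a sketch, not driver code): a client
 * typically enables the channel's completion interrupt, points the
 * channel at the first command of its chain and kicks it off by
 * incrementing the channel semaphore.  "chan" and "cmdmap" are
 * placeholder names.
 *
 *	apbdma_chan_init(sc, chan);
 *	apbdma_chan_set_chain(sc, chan, cmdmap);
 *	apbdma_run(sc, chan);
 */
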
/*
 * Acknowledge command complete IRQ.
 */
void
apbdma_ack_intr(struct apbdma_softc *sc, unsigned int channel)
{

	mutex_enter(&sc->sc_lock);
	DMA_WR(sc, HW_APB_CTRL1_CLR, (1<<channel));
	mutex_exit(&sc->sc_lock);

	return;
}

/*
 * Acknowledge error IRQ.
 */
void
apbdma_ack_error_intr(struct apbdma_softc *sc, unsigned int channel)
{

	mutex_enter(&sc->sc_lock);
	DMA_WR(sc, HW_APB_CTRL2_CLR, (1<<channel));
	mutex_exit(&sc->sc_lock);

	return;
}

/*
 * Return reason for the IRQ.
 */
unsigned int
apbdma_intr_status(struct apbdma_softc *sc, unsigned int channel)
{
	unsigned int reason;

	reason = 0;

	mutex_enter(&sc->sc_lock);

	/* Check if this was command complete IRQ. */
	if (DMA_RD(sc, HW_APB_CTRL1) & (1<<channel))
		reason = DMA_IRQ_CMDCMPLT;

	/* Check if error was set. */
	if (DMA_RD(sc, HW_APB_CTRL2) & (1<<channel)) {
		if (DMA_RD(sc, HW_APB_CTRL2) & (1<<channel)<<16)
			reason = DMA_IRQ_BUS_ERROR;
		else
			reason = DMA_IRQ_TERM;
	}

	mutex_exit(&sc->sc_lock);

	return reason;
}

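/*
 * Illustrative interrupt handler fragment (a sketch; it assumes the
 * client has established a handler for its channel's IRQ and that "chan"
 * names that channel):
 *
 *	switch (apbdma_intr_status(sc, chan)) {
 *	case DMA_IRQ_CMDCMPLT:
 *		apbdma_ack_intr(sc, chan);
 *		break;
 *	case DMA_IRQ_BUS_ERROR:
 *	case DMA_IRQ_TERM:
 *		apbdma_ack_error_intr(sc, chan);
 *		break;
 *	}
 */
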
/*
 * Reset DMA channel.
 * Use only for devices on APBH bus.
 */
void
apbdma_chan_reset(struct apbdma_softc *sc, unsigned int channel)
{

	mutex_enter(&sc->sc_lock);

	if (sc->flags & F_APBH_DMA) {
		DMA_WR(sc, HW_APB_CTRL0_SET,
		    __SHIFTIN((1<<channel), HW_APBH_CTRL0_RESET_CHANNEL));
		while (DMA_RD(sc, HW_APB_CTRL0) & HW_APBH_CTRL0_RESET_CHANNEL);
	} else {
		DMA_WR(sc, HW_APBX_CHANNEL_CTRL_SET,
		    __SHIFTIN((1<<channel), HW_APBH_CTRL0_RESET_CHANNEL));
		while (DMA_RD(sc, HW_APBX_CHANNEL_CTRL) & (1<<channel));
	}

	mutex_exit(&sc->sc_lock);

	return;
}

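/*
 * Spin until the channel's semaphore count reads zero, i.e. until the
 * channel has consumed all commands queued to it.
 */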
void
apbdma_wait(struct apbdma_softc *sc, unsigned int channel)
{

	mutex_enter(&sc->sc_lock);

	if (sc->flags & F_APBH_DMA) {
		while (DMA_RD(sc, HW_APB_CHN_SEMA(HW_APBH_CH0_SEMA, channel))
		    & HW_APBH_CH0_SEMA_PHORE)
			;
	} else {
		while (DMA_RD(sc, HW_APB_CHN_SEMA(HW_APBX_CH0_SEMA, channel))
		    & HW_APBX_CH0_SEMA_PHORE)
			;
	}

	mutex_exit(&sc->sc_lock);
}
426