/* $Id: imx23_apbdma.c,v 1.4 2015/01/10 12:13:00 jmcneill Exp $ */

/*
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Petri Laakso.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/mutex.h>
#include <sys/kmem.h>
#include <sys/systm.h>

#include <arm/imx/imx23_apbdma.h>
#include <arm/imx/imx23_apbdmareg.h>
#include <arm/imx/imx23_apbdmavar.h>
#include <arm/imx/imx23_apbhdmareg.h>
#include <arm/imx/imx23_apbxdmareg.h>
#include <arm/imx/imx23var.h>

static int	apbdma_match(device_t, cfdata_t, void *);
static void	apbdma_attach(device_t, device_t, void *);
static int	apbdma_activate(device_t, enum devact);

CFATTACH_DECL3_NEW(apbdma,
	sizeof(struct apbdma_softc),
	apbdma_match,
	apbdma_attach,
	NULL,
	apbdma_activate,
	NULL,
	NULL,
	0);

static void	apbdma_reset(struct apbdma_softc *);
static void	apbdma_init(struct apbdma_softc *);

#define DMA_RD(sc, reg)							\
		bus_space_read_4(sc->sc_iot, sc->sc_ioh, (reg))
#define DMA_WR(sc, reg, val)						\
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, (reg), (val))

#define APBDMA_SOFT_RST_LOOP 455 /* At least 1 us ... */

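/*
 * Match either the APBH or the APBX DMA register window announced by the
 * parent apb bus.
 */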
static int
apbdma_match(device_t parent, cfdata_t match, void *aux)
{
	struct apb_attach_args *aa = aux;

	if (aa->aa_addr == HW_APBHDMA_BASE && aa->aa_size == HW_APBHDMA_SIZE)
		return 1;

	if (aa->aa_addr == HW_APBXDMA_BASE && aa->aa_size == HW_APBXDMA_SIZE)
		return 1;

	return 0;
}

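/*
 * Attach the DMA controller: map its registers, reset and initialize the
 * block, and record the device in the parent bus softc for client drivers.
 */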
static void
apbdma_attach(device_t parent, device_t self, void *aux)
{
	struct apb_attach_args *aa = aux;
	struct apbdma_softc *sc = device_private(self);
	struct apb_softc *sc_parent = device_private(parent);
	static u_int apbdma_attached = 0;

	if ((strncmp(device_xname(parent), "apbh", 4) == 0) &&
	    (apbdma_attached & F_APBH_DMA))
		return;
	if ((strncmp(device_xname(parent), "apbx", 4) == 0) &&
	    (apbdma_attached & F_APBX_DMA))
		return;

	sc->sc_dev = self;
	sc->sc_iot = aa->aa_iot;
	sc->sc_dmat = aa->aa_dmat;

	if (bus_space_map(sc->sc_iot,
	    aa->aa_addr, aa->aa_size, 0, &sc->sc_ioh)) {
		aprint_error_dev(sc->sc_dev, "unable to map bus space\n");
		return;
	}

	if (strncmp(device_xname(parent), "apbh", 4) == 0)
		sc->flags = F_APBH_DMA;

	if (strncmp(device_xname(parent), "apbx", 4) == 0)
		sc->flags = F_APBX_DMA;

	apbdma_reset(sc);
	apbdma_init(sc);

	if (sc->flags & F_APBH_DMA)
		apbdma_attached |= F_APBH_DMA;
	if (sc->flags & F_APBX_DMA)
		apbdma_attached |= F_APBX_DMA;

	sc_parent->dmac = self;

	/* Initialize mutex to control concurrent access from the drivers. */
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_HIGH);

	if (sc->flags & F_APBH_DMA)
		aprint_normal(": APBH DMA\n");
	else if (sc->flags & F_APBX_DMA)
		aprint_normal(": APBX DMA\n");
	else
		panic("dma flag missing!\n");

	return;
}

static int
apbdma_activate(device_t self, enum devact act)
{
	return EOPNOTSUPP;
}

/*
 * Reset the APB{H,X}DMA block.
 *
 * Inspired by i.MX23 RM "39.3.10 Correct Way to Soft Reset a Block"
 */
static void
apbdma_reset(struct apbdma_softc *sc)
{
	unsigned int loop;

	/*
	 * Prepare for soft-reset by making sure that SFTRST is not currently
	 * asserted.
	 */
	DMA_WR(sc, HW_APB_CTRL0_CLR, HW_APB_CTRL0_SFTRST);

	/* Wait at least a microsecond for SFTRST to deassert. */
	loop = 0;
	while ((DMA_RD(sc, HW_APB_CTRL0) & HW_APB_CTRL0_SFTRST) ||
	    (loop < APBDMA_SOFT_RST_LOOP))
		loop++;

	/* Clear CLKGATE so we can wait for its assertion below. */
	DMA_WR(sc, HW_APB_CTRL0_CLR, HW_APB_CTRL0_CLKGATE);

	/* Soft-reset the block. */
	DMA_WR(sc, HW_APB_CTRL0_SET, HW_APB_CTRL0_SFTRST);

	/* Wait until clock is in the gated state. */
	while (!(DMA_RD(sc, HW_APB_CTRL0) & HW_APB_CTRL0_CLKGATE))
		;

	/* Bring block out of reset. */
	DMA_WR(sc, HW_APB_CTRL0_CLR, HW_APB_CTRL0_SFTRST);

	/* Wait at least a microsecond for SFTRST to deassert again. */
	loop = 0;
	while ((DMA_RD(sc, HW_APB_CTRL0) & HW_APB_CTRL0_SFTRST) ||
	    (loop < APBDMA_SOFT_RST_LOOP))
		loop++;

	/* Ungate the clock. */
	DMA_WR(sc, HW_APB_CTRL0_CLR, HW_APB_CTRL0_CLKGATE);

	/* Wait until clock is in the NON-gated state. */
	while (DMA_RD(sc, HW_APB_CTRL0) & HW_APB_CTRL0_CLKGATE)
		;

	return;
}

/*
 * Initialize APB{H,X}DMA block.
 */
static void
apbdma_init(struct apbdma_softc *sc)
{

	if (sc->flags & F_APBH_DMA) {
		DMA_WR(sc, HW_APBH_CTRL0_SET, HW_APBH_CTRL0_AHB_BURST8_EN);
		DMA_WR(sc, HW_APBH_CTRL0_SET, HW_APBH_CTRL0_APB_BURST4_EN);
	}
	return;
}

/*
 * Chain DMA commands together.
 *
 * Set src->next to point to trg's physical DMA mapped address.
 */
void
apbdma_cmd_chain(apbdma_command_t src, apbdma_command_t trg, void *buf,
    bus_dmamap_t dmap)
{
	int i;
	bus_size_t daddr;
	bus_addr_t trg_offset;

	trg_offset = (bus_addr_t)trg - (bus_addr_t)buf;
	daddr = 0;

	for (i = 0; i < dmap->dm_nsegs; i++) {
		daddr += dmap->dm_segs[i].ds_len;
		if (trg_offset < daddr) {
			src->next = (void *)(dmap->dm_segs[i].ds_addr +
			    (trg_offset - (daddr - dmap->dm_segs[i].ds_len)));
			break;
		}
	}

	return;
}
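
/*
 * Example (hypothetical caller): with DMA commands allocated in one
 * DMA-safe buffer "cmds" that was loaded into the map "cmd_dmap", link
 * the first command to the second:
 *
 *	apbdma_cmd_chain(&cmds[0], &cmds[1], cmds, cmd_dmap);
 */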

/*
 * Set DMA command buffer.
 *
 * Set cmd->buffer to point to the physical DMA address at offset in the
 * DMA map.
 */
void
apbdma_cmd_buf(apbdma_command_t cmd, bus_addr_t offset, bus_dmamap_t dmap)
{
	int i;
	bus_size_t daddr;

	daddr = 0;

	for (i = 0; i < dmap->dm_nsegs; i++) {
		daddr += dmap->dm_segs[i].ds_len;
		if (offset < daddr) {
			cmd->buffer = (void *)(dmap->dm_segs[i].ds_addr +
			    (offset - (daddr - dmap->dm_segs[i].ds_len)));
			break;
		}
	}

	return;
}
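
/*
 * Example (hypothetical caller): point a command at the start of a data
 * buffer loaded into the map "data_dmap":
 *
 *	apbdma_cmd_buf(&cmds[0], 0, data_dmap);
 */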

/*
 * Initialize DMA channel.
 */
void
apbdma_chan_init(struct apbdma_softc *sc, unsigned int channel)
{

	mutex_enter(&sc->sc_lock);

	/* Enable CMDCMPLT_IRQ. */
	DMA_WR(sc, HW_APB_CTRL1_SET, (1<<channel)<<16);

	mutex_exit(&sc->sc_lock);

	return;
}

/*
 * Set command chain for DMA channel.
 */
#define HW_APB_CHN_NXTCMDAR(base, channel)	(base + (0x70 * channel))
void
apbdma_chan_set_chain(struct apbdma_softc *sc, unsigned int channel,
	bus_dmamap_t dmap)
{
	uint32_t reg;

	if (sc->flags & F_APBH_DMA)
		reg = HW_APB_CHN_NXTCMDAR(HW_APBH_CH0_NXTCMDAR, channel);
	else
		reg = HW_APB_CHN_NXTCMDAR(HW_APBX_CH0_NXTCMDAR, channel);

	mutex_enter(&sc->sc_lock);
	DMA_WR(sc, reg, dmap->dm_segs[0].ds_addr);
	mutex_exit(&sc->sc_lock);

	return;
}

/*
 * Initiate DMA transfer.
 */
#define HW_APB_CHN_SEMA(base, channel)	(base + (0x70 * channel))
void
apbdma_run(struct apbdma_softc *sc, unsigned int channel)
{
	uint32_t reg;
	uint8_t val;

	if (sc->flags & F_APBH_DMA) {
		reg = HW_APB_CHN_SEMA(HW_APBH_CH0_SEMA, channel);
		val = __SHIFTIN(1, HW_APBH_CH0_SEMA_INCREMENT_SEMA);
	} else {
		reg = HW_APB_CHN_SEMA(HW_APBX_CH0_SEMA, channel);
		val = __SHIFTIN(1, HW_APBX_CH0_SEMA_INCREMENT_SEMA);
	}

	mutex_enter(&sc->sc_lock);
	DMA_WR(sc, reg, val);
	mutex_exit(&sc->sc_lock);

	return;
}
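
/*
 * Typical polled transfer sequence as seen from a client driver (a sketch;
 * the "chan", "cmds", "cmd_dmap" and "data_dmap" names are illustrative):
 *
 *	apbdma_chan_init(sc, chan);
 *	apbdma_cmd_buf(&cmds[0], 0, data_dmap);
 *	apbdma_chan_set_chain(sc, chan, cmd_dmap);
 *	apbdma_run(sc, chan);
 *	apbdma_wait(sc, chan);
 */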

/*
 * Acknowledge command complete IRQ.
 */
void
apbdma_ack_intr(struct apbdma_softc *sc, unsigned int channel)
{

	mutex_enter(&sc->sc_lock);
	/* CTRL1 holds the per-channel IRQ bits on both APBH and APBX. */
	DMA_WR(sc, HW_APB_CTRL1_CLR, (1<<channel));
	mutex_exit(&sc->sc_lock);

	return;
}

/*
 * Acknowledge error IRQ.
 */
void
apbdma_ack_error_intr(struct apbdma_softc *sc, unsigned int channel)
{

	mutex_enter(&sc->sc_lock);
	DMA_WR(sc, HW_APB_CTRL2_CLR, (1<<channel));
	mutex_exit(&sc->sc_lock);

	return;
}

/*
 * Return reason for the IRQ.
 */
unsigned int
apbdma_intr_status(struct apbdma_softc *sc, unsigned int channel)
{
	unsigned int reason;

	reason = 0;

	mutex_enter(&sc->sc_lock);

	/* Check if this was command complete IRQ. */
	if (DMA_RD(sc, HW_APB_CTRL1) & (1<<channel))
		reason = DMA_IRQ_CMDCMPLT;

	/* Check if error was set. */
	if (DMA_RD(sc, HW_APB_CTRL2) & (1<<channel)) {
		if (DMA_RD(sc, HW_APB_CTRL2) & (1<<channel)<<16)
			reason = DMA_IRQ_BUS_ERROR;
		else
			reason = DMA_IRQ_TERM;
	}

	mutex_exit(&sc->sc_lock);

	return reason;
}
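
/*
 * Example interrupt handler fragment (a sketch; "dmac_sc" and "chan" are
 * illustrative names):
 *
 *	switch (apbdma_intr_status(dmac_sc, chan)) {
 *	case DMA_IRQ_CMDCMPLT:
 *		apbdma_ack_intr(dmac_sc, chan);
 *		break;
 *	case DMA_IRQ_TERM:
 *	case DMA_IRQ_BUS_ERROR:
 *		apbdma_ack_error_intr(dmac_sc, chan);
 *		break;
 *	}
 */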

/*
 * Reset DMA channel.
 * Use only for devices on APBH bus.
 */
void
apbdma_chan_reset(struct apbdma_softc *sc, unsigned int channel)
{

	mutex_enter(&sc->sc_lock);

	if (sc->flags & F_APBH_DMA) {
		DMA_WR(sc, HW_APB_CTRL0_SET,
		    __SHIFTIN((1<<channel), HW_APBH_CTRL0_RESET_CHANNEL));
		while (DMA_RD(sc, HW_APB_CTRL0) & HW_APBH_CTRL0_RESET_CHANNEL)
			;
	} else {
		/*
		 * RESET_CHANNEL starts at the same bit position on APBX,
		 * so the APBH mask is reused for the shift.
		 */
		DMA_WR(sc, HW_APBX_CHANNEL_CTRL_SET,
		    __SHIFTIN((1<<channel), HW_APBH_CTRL0_RESET_CHANNEL));
		/* Wait for the channel's reset bit to self-clear. */
		while (DMA_RD(sc, HW_APBX_CHANNEL_CTRL) &
		    __SHIFTIN((1<<channel), HW_APBH_CTRL0_RESET_CHANNEL))
			;
	}

	mutex_exit(&sc->sc_lock);

	return;
}

/*
 * Busy-wait until the channel's command semaphore drains to zero.
 */
void
apbdma_wait(struct apbdma_softc *sc, unsigned int channel)
{

	mutex_enter(&sc->sc_lock);

	if (sc->flags & F_APBH_DMA) {
		while (DMA_RD(sc,
		    HW_APB_CHN_SEMA(HW_APBH_CH0_SEMA, channel)) &
		    HW_APBH_CH0_SEMA_PHORE)
			;
	} else {
		while (DMA_RD(sc,
		    HW_APB_CHN_SEMA(HW_APBX_CH0_SEMA, channel)) &
		    HW_APBX_CH0_SEMA_PHORE)
			;
	}

	mutex_exit(&sc->sc_lock);
}