xref: /netbsd-src/sys/arch/arm/xscale/pxa2x0_i2s.c (revision e622eac459adf11c2e710d7a4de0f05510bbbe61)
1 /*	$NetBSD: pxa2x0_i2s.c,v 1.13 2019/05/08 13:40:14 isaki Exp $	*/
2 /*	$OpenBSD: pxa2x0_i2s.c,v 1.7 2006/04/04 11:45:40 pascoe Exp $	*/
3 
4 /*
5  * Copyright (c) 2005 Christopher Pascoe <pascoe@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <sys/cdefs.h>
21 __KERNEL_RCSID(0, "$NetBSD: pxa2x0_i2s.c,v 1.13 2019/05/08 13:40:14 isaki Exp $");
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/device.h>
26 #include <sys/kmem.h>
27 #include <sys/bus.h>
28 
29 #include <arm/xscale/pxa2x0reg.h>
30 #include <arm/xscale/pxa2x0var.h>
31 #include <arm/xscale/pxa2x0_gpio.h>
32 #include <arm/xscale/pxa2x0_i2s.h>
33 #include <arm/xscale/pxa2x0_dmac.h>
34 
/*
 * Per-buffer DMA bookkeeping: one of these is created by
 * pxa2x0_i2s_allocm() for every audio buffer handed to the hardware,
 * and kept on the softc's sc_dmas list until pxa2x0_i2s_freem().
 */
struct pxa2x0_i2s_dma {
	struct pxa2x0_i2s_dma *next;	/* link in softc's sc_dmas list */
	void *addr;			/* kernel VA of the buffer */
	size_t size;			/* allocated size in bytes */
	bus_dmamap_t map;		/* map loaded with addr/size */
#define	I2S_N_SEGS	1
	bus_dma_segment_t segs[I2S_N_SEGS];	/* backing segment(s) */
	int nsegs;			/* actual segment count */
	struct dmac_xfer *dx;		/* DMAC transfer descriptor */
};
45 
/* DMA completion callbacks (output and input, respectively). */
static void pxa2x0_i2s_dmac_ointr(struct dmac_xfer *, int);
static void pxa2x0_i2s_dmac_iintr(struct dmac_xfer *, int);
48 
49 void
pxa2x0_i2s_init(struct pxa2x0_i2s_softc * sc)50 pxa2x0_i2s_init(struct pxa2x0_i2s_softc *sc)
51 {
52 
53 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SACR0, SACR0_RST);
54 	delay(100);
55 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SACR0,
56 	    SACR0_BCKD | SACR0_SET_TFTH(7) | SACR0_SET_RFTH(7));
57 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SACR1, 0);
58 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SADR, 0);
59 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SADIV, sc->sc_sadiv);
60 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SACR0,
61 		SACR0_BCKD | SACR0_SET_TFTH(7) | SACR0_SET_RFTH(7) | SACR0_ENB);
62 }
63 
64 int
pxa2x0_i2s_attach_sub(struct pxa2x0_i2s_softc * sc)65 pxa2x0_i2s_attach_sub(struct pxa2x0_i2s_softc *sc)
66 {
67 	int rv;
68 
69 	KASSERT(sc->sc_intr_lock != NULL);
70 
71 	rv = bus_space_map(sc->sc_iot, PXA2X0_I2S_BASE, PXA2X0_I2S_SIZE, 0,
72 	    &sc->sc_ioh);
73 	if (rv) {
74 		sc->sc_size = 0;
75 		return 1;
76 	}
77 
78 	sc->sc_dr.ds_addr = PXA2X0_I2S_BASE + I2S_SADR;
79 	sc->sc_dr.ds_len = 4;
80 
81 	sc->sc_sadiv = SADIV_3_058MHz;
82 
83 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, 0, sc->sc_size,
84 	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);
85 
86 	pxa2x0_i2s_init(sc);
87 
88 	return 0;
89 }
90 
91 void
pxa2x0_i2s_open(struct pxa2x0_i2s_softc * sc)92 pxa2x0_i2s_open(struct pxa2x0_i2s_softc *sc)
93 {
94 
95 	if (sc->sc_open++ == 0) {
96 		pxa2x0_clkman_config(CKEN_I2S, 1);
97 	}
98 }
99 
100 void
pxa2x0_i2s_close(struct pxa2x0_i2s_softc * sc)101 pxa2x0_i2s_close(struct pxa2x0_i2s_softc *sc)
102 {
103 
104 	if (--sc->sc_open == 0) {
105 		pxa2x0_clkman_config(CKEN_I2S, 0);
106 	}
107 }
108 
109 int
pxa2x0_i2s_detach_sub(struct pxa2x0_i2s_softc * sc)110 pxa2x0_i2s_detach_sub(struct pxa2x0_i2s_softc *sc)
111 {
112 
113 	if (sc->sc_size > 0) {
114 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_size);
115 		sc->sc_size = 0;
116 	}
117 	pxa2x0_clkman_config(CKEN_I2S, 0);
118 
119 	return 0;
120 }
121 
122 void
pxa2x0_i2s_write(struct pxa2x0_i2s_softc * sc,uint32_t data)123 pxa2x0_i2s_write(struct pxa2x0_i2s_softc *sc, uint32_t data)
124 {
125 
126 	if (sc->sc_open == 0)
127 		return;
128 
129 	/* Clear intr and underrun bit if set. */
130 	if (bus_space_read_4(sc->sc_iot, sc->sc_ioh, I2S_SASR0) & SASR0_TUR)
131 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SAICR, SAICR_TUR);
132 
133 	/* Wait for transmit fifo to have space. */
134 	while ((bus_space_read_4(sc->sc_iot, sc->sc_ioh, I2S_SASR0) & SASR0_TNF)
135 	     == 0)
136 		continue;	/* nothing */
137 
138 	/* Queue data */
139 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SADR, data);
140 }
141 
142 void
pxa2x0_i2s_setspeed(struct pxa2x0_i2s_softc * sc,u_int arg)143 pxa2x0_i2s_setspeed(struct pxa2x0_i2s_softc *sc, u_int arg)
144 {
145 	/*
146 	 * The available speeds are in the following table.
147 	 */
148 	static const struct speed_struct {
149 		int	speed;
150 		int	div;
151 	} speed_table[] = {
152 		{8000,	SADIV_513_25kHz},
153 		{11025,	SADIV_702_75kHz},
154 		{16000,	SADIV_1_026MHz},
155 		{22050,	SADIV_1_405MHz},
156 		{44100,	SADIV_2_836MHz},
157 		{48000,	SADIV_3_058MHz},
158 	};
159 	const int n = (int)__arraycount(speed_table);
160 	int selected = -1;
161 	int i;
162 
163 	if (arg < speed_table[0].speed)
164 		selected = 0;
165 	if (arg > speed_table[n - 1].speed)
166 		selected = n - 1;
167 
168 	for (i = 0; selected == -1 && i < n; i++) {
169 		if (speed_table[i].speed == arg)
170 			selected = i;
171 	}
172 	KASSERT(selected != -1);
173 
174 	sc->sc_sadiv = speed_table[selected].div;
175 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SADIV, sc->sc_sadiv);
176 }
177 
/*
 * audio(9) allocm hook: allocate a DMA-safe audio buffer plus the
 * bookkeeping needed to stream it (DMA map and DMAC transfer
 * descriptor), and link it onto the softc's sc_dmas list.
 * Returns the buffer's kernel VA, or NULL on failure.
 */
void *
pxa2x0_i2s_allocm(void *hdl, int direction, size_t size)
{
	struct pxa2x0_i2s_softc *sc = hdl;
	struct pxa2x0_i2s_dma *p;
	struct dmac_xfer *dx;
	int error;

	/* KM_SLEEP allocations never fail. */
	p = kmem_alloc(sizeof(*p), KM_SLEEP);

	dx = pxa2x0_dmac_allocate_xfer();
	if (dx == NULL) {
		goto fail_alloc;
	}
	p->dx = dx;

	p->size = size;
	/* Page-aligned, at most I2S_N_SEGS (== 1) segment. */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, NBPG, 0, p->segs,
	    I2S_N_SEGS, &p->nsegs, BUS_DMA_WAITOK)) != 0) {
		goto fail_xfer;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, p->segs, p->nsegs, size,
	    &p->addr, BUS_DMA_WAITOK | BUS_DMA_COHERENT)) != 0) {
		goto fail_map;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK, &p->map)) != 0) {
		goto fail_create;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, p->map, p->addr, size, NULL,
	    BUS_DMA_WAITOK)) != 0) {
		goto fail_load;
	}

	/*
	 * Transfer parameters common to playback and record; the
	 * per-direction fields are filled in by start_output/start_input.
	 */
	dx->dx_cookie = sc;
	dx->dx_priority = DMAC_PRIORITY_NORMAL;
	dx->dx_dev_width = DMAC_DEV_WIDTH_4;	/* SADR is 4 bytes wide */
	dx->dx_burst_size = DMAC_BURST_SIZE_32;

	/* Link into the softc's buffer list. */
	p->next = sc->sc_dmas;
	sc->sc_dmas = p;

	return p->addr;

	/* Unwind in reverse order of acquisition. */
fail_load:
	bus_dmamap_destroy(sc->sc_dmat, p->map);
fail_create:
	bus_dmamem_unmap(sc->sc_dmat, p->addr, size);
fail_map:
	bus_dmamem_free(sc->sc_dmat, p->segs, p->nsegs);
fail_xfer:
	pxa2x0_dmac_free_xfer(dx);
fail_alloc:
	kmem_free(p, sizeof(*p));
	return NULL;
}
237 
/*
 * audio(9) freem hook: release a buffer obtained from
 * pxa2x0_i2s_allocm().  Panics if ptr was never handed out.
 */
void
pxa2x0_i2s_freem(void *hdl, void *ptr, size_t size)
{
	struct pxa2x0_i2s_softc *sc = hdl;
	struct pxa2x0_i2s_dma **pp, *p;

	/* Walk the list keeping the link pointer for easy unchaining. */
	for (pp = &sc->sc_dmas; (p = *pp) != NULL; pp = &p->next) {
		if (p->addr == ptr) {
			pxa2x0_dmac_abort_xfer(p->dx);
			pxa2x0_dmac_free_xfer(p->dx);
			/*
			 * start_output/start_input shrink ds_len to the
			 * block size in flight; restore the full size
			 * before freeing the memory.
			 */
			p->segs[0].ds_len = p->size;	/* XXX */
			bus_dmamap_unload(sc->sc_dmat, p->map);
			bus_dmamap_destroy(sc->sc_dmat, p->map);
			bus_dmamem_unmap(sc->sc_dmat, p->addr, p->size);
			bus_dmamem_free(sc->sc_dmat, p->segs, p->nsegs);

			*pp = p->next;	/* unlink from sc_dmas */
			kmem_free(p, sizeof(*p));
			return;
		}
	}
	panic("pxa2x0_i2s_freem: trying to free unallocated memory");
}
261 
262 int
pxa2x0_i2s_round_blocksize(void * hdl,int bs,int mode,const struct audio_params * param)263 pxa2x0_i2s_round_blocksize(void *hdl, int bs, int mode,
264     const struct audio_params *param)
265 {
266 
267 	/* Enforce individual DMA block size limit */
268 	if (bs > DCMD_LENGTH_MASK)
269 		return (DCMD_LENGTH_MASK & ~0x07);
270 
271 	return (bs + 0x07) & ~0x07;	/* XXX: 64-bit multiples */
272 }
273 
/*
 * audio(9) round_buffersize hook: any buffer size is acceptable,
 * so hand the request back unchanged.
 */
size_t
pxa2x0_i2s_round_buffersize(void *hdl, int direction, size_t bufsize)
{

	return bufsize;
}
280 
281 int
pxa2x0_i2s_halt_output(void * hdl)282 pxa2x0_i2s_halt_output(void *hdl)
283 {
284 	struct pxa2x0_i2s_softc *sc = hdl;
285 
286 	if (sc->sc_txdma) {
287 		pxa2x0_dmac_abort_xfer(sc->sc_txdma->dx);
288 		sc->sc_txdma = NULL;
289 	}
290 
291 	return 0;
292 }
293 
294 int
pxa2x0_i2s_halt_input(void * hdl)295 pxa2x0_i2s_halt_input(void *hdl)
296 {
297 	struct pxa2x0_i2s_softc *sc = hdl;
298 
299 	if (sc->sc_rxdma) {
300 		pxa2x0_dmac_abort_xfer(sc->sc_rxdma->dx);
301 		sc->sc_rxdma = NULL;
302 	}
303 
304 	return 0;
305 }
306 
307 int
pxa2x0_i2s_start_output(void * hdl,void * block,int bsize,void (* tx_func)(void *),void * tx_arg)308 pxa2x0_i2s_start_output(void *hdl, void *block, int bsize,
309     void (*tx_func)(void *), void *tx_arg)
310 {
311 	struct pxa2x0_i2s_softc *sc = hdl;
312 	struct pxa2x0_i2s_dma *p;
313 	struct dmac_xfer *dx;
314 
315 	if (sc->sc_txdma)
316 		return EBUSY;
317 
318 	/* Find mapping which contains block completely */
319 	for (p = sc->sc_dmas;
320 	     p != NULL &&
321 	       (((char*)block < (char *)p->addr) ||
322 	        ((char *)block + bsize > (char *)p->addr + p->size));
323 	     p = p->next) {
324 		continue;	/* Nothing */
325 	}
326 	if (p == NULL) {
327 		aprint_error("pxa2x0_i2s_start_output: "
328 		    "request with bad start address: %p, size: %d\n",
329 		    block, bsize);
330 		return ENXIO;
331 	}
332 	sc->sc_txdma = p;
333 
334 	p->segs[0].ds_addr = p->map->dm_segs[0].ds_addr +
335 	                         ((char *)block - (char *)p->addr);
336 	p->segs[0].ds_len = bsize;
337 
338 	dx = p->dx;
339 	dx->dx_done = pxa2x0_i2s_dmac_ointr;
340 	dx->dx_peripheral = DMAC_PERIPH_I2STX;
341 	dx->dx_flow = DMAC_FLOW_CTRL_DEST;
342 	dx->dx_loop_notify = DMAC_DONT_LOOP;
343 	dx->dx_desc[DMAC_DESC_SRC].xd_addr_hold = false;
344 	dx->dx_desc[DMAC_DESC_SRC].xd_nsegs = p->nsegs;
345 	dx->dx_desc[DMAC_DESC_SRC].xd_dma_segs = p->segs;
346 	dx->dx_desc[DMAC_DESC_DST].xd_addr_hold = true;
347 	dx->dx_desc[DMAC_DESC_DST].xd_nsegs = 1;
348 	dx->dx_desc[DMAC_DESC_DST].xd_dma_segs = &sc->sc_dr;
349 
350 	sc->sc_txfunc = tx_func;
351 	sc->sc_txarg = tx_arg;
352 
353 	/* Start DMA */
354 	return pxa2x0_dmac_start_xfer(dx);
355 }
356 
357 int
pxa2x0_i2s_start_input(void * hdl,void * block,int bsize,void (* rx_func)(void *),void * rx_arg)358 pxa2x0_i2s_start_input(void *hdl, void *block, int bsize,
359     void (*rx_func)(void *), void *rx_arg)
360 {
361 	struct pxa2x0_i2s_softc *sc = hdl;
362 	struct pxa2x0_i2s_dma *p;
363 	struct dmac_xfer *dx;
364 
365 	if (sc->sc_rxdma)
366 		return EBUSY;
367 
368 	/* Find mapping which contains block completely */
369 	for (p = sc->sc_dmas;
370 	     p != NULL &&
371 	       (((char*)block < (char *)p->addr) ||
372 	        ((char *)block + bsize > (char *)p->addr + p->size));
373 	     p = p->next) {
374 		continue;	/* Nothing */
375 	}
376 	if (p == NULL) {
377 		aprint_error("pxa2x0_i2s_start_input: "
378 		    "request with bad start address: %p, size: %d\n",
379 		    block, bsize);
380 		return ENXIO;
381 	}
382 	sc->sc_rxdma = p;
383 
384 	p->segs[0].ds_addr = p->map->dm_segs[0].ds_addr +
385 	                         ((char *)block - (char *)p->addr);
386 	p->segs[0].ds_len = bsize;
387 
388 	dx = p->dx;
389 	dx->dx_done = pxa2x0_i2s_dmac_iintr;
390 	dx->dx_peripheral = DMAC_PERIPH_I2SRX;
391 	dx->dx_flow = DMAC_FLOW_CTRL_SRC;
392 	dx->dx_loop_notify = DMAC_DONT_LOOP;
393 	dx->dx_desc[DMAC_DESC_SRC].xd_addr_hold = true;
394 	dx->dx_desc[DMAC_DESC_SRC].xd_nsegs = 1;
395 	dx->dx_desc[DMAC_DESC_SRC].xd_dma_segs = &sc->sc_dr;
396 	dx->dx_desc[DMAC_DESC_DST].xd_addr_hold = false;
397 	dx->dx_desc[DMAC_DESC_DST].xd_nsegs = p->nsegs;
398 	dx->dx_desc[DMAC_DESC_DST].xd_dma_segs = p->segs;
399 
400 	sc->sc_rxfunc = rx_func;
401 	sc->sc_rxarg = rx_arg;
402 
403 	/* Start DMA */
404 	return pxa2x0_dmac_start_xfer(dx);
405 }
406 
407 static void
pxa2x0_i2s_dmac_ointr(struct dmac_xfer * dx,int status)408 pxa2x0_i2s_dmac_ointr(struct dmac_xfer *dx, int status)
409 {
410 	struct pxa2x0_i2s_softc *sc = dx->dx_cookie;
411 
412 	if (sc->sc_txdma == NULL) {
413 		panic("pxa2x_i2s_dmac_ointr: bad TX DMA descriptor!");
414 	}
415 	if (sc->sc_txdma->dx != dx) {
416 		panic("pxa2x_i2s_dmac_ointr: xfer mismatch!");
417 	}
418 	sc->sc_txdma = NULL;
419 
420 	if (status) {
421 		aprint_error("pxa2x0_i2s_dmac_ointr: "
422 		    "non-zero completion status %d\n", status);
423 	}
424 
425 	mutex_spin_enter(sc->sc_intr_lock);
426 	(sc->sc_txfunc)(sc->sc_txarg);
427 	mutex_spin_exit(sc->sc_intr_lock);
428 }
429 
430 static void
pxa2x0_i2s_dmac_iintr(struct dmac_xfer * dx,int status)431 pxa2x0_i2s_dmac_iintr(struct dmac_xfer *dx, int status)
432 {
433 	struct pxa2x0_i2s_softc *sc = dx->dx_cookie;
434 
435 	if (sc->sc_rxdma == NULL) {
436 		panic("pxa2x_i2s_dmac_iintr: bad RX DMA descriptor!");
437 	}
438 	if (sc->sc_rxdma->dx != dx) {
439 		panic("pxa2x_i2s_dmac_iintr: xfer mismatch!");
440 	}
441 	sc->sc_rxdma = NULL;
442 
443 	if (status) {
444 		aprint_error("pxa2x0_i2s_dmac_iintr: "
445 		    "non-zero completion status %d\n", status);
446 	}
447 
448 
449 	mutex_spin_enter(sc->sc_intr_lock);
450 	(sc->sc_rxfunc)(sc->sc_rxarg);
451 	mutex_spin_exit(sc->sc_intr_lock);
452 }
453