/*	$NetBSD: pxa2x0_i2s.c,v 1.12 2017/06/01 02:45:06 chs Exp $	*/
/*	$OpenBSD: pxa2x0_i2s.c,v 1.7 2006/04/04 11:45:40 pascoe Exp $	*/

/*
 * Copyright (c) 2005 Christopher Pascoe <pascoe@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pxa2x0_i2s.c,v 1.12 2017/06/01 02:45:06 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/bus.h>

#include <arm/xscale/pxa2x0reg.h>
#include <arm/xscale/pxa2x0var.h>
#include <arm/xscale/pxa2x0_gpio.h>
#include <arm/xscale/pxa2x0_i2s.h>
#include <arm/xscale/pxa2x0_dmac.h>

struct pxa2x0_i2s_dma {
	struct pxa2x0_i2s_dma *next;
	void *addr;
	size_t size;
	bus_dmamap_t map;
#define	I2S_N_SEGS	1
	bus_dma_segment_t segs[I2S_N_SEGS];
	int nsegs;
	struct dmac_xfer *dx;
};

static void pxa2x0_i2s_dmac_ointr(struct dmac_xfer *, int);
static void pxa2x0_i2s_dmac_iintr(struct dmac_xfer *, int);

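/*
 * Reset the I2S unit, then program the bit clock direction and the
 * TX/RX FIFO thresholds, clear SACR1 and the data register, load the
 * current sample rate divider and finally enable the unit.
 */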
void
pxa2x0_i2s_init(struct pxa2x0_i2s_softc *sc)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SACR0, SACR0_RST);
	delay(100);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SACR0,
	    SACR0_BCKD | SACR0_SET_TFTH(7) | SACR0_SET_RFTH(7));
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SACR1, 0);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SADR, 0);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SADIV, sc->sc_sadiv);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SACR0,
	    SACR0_BCKD | SACR0_SET_TFTH(7) | SACR0_SET_RFTH(7) | SACR0_ENB);
}

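/*
 * Map the I2S registers, describe the SADR FIFO register as a
 * one-word DMA segment for the DMA controller, pick a default sample
 * rate divider and initialize the unit.  Returns 0 on success,
 * nonzero if the register window could not be mapped.
 */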
int
pxa2x0_i2s_attach_sub(struct pxa2x0_i2s_softc *sc)
{
	int rv;

	KASSERT(sc->sc_intr_lock != NULL);

	rv = bus_space_map(sc->sc_iot, PXA2X0_I2S_BASE, PXA2X0_I2S_SIZE, 0,
	    &sc->sc_ioh);
	if (rv) {
		sc->sc_size = 0;
		return 1;
	}

	sc->sc_dr.ds_addr = PXA2X0_I2S_BASE + I2S_SADR;
	sc->sc_dr.ds_len = 4;

	sc->sc_sadiv = SADIV_3_058MHz;

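	/*
	 * Note: sc_size is assumed to have been initialized by the
	 * attachment code before this is called; the failure path
	 * above clears it so that pxa2x0_i2s_detach_sub() skips the
	 * unmap.
	 */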
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, 0, sc->sc_size,
	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);

	pxa2x0_i2s_init(sc);

	return 0;
}

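/*
 * Reference-counted open: enable the I2S unit clock on first use.
 */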
void
pxa2x0_i2s_open(struct pxa2x0_i2s_softc *sc)
{

	if (sc->sc_open++ == 0) {
		pxa2x0_clkman_config(CKEN_I2S, 1);
	}
}

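/*
 * Drop a reference; gate the I2S unit clock off again on last close.
 */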
void
pxa2x0_i2s_close(struct pxa2x0_i2s_softc *sc)
{

	if (--sc->sc_open == 0) {
		pxa2x0_clkman_config(CKEN_I2S, 0);
	}
}

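/*
 * Unmap the register window if it is mapped and make sure the I2S
 * unit clock is disabled.
 */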
int
pxa2x0_i2s_detach_sub(struct pxa2x0_i2s_softc *sc)
{

	if (sc->sc_size > 0) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_size);
		sc->sc_size = 0;
	}
	pxa2x0_clkman_config(CKEN_I2S, 0);

	return 0;
}

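/*
 * Polled write of a single 32-bit sample to the transmit FIFO: clear
 * any pending underrun, busy-wait until the FIFO has space, then
 * queue the data.  Does nothing unless the device is open.
 */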
void
pxa2x0_i2s_write(struct pxa2x0_i2s_softc *sc, uint32_t data)
{

	if (sc->sc_open == 0)
		return;

	/* Clear intr and underrun bit if set. */
	if (bus_space_read_4(sc->sc_iot, sc->sc_ioh, I2S_SASR0) & SASR0_TUR)
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SAICR, SAICR_TUR);

	/* Wait for transmit fifo to have space. */
	while ((bus_space_read_4(sc->sc_iot, sc->sc_ioh, I2S_SASR0) & SASR0_TNF)
	     == 0)
		continue;	/* nothing */

	/* Queue data */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SADR, data);
}

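/*
 * Program the SADIV sample clock divider whose nominal rate is
 * closest to the requested rate and report the rate actually
 * selected back through *argp.
 */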
void
pxa2x0_i2s_setspeed(struct pxa2x0_i2s_softc *sc, u_int *argp)
{
	/*
	 * The available speeds are in the following table.
	 * Keep the speeds in increasing order.
	 */
	static const struct speed_struct {
		int	speed;
		int	div;
	} speed_table[] = {
		{8000,	SADIV_513_25kHz},
		{11025,	SADIV_702_75kHz},
		{16000,	SADIV_1_026MHz},
		{22050,	SADIV_1_405MHz},
		{44100,	SADIV_2_836MHz},
		{48000,	SADIV_3_058MHz},
	};
	const int n = (int)__arraycount(speed_table);
	u_int arg = (u_int)*argp;
	int selected = -1;
	int i;

	if (arg < speed_table[0].speed)
		selected = 0;
	if (arg > speed_table[n - 1].speed)
		selected = n - 1;

	for (i = 1; selected == -1 && i < n; i++) {
		if (speed_table[i].speed == arg)
			selected = i;
		else if (speed_table[i].speed > arg) {
			int diff1, diff2;

			diff1 = arg - speed_table[i - 1].speed;
			diff2 = speed_table[i].speed - arg;
			if (diff1 < diff2)
				selected = i - 1;
			else
				selected = i;
		}
	}

	if (selected == -1)
		selected = 0;

	*argp = speed_table[selected].speed;

	sc->sc_sadiv = speed_table[selected].div;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, I2S_SADIV, sc->sc_sadiv);
}

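/*
 * Allocate a DMA-safe buffer for audio(9): a single contiguous
 * segment, mapped coherently and loaded into a DMA map, together with
 * a dmac_xfer descriptor that is reused for every transfer involving
 * this buffer.
 */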
void *
pxa2x0_i2s_allocm(void *hdl, int direction, size_t size)
{
	struct pxa2x0_i2s_softc *sc = hdl;
	struct pxa2x0_i2s_dma *p;
	struct dmac_xfer *dx;
	int error;

	p = kmem_alloc(sizeof(*p), KM_SLEEP);

	dx = pxa2x0_dmac_allocate_xfer();
	if (dx == NULL) {
		goto fail_alloc;
	}
	p->dx = dx;

	p->size = size;
	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, NBPG, 0, p->segs,
	    I2S_N_SEGS, &p->nsegs, BUS_DMA_WAITOK)) != 0) {
		goto fail_xfer;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, p->segs, p->nsegs, size,
	    &p->addr, BUS_DMA_WAITOK | BUS_DMA_COHERENT)) != 0) {
		goto fail_map;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK, &p->map)) != 0) {
		goto fail_create;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, p->map, p->addr, size, NULL,
	    BUS_DMA_WAITOK)) != 0) {
		goto fail_load;
	}

	dx->dx_cookie = sc;
	dx->dx_priority = DMAC_PRIORITY_NORMAL;
	dx->dx_dev_width = DMAC_DEV_WIDTH_4;
	dx->dx_burst_size = DMAC_BURST_SIZE_32;

	p->next = sc->sc_dmas;
	sc->sc_dmas = p;

	return p->addr;

fail_load:
	bus_dmamap_destroy(sc->sc_dmat, p->map);
fail_create:
	bus_dmamem_unmap(sc->sc_dmat, p->addr, size);
fail_map:
	bus_dmamem_free(sc->sc_dmat, p->segs, p->nsegs);
fail_xfer:
	pxa2x0_dmac_free_xfer(dx);
fail_alloc:
	kmem_free(p, sizeof(*p));
	return NULL;
}

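/*
 * Free a buffer obtained from pxa2x0_i2s_allocm(), tearing down the
 * DMA map, the memory and the dmac_xfer descriptor.
 */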
void
pxa2x0_i2s_freem(void *hdl, void *ptr, size_t size)
{
	struct pxa2x0_i2s_softc *sc = hdl;
	struct pxa2x0_i2s_dma **pp, *p;

	for (pp = &sc->sc_dmas; (p = *pp) != NULL; pp = &p->next) {
		if (p->addr == ptr) {
			pxa2x0_dmac_abort_xfer(p->dx);
			pxa2x0_dmac_free_xfer(p->dx);
			p->segs[0].ds_len = p->size;	/* XXX */
			bus_dmamap_unload(sc->sc_dmat, p->map);
			bus_dmamap_destroy(sc->sc_dmat, p->map);
			bus_dmamem_unmap(sc->sc_dmat, p->addr, p->size);
			bus_dmamem_free(sc->sc_dmat, p->segs, p->nsegs);

			*pp = p->next;
			kmem_free(p, sizeof(*p));
			return;
		}
	}
	panic("pxa2x0_i2s_freem: trying to free unallocated memory");
}

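/*
 * Return the physical page backing the given offset of a buffer
 * allocated above, for mmap(2) of the audio device.
 */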
paddr_t
pxa2x0_i2s_mappage(void *hdl, void *mem, off_t off, int prot)
{
	struct pxa2x0_i2s_softc *sc = hdl;
	struct pxa2x0_i2s_dma *p;

	if (off < 0)
		return -1;

	for (p = sc->sc_dmas; p && p->addr != mem; p = p->next)
		continue;
	if (p == NULL)
		return -1;

	if (off > p->size)
		return -1;

	return bus_dmamem_mmap(sc->sc_dmat, p->segs, p->nsegs, off, prot,
	    BUS_DMA_WAITOK);
}

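/*
 * Cap the audio block size at the DMA controller's per-descriptor
 * length limit and round it up to a multiple of 8 bytes.
 */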
int
pxa2x0_i2s_round_blocksize(void *hdl, int bs, int mode,
    const struct audio_params *param)
{

	/* Enforce individual DMA block size limit */
	if (bs > DCMD_LENGTH_MASK)
		return (DCMD_LENGTH_MASK & ~0x07);

	return (bs + 0x07) & ~0x07;	/* XXX: 64-bit multiples */
}

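/*
 * No restriction on the total buffer size; only the per-block size is
 * limited above.
 */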
size_t
pxa2x0_i2s_round_buffersize(void *hdl, int direction, size_t bufsize)
{

	return bufsize;
}

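/*
 * Abort any output DMA transfer that is in progress.
 */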
int
pxa2x0_i2s_halt_output(void *hdl)
{
	struct pxa2x0_i2s_softc *sc = hdl;

	if (sc->sc_txdma) {
		pxa2x0_dmac_abort_xfer(sc->sc_txdma->dx);
		sc->sc_txdma = NULL;
	}

	return 0;
}

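/*
 * Abort any input DMA transfer that is in progress.
 */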
int
pxa2x0_i2s_halt_input(void *hdl)
{
	struct pxa2x0_i2s_softc *sc = hdl;

	if (sc->sc_rxdma) {
		pxa2x0_dmac_abort_xfer(sc->sc_rxdma->dx);
		sc->sc_rxdma = NULL;
	}

	return 0;
}

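/*
 * Queue one block of output: find the allocated buffer that contains
 * the block, point the transfer's source descriptor at it and its
 * destination at the SADR FIFO register, then start the DMA.  tx_func
 * is called from the DMA completion interrupt.
 */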
int
pxa2x0_i2s_start_output(void *hdl, void *block, int bsize,
    void (*tx_func)(void *), void *tx_arg)
{
	struct pxa2x0_i2s_softc *sc = hdl;
	struct pxa2x0_i2s_dma *p;
	struct dmac_xfer *dx;

	if (sc->sc_txdma)
		return EBUSY;

	/* Find mapping which contains block completely */
	for (p = sc->sc_dmas;
	     p != NULL &&
	       (((char *)block < (char *)p->addr) ||
	        ((char *)block + bsize > (char *)p->addr + p->size));
	     p = p->next) {
		continue;	/* Nothing */
	}
	if (p == NULL) {
		aprint_error("pxa2x0_i2s_start_output: "
		    "request with bad start address: %p, size: %d\n",
		    block, bsize);
		return ENXIO;
	}
	sc->sc_txdma = p;

	p->segs[0].ds_addr = p->map->dm_segs[0].ds_addr +
	                         ((char *)block - (char *)p->addr);
	p->segs[0].ds_len = bsize;

	dx = p->dx;
	dx->dx_done = pxa2x0_i2s_dmac_ointr;
	dx->dx_peripheral = DMAC_PERIPH_I2STX;
	dx->dx_flow = DMAC_FLOW_CTRL_DEST;
	dx->dx_loop_notify = DMAC_DONT_LOOP;
	dx->dx_desc[DMAC_DESC_SRC].xd_addr_hold = false;
	dx->dx_desc[DMAC_DESC_SRC].xd_nsegs = p->nsegs;
	dx->dx_desc[DMAC_DESC_SRC].xd_dma_segs = p->segs;
	dx->dx_desc[DMAC_DESC_DST].xd_addr_hold = true;
	dx->dx_desc[DMAC_DESC_DST].xd_nsegs = 1;
	dx->dx_desc[DMAC_DESC_DST].xd_dma_segs = &sc->sc_dr;

	sc->sc_txfunc = tx_func;
	sc->sc_txarg = tx_arg;

	/* Start DMA */
	return pxa2x0_dmac_start_xfer(dx);
}

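/*
 * Queue one block of input: the mirror image of start_output above,
 * with the SADR FIFO register as the source and the buffer as the
 * destination.  rx_func is called from the DMA completion interrupt.
 */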
int
pxa2x0_i2s_start_input(void *hdl, void *block, int bsize,
    void (*rx_func)(void *), void *rx_arg)
{
	struct pxa2x0_i2s_softc *sc = hdl;
	struct pxa2x0_i2s_dma *p;
	struct dmac_xfer *dx;

	if (sc->sc_rxdma)
		return EBUSY;

	/* Find mapping which contains block completely */
	for (p = sc->sc_dmas;
	     p != NULL &&
	       (((char *)block < (char *)p->addr) ||
	        ((char *)block + bsize > (char *)p->addr + p->size));
	     p = p->next) {
		continue;	/* Nothing */
	}
	if (p == NULL) {
		aprint_error("pxa2x0_i2s_start_input: "
		    "request with bad start address: %p, size: %d\n",
		    block, bsize);
		return ENXIO;
	}
	sc->sc_rxdma = p;

	p->segs[0].ds_addr = p->map->dm_segs[0].ds_addr +
	                         ((char *)block - (char *)p->addr);
	p->segs[0].ds_len = bsize;

	dx = p->dx;
	dx->dx_done = pxa2x0_i2s_dmac_iintr;
	dx->dx_peripheral = DMAC_PERIPH_I2SRX;
	dx->dx_flow = DMAC_FLOW_CTRL_SRC;
	dx->dx_loop_notify = DMAC_DONT_LOOP;
	dx->dx_desc[DMAC_DESC_SRC].xd_addr_hold = true;
	dx->dx_desc[DMAC_DESC_SRC].xd_nsegs = 1;
	dx->dx_desc[DMAC_DESC_SRC].xd_dma_segs = &sc->sc_dr;
	dx->dx_desc[DMAC_DESC_DST].xd_addr_hold = false;
	dx->dx_desc[DMAC_DESC_DST].xd_nsegs = p->nsegs;
	dx->dx_desc[DMAC_DESC_DST].xd_dma_segs = p->segs;

	sc->sc_rxfunc = rx_func;
	sc->sc_rxarg = rx_arg;

	/* Start DMA */
	return pxa2x0_dmac_start_xfer(dx);
}

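/*
 * Output DMA completion interrupt: clear the pending transfer and
 * hand the block back to the audio layer via the registered tx
 * callback, under the interrupt lock.
 */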
static void
pxa2x0_i2s_dmac_ointr(struct dmac_xfer *dx, int status)
{
	struct pxa2x0_i2s_softc *sc = dx->dx_cookie;

	if (sc->sc_txdma == NULL) {
		panic("pxa2x0_i2s_dmac_ointr: bad TX DMA descriptor!");
	}
	if (sc->sc_txdma->dx != dx) {
		panic("pxa2x0_i2s_dmac_ointr: xfer mismatch!");
	}
	sc->sc_txdma = NULL;

	if (status) {
		aprint_error("pxa2x0_i2s_dmac_ointr: "
		    "non-zero completion status %d\n", status);
	}

	mutex_spin_enter(sc->sc_intr_lock);
	(sc->sc_txfunc)(sc->sc_txarg);
	mutex_spin_exit(sc->sc_intr_lock);
}

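/*
 * Input DMA completion interrupt: clear the pending transfer and hand
 * the block back to the audio layer via the registered rx callback,
 * under the interrupt lock.
 */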
static void
pxa2x0_i2s_dmac_iintr(struct dmac_xfer *dx, int status)
{
	struct pxa2x0_i2s_softc *sc = dx->dx_cookie;

	if (sc->sc_rxdma == NULL) {
		panic("pxa2x0_i2s_dmac_iintr: bad RX DMA descriptor!");
	}
	if (sc->sc_rxdma->dx != dx) {
		panic("pxa2x0_i2s_dmac_iintr: xfer mismatch!");
	}
	sc->sc_rxdma = NULL;

	if (status) {
		aprint_error("pxa2x0_i2s_dmac_iintr: "
		    "non-zero completion status %d\n", status);
	}

	mutex_spin_enter(sc->sc_intr_lock);
	(sc->sc_rxfunc)(sc->sc_rxarg);
	mutex_spin_exit(sc->sc_intr_lock);
}