/*	$NetBSD: iopaau.c,v 1.10 2003/04/29 01:07:31 thorpej Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Common code for XScale-based I/O Processor Application Accelerator
 * Unit support.
 *
 * The AAU provides a back-end for the dmover(9) facility.
 */
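
/*
 * For orientation: a dmover(9) client reaches this back-end roughly as
 * sketched below.  This is an illustrative sketch only; the session and
 * request calls are quoted from memory of the dmover(9) interface, so
 * consult the manual page for the exact signatures.  The function names
 * ("zero", "fill8", "copy", "xor2".."xor8") are the ones this file
 * provides setup routines for.
 *
 *	struct dmover_session *dses;
 *	struct dmover_request *dreq;
 *
 *	dmover_session_create("xor2", &dses);
 *	dreq = dmover_request_alloc(dses, NULL);
 *	dreq->dreq_outbuf_type = DMOVER_BUF_LINEAR;
 *	dreq->dreq_outbuf.dmbuf_linear.l_addr = dstbuf;
 *	dreq->dreq_outbuf.dmbuf_linear.l_len = len;
 *	(fill in dreq->dreq_inbuf[0] and [1] the same way)
 *	dmover_process(dreq);
 *
 * dmover(9) then assigns the request to a registered back-end and calls
 * its process hook, which for this driver is iopaau_process() below.
 */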

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iopaau.c,v 1.10 2003/04/29 01:07:31 thorpej Exp $");

#include <sys/param.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/uio.h>

#include <uvm/uvm.h>

#include <machine/bus.h>

#include <arm/xscale/iopaaureg.h>
#include <arm/xscale/iopaauvar.h>

#ifdef AAU_DEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)	/* nothing */
#endif

static struct pool aau_desc_4_pool;
static struct pool aau_desc_8_pool;

struct pool_cache iopaau_desc_4_cache;
struct pool_cache iopaau_desc_8_cache;

/*
 * iopaau_desc_ctor:
 *
 *	Constructor for all types of descriptors.
 */
static int
iopaau_desc_ctor(void *arg, void *object, int flags)
{
	struct aau_desc_4 *d = object;

	/*
	 * Cache the physical address of the hardware portion of
	 * the descriptor in the software portion of the descriptor
	 * for quick reference later.
	 */
	d->d_pa = vtophys((vaddr_t)d) + SYNC_DESC_4_OFFSET;
	KASSERT((d->d_pa & 31) == 0);
	return (0);
}
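
/*
 * Note that the AAU follows descriptor chains by physical address: each
 * descriptor's d_nda field holds the physical address of the next
 * descriptor's hardware portion.  Caching vtophys() of the hardware
 * portion in d_pa at construction time lets the setup routines below
 * thread new descriptors onto the hardware chain without translating
 * addresses on every request.
 */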

/*
 * iopaau_desc_free:
 *
 *	Free a chain of AAU descriptors.
 */
void
iopaau_desc_free(struct pool_cache *dc, void *firstdesc)
{
	struct aau_desc_4 *d, *next;

	for (d = firstdesc; d != NULL; d = next) {
		next = d->d_next;
		pool_cache_put(dc, d);
	}
}

/*
 * iopaau_start:
 *
 *	Start an AAU request.  Must be called at splbio().
 */
static void
iopaau_start(struct iopaau_softc *sc)
{
	struct dmover_backend *dmb = &sc->sc_dmb;
	struct dmover_request *dreq;
	struct iopaau_function *af;
	int error;

	for (;;) {

		KASSERT(sc->sc_running == NULL);

		dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
		if (dreq == NULL)
			return;

		dmover_backend_remque(dmb, dreq);
		dreq->dreq_flags |= DMOVER_REQ_RUNNING;

		sc->sc_running = dreq;

		/* XXXUNLOCK */

		af = dreq->dreq_assignment->das_algdesc->dad_data;
		error = (*af->af_setup)(sc, dreq);

		/* XXXLOCK */

		if (error) {
			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			dreq->dreq_error = error;
			sc->sc_running = NULL;
			/* XXXUNLOCK */
			dmover_done(dreq);
			/* XXXLOCK */
			continue;
		}

#ifdef DIAGNOSTIC
		if (bus_space_read_4(sc->sc_st, sc->sc_sh, AAU_ASR) &
		    AAU_ASR_AAF)
			panic("iopaau_start: AAU already active");
#endif

		DPRINTF(("%s: starting dreq %p\n", sc->sc_dev.dv_xname,
		    dreq));

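		/*
		 * Hand the AAU the physical address of the first
		 * descriptor and enable it.  The unit then walks the
		 * chain on its own; completion is reported through the
		 * end-of-chain interrupt serviced by iopaau_intr(),
		 * which in turn calls iopaau_finish().
		 */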
		bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ANDAR,
		    sc->sc_firstdesc_pa);
		bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ACR,
		    AAU_ACR_AAE);

		break;
	}
}

/*
 * iopaau_finish:
 *
 *	Finish the current operation.  AAU must be stopped.
 */
static void
iopaau_finish(struct iopaau_softc *sc)
{
	struct dmover_request *dreq = sc->sc_running;
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	void *firstdesc = sc->sc_firstdesc;
	int i, ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;

	sc->sc_running = NULL;

	/* If the function has inputs, unmap them. */
	for (i = 0; i < ninputs; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_map_in[i], 0,
		    sc->sc_map_in[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_map_in[i]);
	}

	/* Unload the output buffer DMA map. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map_out, 0,
	    sc->sc_map_out->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);

	/* Get the next transfer started. */
	iopaau_start(sc);

	/* Now free descriptors for last transfer. */
	iopaau_desc_free(af->af_desc_cache, firstdesc);

	dmover_done(dreq);
}

/*
 * iopaau_process:
 *
 *	Dmover back-end entry point.
 */
void
iopaau_process(struct dmover_backend *dmb)
{
	struct iopaau_softc *sc = dmb->dmb_cookie;
	int s;

	s = splbio();
	/* XXXLOCK */

	if (sc->sc_running == NULL)
		iopaau_start(sc);

	/* XXXUNLOCK */
	splx(s);
}

/*
 * iopaau_func_fill_immed_setup:
 *
 *	Common code shared by the zero and fillN setup routines.
 */
static int
iopaau_func_fill_immed_setup(struct iopaau_softc *sc,
    struct dmover_request *dreq, uint32_t immed)
{
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	struct pool_cache *dc = af->af_desc_cache;
	bus_dmamap_t dmamap = sc->sc_map_out;
	uint32_t *prevpa;
	struct aau_desc_4 **prevp, *cur;
	int error, seg;

	switch (dreq->dreq_outbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dreq->dreq_outbuf.dmbuf_linear.l_addr,
		    dreq->dreq_outbuf.dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;

		if (uio->uio_rw != UIO_READ)
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;
	    }

	default:
		error = EINVAL;
	}

	if (__predict_false(error != 0))
		return (error);

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	prevp = (struct aau_desc_4 **) &sc->sc_firstdesc;
	prevpa = &sc->sc_firstdesc_pa;

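	/*
	 * Build one descriptor per DMA segment of the output map.
	 * prevp and prevpa always point at the previous link fields
	 * (seeded with sc_firstdesc and sc_firstdesc_pa), so each new
	 * descriptor is threaded onto both the software chain (d_next)
	 * and the hardware chain (d_nda) as it is allocated.
	 */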
	for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
		cur = pool_cache_get(dc, PR_NOWAIT);
		if (cur == NULL) {
			*prevp = NULL;
			error = ENOMEM;
			goto bad;
		}

		*prevp = cur;
		*prevpa = cur->d_pa;

		prevp = &cur->d_next;
		prevpa = &cur->d_nda;

		/*
		 * We don't actually enforce the page alignment
		 * constraint, here, because there is only one
		 * data stream to worry about.
		 */

		cur->d_sar[0] = immed;
		cur->d_dar = dmamap->dm_segs[seg].ds_addr;
		cur->d_bc = dmamap->dm_segs[seg].ds_len;
		cur->d_dc = AAU_DC_B1_CC(AAU_DC_CC_FILL) | AAU_DC_DWE;
		SYNC_DESC(cur, sizeof(struct aau_desc_4));
	}

	*prevp = NULL;
	*prevpa = 0;

	cur->d_dc |= AAU_DC_IE;
	SYNC_DESC(cur, sizeof(struct aau_desc_4));

	sc->sc_lastdesc = cur;

	return (0);

 bad:
	iopaau_desc_free(dc, sc->sc_firstdesc);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);
	sc->sc_firstdesc = NULL;

	return (error);
}

/*
 * iopaau_func_zero_setup:
 *
 *	Setup routine for the "zero" function.
 */
int
iopaau_func_zero_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{

	return (iopaau_func_fill_immed_setup(sc, dreq, 0));
}

/*
 * iopaau_func_fill8_setup:
 *
 *	Setup routine for the "fill8" function.
 */
int
iopaau_func_fill8_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{

	return (iopaau_func_fill_immed_setup(sc, dreq,
	    dreq->dreq_immediate[0] |
	    (dreq->dreq_immediate[0] << 8) |
	    (dreq->dreq_immediate[0] << 16) |
	    (dreq->dreq_immediate[0] << 24)));
}

/*
 * Descriptor command words for varying numbers of inputs.  For 1 input,
 * this does a copy.  For multiple inputs, we're doing an XOR.  In this
 * case, the first block is a "direct fill" to load the store queue, and
 * the remaining blocks are XOR'd to the store queue.
 */
static const uint32_t iopaau_dc_inputs[] = {
	0,						/* 0 */

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL),		/* 1 */

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|		/* 2 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR),

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|		/* 3 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR),

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|		/* 4 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|				/* 5 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|				/* 6 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR)|
	AAU_DC_B6_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|				/* 7 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR)|
	AAU_DC_B6_CC(AAU_DC_CC_XOR)|
	AAU_DC_B7_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|				/* 8 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR)|
	AAU_DC_B6_CC(AAU_DC_CC_XOR)|
	AAU_DC_B7_CC(AAU_DC_CC_XOR)|
	AAU_DC_B8_CC(AAU_DC_CC_XOR),
};
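
/*
 * As a concrete example, the three-input entry above tells the AAU to
 * direct-fill its store queue from block 1 and XOR blocks 2 and 3 into
 * it.  The setup routine below ORs in AAU_DC_DWE so the accumulated
 * result is written to the destination, and sets AAU_DC_IE on the final
 * descriptor so completion of the chain raises an interrupt.
 */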

/*
 * iopaau_func_xor_setup:
 *
 *	Setup routine for the "copy", "xor2".."xor8" functions.
 */
int
iopaau_func_xor_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	struct pool_cache *dc = af->af_desc_cache;
	bus_dmamap_t dmamap = sc->sc_map_out;
	bus_dmamap_t *inmap = sc->sc_map_in;
	uint32_t *prevpa;
	struct aau_desc_8 **prevp, *cur;
	int ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
	int i, error, seg;
	size_t descsz = AAU_DESC_SIZE(ninputs);

	KASSERT(ninputs <= AAU_MAX_INPUTS);

	switch (dreq->dreq_outbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dreq->dreq_outbuf.dmbuf_linear.l_addr,
		    dreq->dreq_outbuf.dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;

		if (uio->uio_rw != UIO_READ)
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;
	    }

	default:
		error = EINVAL;
	}

	if (__predict_false(error != 0))
		return (error);

	switch (dreq->dreq_inbuf_type) {
	case DMOVER_BUF_LINEAR:
		for (i = 0; i < ninputs; i++) {
			error = bus_dmamap_load(sc->sc_dmat, inmap[i],
			    dreq->dreq_inbuf[i].dmbuf_linear.l_addr,
			    dreq->dreq_inbuf[i].dmbuf_linear.l_len, NULL,
			    BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
			if (__predict_false(error != 0))
				break;
			if (dmamap->dm_nsegs != inmap[i]->dm_nsegs) {
				error = EFAULT;	/* "address error", sort of. */
				bus_dmamap_unload(sc->sc_dmat, inmap[i]);
				break;
			}
		}
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio;

		for (i = 0; i < ninputs; i++) {
			uio = dreq->dreq_inbuf[i].dmbuf_uio;

			if (uio->uio_rw != UIO_WRITE) {
				error = EINVAL;
				break;
			}

			error = bus_dmamap_load_uio(sc->sc_dmat, inmap[i], uio,
			    BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
			if (__predict_false(error != 0)) {
				break;
			}
			if (dmamap->dm_nsegs != inmap[i]->dm_nsegs) {
				error = EFAULT;	/* "address error", sort of. */
				bus_dmamap_unload(sc->sc_dmat, inmap[i]);
				break;
			}
		}
		break;
	    }

	default:
		error = EINVAL;
	}

	if (__predict_false(error != 0)) {
		for (--i; i >= 0; i--)
			bus_dmamap_unload(sc->sc_dmat, inmap[i]);
		bus_dmamap_unload(sc->sc_dmat, dmamap);
		return (error);
	}

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	for (i = 0; i < ninputs; i++) {
		bus_dmamap_sync(sc->sc_dmat, inmap[i], 0, inmap[i]->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}

	prevp = (struct aau_desc_8 **) &sc->sc_firstdesc;
	prevpa = &sc->sc_firstdesc_pa;

	for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
		cur = pool_cache_get(dc, PR_NOWAIT);
		if (cur == NULL) {
			*prevp = NULL;
			error = ENOMEM;
			goto bad;
		}

		*prevp = cur;
		*prevpa = cur->d_pa;

		prevp = &cur->d_next;
		prevpa = &cur->d_nda;

		for (i = 0; i < ninputs; i++) {
			if (dmamap->dm_segs[seg].ds_len !=
			    inmap[i]->dm_segs[seg].ds_len) {
				*prevp = NULL;
				error = EFAULT;	/* "address" error, sort of. */
				goto bad;
			}
			if (i < 4) {
				cur->d_sar[i] =
				    inmap[i]->dm_segs[seg].ds_addr;
			} else if (i < 8) {
				cur->d_sar5_8[i - 4] =
				    inmap[i]->dm_segs[seg].ds_addr;
			}
		}
		cur->d_dar = dmamap->dm_segs[seg].ds_addr;
		cur->d_bc = dmamap->dm_segs[seg].ds_len;
		cur->d_dc = iopaau_dc_inputs[ninputs] | AAU_DC_DWE;
		SYNC_DESC(cur, descsz);
	}

	*prevp = NULL;
	*prevpa = 0;

	cur->d_dc |= AAU_DC_IE;
	SYNC_DESC(cur, descsz);

	sc->sc_lastdesc = cur;

	return (0);

 bad:
	iopaau_desc_free(dc, sc->sc_firstdesc);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);
	for (i = 0; i < ninputs; i++)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_map_in[i]);
	sc->sc_firstdesc = NULL;

	return (error);
}

int
iopaau_intr(void *arg)
{
	struct iopaau_softc *sc = arg;
	struct dmover_request *dreq;
	uint32_t asr;

	/* Clear the interrupt. */
	asr = bus_space_read_4(sc->sc_st, sc->sc_sh, AAU_ASR);
	if (asr == 0)
		return (0);
	bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ASR, asr);

	/* XXX -- why does this happen? */
	if (sc->sc_running == NULL) {
		printf("%s: unexpected interrupt, ASR = 0x%08x\n",
		    sc->sc_dev.dv_xname, asr);
		return (1);
	}
	dreq = sc->sc_running;

	/* Stop the AAU. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ACR, 0);

	DPRINTF(("%s: got interrupt for dreq %p\n", sc->sc_dev.dv_xname,
	    dreq));

	if (__predict_false((asr & AAU_ASR_ETIF) != 0)) {
		/*
		 * We expect to get end-of-chain interrupts, not
		 * end-of-transfer interrupts, so panic if we get
		 * one of these.
		 */
		panic("iopaau_intr: got EOT interrupt");
	}

	if (__predict_false((asr & AAU_ASR_MA) != 0)) {
		printf("%s: WARNING: got master abort\n", sc->sc_dev.dv_xname);
		dreq->dreq_flags |= DMOVER_REQ_ERROR;
		dreq->dreq_error = EFAULT;
	}

	/* Finish this transfer, start next one. */
	iopaau_finish(sc);

	return (1);
}

void
iopaau_attach(struct iopaau_softc *sc)
{
	int error, i;

	error = bus_dmamap_create(sc->sc_dmat, AAU_MAX_XFER, AAU_MAX_SEGS,
	    AAU_MAX_XFER, AAU_IO_BOUNDARY, 0, &sc->sc_map_out);
	if (error) {
		aprint_error(
		    "%s: unable to create output DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	for (i = 0; i < AAU_MAX_INPUTS; i++) {
		error = bus_dmamap_create(sc->sc_dmat, AAU_MAX_XFER,
		    AAU_MAX_SEGS, AAU_MAX_XFER, AAU_IO_BOUNDARY, 0,
		    &sc->sc_map_in[i]);
		if (error) {
			aprint_error("%s: unable to create input %d DMA map, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
	}

	/*
	 * Initialize global resources.  Ok to do here, since there's
	 * only one AAU.
	 */
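	/*
	 * The 8 * 4 byte alignment, applied at the offset of d_nda,
	 * keeps the hardware portion of every descriptor on a 32-byte
	 * boundary, matching the KASSERT in iopaau_desc_ctor().
	 */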
	pool_init(&aau_desc_4_pool, sizeof(struct aau_desc_4),
	    8 * 4, offsetof(struct aau_desc_4, d_nda), 0, "aaud4pl",
	    NULL);
	pool_init(&aau_desc_8_pool, sizeof(struct aau_desc_8),
	    8 * 4, offsetof(struct aau_desc_8, d_nda), 0, "aaud8pl",
	    NULL);

	pool_cache_init(&iopaau_desc_4_cache, &aau_desc_4_pool,
	    iopaau_desc_ctor, NULL, NULL);
	pool_cache_init(&iopaau_desc_8_cache, &aau_desc_8_pool,
	    iopaau_desc_ctor, NULL, NULL);

	/* Register us with dmover. */
	dmover_backend_register(&sc->sc_dmb);
}
669