/*	$NetBSD: iopaau.c,v 1.12 2005/12/11 12:16:51 christos Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Common code for XScale-based I/O Processor Application Accelerator
 * Unit support.
 *
 * The AAU provides a back-end for the dmover(9) facility.
 */
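/*
 * Consumers do not call into this file directly; they submit requests
 * through dmover(9), which assigns them to this back-end.  A minimal
 * client sketch (illustrative only; see dmover(9) for the real API
 * and error handling):
 *
 *	struct dmover_session *dses;
 *	struct dmover_request *dreq;
 *
 *	if (dmover_session_create("zero", &dses) != 0)
 *		...
 *	dreq = dmover_request_alloc(dses, NULL);
 *	dreq->dreq_outbuf_type = DMOVER_BUF_LINEAR;
 *	dreq->dreq_outbuf.dmbuf_linear.l_addr = buf;
 *	dreq->dreq_outbuf.dmbuf_linear.l_len = len;
 *	dmover_process(dreq);	/* completion is reported asynchronously */
 */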

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iopaau.c,v 1.12 2005/12/11 12:16:51 christos Exp $");

#include <sys/param.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/uio.h>

#include <uvm/uvm.h>

#include <machine/bus.h>

#include <arm/xscale/iopaaureg.h>
#include <arm/xscale/iopaauvar.h>

#ifdef AAU_DEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)	/* nothing */
#endif

static struct pool aau_desc_4_pool;
static struct pool aau_desc_8_pool;

struct pool_cache iopaau_desc_4_cache;
struct pool_cache iopaau_desc_8_cache;

/*
 * iopaau_desc_ctor:
 *
 *	Constructor for all types of descriptors.
 */
static int
iopaau_desc_ctor(void *arg, void *object, int flags)
{
	struct aau_desc_4 *d = object;

	/*
	 * Cache the physical address of the hardware portion of
	 * the descriptor in the software portion of the descriptor
	 * for quick reference later.
	 */
	d->d_pa = vtophys((vaddr_t)d) + SYNC_DESC_4_OFFSET;
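	/*
	 * The AAU walks descriptor chains by physical address and
	 * requires each hardware descriptor to be 32-byte (8-word)
	 * aligned; the pools created in iopaau_attach() provide
	 * matching alignment, which the assertion below verifies.
	 */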
	KASSERT((d->d_pa & 31) == 0);
	return (0);
}

/*
 * iopaau_desc_free:
 *
 *	Free a chain of AAU descriptors.
 */
void
iopaau_desc_free(struct pool_cache *dc, void *firstdesc)
{
	struct aau_desc_4 *d, *next;

	for (d = firstdesc; d != NULL; d = next) {
		next = d->d_next;
		pool_cache_put(dc, d);
	}
}

/*
 * iopaau_start:
 *
 *	Start an AAU request.  Must be called at splbio().
 */
static void
iopaau_start(struct iopaau_softc *sc)
{
	struct dmover_backend *dmb = &sc->sc_dmb;
	struct dmover_request *dreq;
	struct iopaau_function *af;
	int error;

	for (;;) {

		KASSERT(sc->sc_running == NULL);

		dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
		if (dreq == NULL)
			return;

		dmover_backend_remque(dmb, dreq);
		dreq->dreq_flags |= DMOVER_REQ_RUNNING;

		sc->sc_running = dreq;

		/* XXXUNLOCK */

		af = dreq->dreq_assignment->das_algdesc->dad_data;
		error = (*af->af_setup)(sc, dreq);

		/* XXXLOCK */

		if (error) {
			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			dreq->dreq_error = error;
			sc->sc_running = NULL;
			/* XXXUNLOCK */
			dmover_done(dreq);
			/* XXXLOCK */
			continue;
		}

#ifdef DIAGNOSTIC
		if (bus_space_read_4(sc->sc_st, sc->sc_sh, AAU_ASR) &
		    AAU_ASR_AAF)
			panic("iopaau_start: AAU already active");
#endif

		DPRINTF(("%s: starting dreq %p\n", sc->sc_dev.dv_xname,
		    dreq));

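		/*
		 * Point the AAU at the physical address of the first
		 * descriptor in the chain (ANDAR is the Next
		 * Descriptor Address Register) and set the enable bit
		 * in the control register to start the transfer.
		 */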
		bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ANDAR,
		    sc->sc_firstdesc_pa);
		bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ACR,
		    AAU_ACR_AAE);

		break;
	}
}

/*
 * iopaau_finish:
 *
 *	Finish the current operation.  AAU must be stopped.
 */
static void
iopaau_finish(struct iopaau_softc *sc)
{
	struct dmover_request *dreq = sc->sc_running;
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	void *firstdesc = sc->sc_firstdesc;
	int i, ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;

	sc->sc_running = NULL;

	/* If the function has inputs, unmap them. */
	for (i = 0; i < ninputs; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_map_in[i], 0,
		    sc->sc_map_in[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_map_in[i]);
	}

	/* Unload the output buffer DMA map. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map_out, 0,
	    sc->sc_map_out->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);

	/* Get the next transfer started. */
	iopaau_start(sc);
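	/*
	 * The completed request's descriptor chain is freed only now,
	 * after the next transfer has been started; the AAU no longer
	 * references the chain once its end-of-chain interrupt has
	 * fired, so starting the successor first keeps the unit busy
	 * during cleanup.
	 */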

	/* Now free descriptors for last transfer. */
	iopaau_desc_free(af->af_desc_cache, firstdesc);

	dmover_done(dreq);
}

/*
 * iopaau_process:
 *
 *	Dmover back-end entry point.
 */
void
iopaau_process(struct dmover_backend *dmb)
{
	struct iopaau_softc *sc = dmb->dmb_cookie;
	int s;

	s = splbio();
	/* XXXLOCK */

	if (sc->sc_running == NULL)
		iopaau_start(sc);

	/* XXXUNLOCK */
	splx(s);
}

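/*
 * The setup routines below build the hardware descriptor chain for a
 * request: one descriptor per DMA segment of the output buffer, with
 * each descriptor's d_nda field holding the physical address of its
 * successor so the AAU can walk the chain without CPU intervention.
 * Only the final descriptor sets AAU_DC_IE, so a single end-of-chain
 * interrupt is raised when the whole transfer completes.
 */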
/*
 * iopaau_func_fill_immed_setup:
 *
 *	Common code shared by the zero and fillN setup routines.
 */
static int
iopaau_func_fill_immed_setup(struct iopaau_softc *sc,
    struct dmover_request *dreq, uint32_t immed)
{
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	struct pool_cache *dc = af->af_desc_cache;
	bus_dmamap_t dmamap = sc->sc_map_out;
	uint32_t *prevpa;
	struct aau_desc_4 **prevp, *cur;
	int error, seg;

	switch (dreq->dreq_outbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dreq->dreq_outbuf.dmbuf_linear.l_addr,
		    dreq->dreq_outbuf.dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;

		if (uio->uio_rw != UIO_READ)
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;
	    }

	default:
		error = EINVAL;
	}

	if (__predict_false(error != 0))
		return (error);

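	/*
	 * The output buffer is written by the AAU, i.e. it is a
	 * device-write/CPU-read buffer, which is why it was loaded
	 * with BUS_DMA_READ above and is synced PREREAD here.
	 */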
	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	prevp = (struct aau_desc_4 **) &sc->sc_firstdesc;
	prevpa = &sc->sc_firstdesc_pa;

	cur = NULL;	/* XXX: gcc */
	for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
		cur = pool_cache_get(dc, PR_NOWAIT);
		if (cur == NULL) {
			*prevp = NULL;
			error = ENOMEM;
			goto bad;
		}

		*prevp = cur;
		*prevpa = cur->d_pa;

		prevp = &cur->d_next;
		prevpa = &cur->d_nda;

		/*
		 * We don't actually enforce the page-alignment
		 * constraint here, because there is only one
		 * data stream to worry about.
		 */

		cur->d_sar[0] = immed;
		cur->d_dar = dmamap->dm_segs[seg].ds_addr;
		cur->d_bc = dmamap->dm_segs[seg].ds_len;
		cur->d_dc = AAU_DC_B1_CC(AAU_DC_CC_FILL) | AAU_DC_DWE;
		SYNC_DESC(cur, sizeof(struct aau_desc_4));
	}

	*prevp = NULL;
	*prevpa = 0;

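	/* Request the end-of-chain interrupt on the final descriptor. */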
	cur->d_dc |= AAU_DC_IE;
	SYNC_DESC(cur, sizeof(struct aau_desc_4));

	sc->sc_lastdesc = cur;

	return (0);

 bad:
	iopaau_desc_free(dc, sc->sc_firstdesc);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);
	sc->sc_firstdesc = NULL;

	return (error);
}

/*
 * iopaau_func_zero_setup:
 *
 *	Setup routine for the "zero" function.
 */
int
iopaau_func_zero_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{

	return (iopaau_func_fill_immed_setup(sc, dreq, 0));
}

/*
 * iopaau_func_fill8_setup:
 *
 *	Setup routine for the "fill8" function.
 */
int
iopaau_func_fill8_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{

	return (iopaau_func_fill_immed_setup(sc, dreq,
	    dreq->dreq_immediate[0] |
	    (dreq->dreq_immediate[0] << 8) |
	    (dreq->dreq_immediate[0] << 16) |
	    (dreq->dreq_immediate[0] << 24)));
}

/*
 * Descriptor command words for varying numbers of inputs.  For 1 input,
 * this does a copy.  For multiple inputs, we're doing an XOR.  In this
 * case, the first block is a "direct fill" to load the store queue, and
 * the remaining blocks are XOR'd to the store queue.
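 *
 * For five or more inputs, AAU_DC_SBCI_5_8 additionally selects the
 * extended descriptor layout (struct aau_desc_8) that carries source
 * addresses 5 through 8; AAU_DESC_SIZE() below sizes the descriptor
 * accordingly.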
 */
static const uint32_t iopaau_dc_inputs[] = {
	0,						/* 0 */

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL),		/* 1 */

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|		/* 2 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR),

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|		/* 3 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR),

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|		/* 4 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|				/* 5 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|				/* 6 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR)|
	AAU_DC_B6_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|				/* 7 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR)|
	AAU_DC_B6_CC(AAU_DC_CC_XOR)|
	AAU_DC_B7_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|				/* 8 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR)|
	AAU_DC_B6_CC(AAU_DC_CC_XOR)|
	AAU_DC_B7_CC(AAU_DC_CC_XOR)|
	AAU_DC_B8_CC(AAU_DC_CC_XOR),
};

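/*
 * Note that the multi-input setup requires every input DMA map to
 * match the output map segment-for-segment (same segment count and
 * per-segment lengths); the code below fails with EFAULT when they
 * differ, since the AAU processes all streams in lock step.
 */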
/*
 * iopaau_func_xor_setup:
 *
 *	Setup routine for the "copy", "xor2".."xor8" functions.
 */
int
iopaau_func_xor_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	struct pool_cache *dc = af->af_desc_cache;
	bus_dmamap_t dmamap = sc->sc_map_out;
	bus_dmamap_t *inmap = sc->sc_map_in;
	uint32_t *prevpa;
	struct aau_desc_8 **prevp, *cur;
	int ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
	int i, error, seg;
	size_t descsz = AAU_DESC_SIZE(ninputs);

	KASSERT(ninputs <= AAU_MAX_INPUTS);

	switch (dreq->dreq_outbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dreq->dreq_outbuf.dmbuf_linear.l_addr,
		    dreq->dreq_outbuf.dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;

		if (uio->uio_rw != UIO_READ)
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;
	    }

	default:
		error = EINVAL;
	}

	if (__predict_false(error != 0))
		return (error);

	switch (dreq->dreq_inbuf_type) {
	case DMOVER_BUF_LINEAR:
		for (i = 0; i < ninputs; i++) {
			error = bus_dmamap_load(sc->sc_dmat, inmap[i],
			    dreq->dreq_inbuf[i].dmbuf_linear.l_addr,
			    dreq->dreq_inbuf[i].dmbuf_linear.l_len, NULL,
			    BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
			if (__predict_false(error != 0))
				break;
			if (dmamap->dm_nsegs != inmap[i]->dm_nsegs) {
				error = EFAULT;	/* "address error", sort of. */
				bus_dmamap_unload(sc->sc_dmat, inmap[i]);
				break;
			}
		}
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio;

		for (i = 0; i < ninputs; i++) {
			uio = dreq->dreq_inbuf[i].dmbuf_uio;

			if (uio->uio_rw != UIO_WRITE) {
				error = EINVAL;
				break;
			}

			error = bus_dmamap_load_uio(sc->sc_dmat, inmap[i], uio,
			    BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
			if (__predict_false(error != 0)) {
				break;
			}
			if (dmamap->dm_nsegs != inmap[i]->dm_nsegs) {
				error = EFAULT;	/* "address error", sort of. */
				bus_dmamap_unload(sc->sc_dmat, inmap[i]);
				break;
			}
		}
		break;
	    }

	default:
		i = 0;	/* XXX: gcc */
		error = EINVAL;
	}

	if (__predict_false(error != 0)) {
		for (--i; i >= 0; i--)
			bus_dmamap_unload(sc->sc_dmat, inmap[i]);
		bus_dmamap_unload(sc->sc_dmat, dmamap);
		return (error);
	}

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	for (i = 0; i < ninputs; i++) {
		bus_dmamap_sync(sc->sc_dmat, inmap[i], 0, inmap[i]->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}

	prevp = (struct aau_desc_8 **) &sc->sc_firstdesc;
	prevpa = &sc->sc_firstdesc_pa;

	cur = NULL;	/* XXX: gcc */
	for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
		cur = pool_cache_get(dc, PR_NOWAIT);
		if (cur == NULL) {
			*prevp = NULL;
			error = ENOMEM;
			goto bad;
		}

		*prevp = cur;
		*prevpa = cur->d_pa;

		prevp = &cur->d_next;
		prevpa = &cur->d_nda;

		for (i = 0; i < ninputs; i++) {
			if (dmamap->dm_segs[seg].ds_len !=
			    inmap[i]->dm_segs[seg].ds_len) {
				*prevp = NULL;
				error = EFAULT;	/* "address error", sort of. */
				goto bad;
			}
			if (i < 4) {
				cur->d_sar[i] =
				    inmap[i]->dm_segs[seg].ds_addr;
			} else if (i < 8) {
				cur->d_sar5_8[i - 4] =
				    inmap[i]->dm_segs[seg].ds_addr;
			}
		}
		cur->d_dar = dmamap->dm_segs[seg].ds_addr;
		cur->d_bc = dmamap->dm_segs[seg].ds_len;
		cur->d_dc = iopaau_dc_inputs[ninputs] | AAU_DC_DWE;
		SYNC_DESC(cur, descsz);
	}

	*prevp = NULL;
	*prevpa = 0;

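	/* Request the end-of-chain interrupt on the final descriptor. */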
	cur->d_dc |= AAU_DC_IE;
	SYNC_DESC(cur, descsz);

	sc->sc_lastdesc = cur;

	return (0);

 bad:
	iopaau_desc_free(dc, sc->sc_firstdesc);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);
	for (i = 0; i < ninputs; i++)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_map_in[i]);
	sc->sc_firstdesc = NULL;

	return (error);
}

int
iopaau_intr(void *arg)
{
	struct iopaau_softc *sc = arg;
	struct dmover_request *dreq;
	uint32_t asr;

	/* Clear the interrupt. */
	asr = bus_space_read_4(sc->sc_st, sc->sc_sh, AAU_ASR);
	if (asr == 0)
		return (0);
	bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ASR, asr);

	/* XXX -- why does this happen? */
	if (sc->sc_running == NULL) {
		printf("%s: unexpected interrupt, ASR = 0x%08x\n",
		    sc->sc_dev.dv_xname, asr);
		return (1);
	}
	dreq = sc->sc_running;

	/* Stop the AAU. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ACR, 0);

	DPRINTF(("%s: got interrupt for dreq %p\n", sc->sc_dev.dv_xname,
	    dreq));

	if (__predict_false((asr & AAU_ASR_ETIF) != 0)) {
		/*
		 * We expect to get end-of-chain interrupts, not
		 * end-of-transfer interrupts, so panic if we get
		 * one of these.
		 */
		panic("iopaau_intr: got EOT interrupt");
	}

	if (__predict_false((asr & AAU_ASR_MA) != 0)) {
		printf("%s: WARNING: got master abort\n", sc->sc_dev.dv_xname);
		dreq->dreq_flags |= DMOVER_REQ_ERROR;
		dreq->dreq_error = EFAULT;
	}

	/* Finish this transfer, start next one. */
	iopaau_finish(sc);

	return (1);
}

void
iopaau_attach(struct iopaau_softc *sc)
{
	int error, i;

	error = bus_dmamap_create(sc->sc_dmat, AAU_MAX_XFER, AAU_MAX_SEGS,
	    AAU_MAX_XFER, AAU_IO_BOUNDARY, 0, &sc->sc_map_out);
	if (error) {
		aprint_error(
		    "%s: unable to create output DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	for (i = 0; i < AAU_MAX_INPUTS; i++) {
		error = bus_dmamap_create(sc->sc_dmat, AAU_MAX_XFER,
		    AAU_MAX_SEGS, AAU_MAX_XFER, AAU_IO_BOUNDARY, 0,
		    &sc->sc_map_in[i]);
		if (error) {
			aprint_error("%s: unable to create input %d DMA map, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
	}

	/*
	 * Initialize global resources.  Ok to do here, since there's
	 * only one AAU.
	 */
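	/*
	 * The 8 * 4 byte (32-byte) alignment is applied at the offset
	 * of d_nda, the first word of the hardware portion of the
	 * descriptor, to satisfy the AAU's chain-alignment requirement
	 * asserted in iopaau_desc_ctor().
	 */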
	pool_init(&aau_desc_4_pool, sizeof(struct aau_desc_4),
	    8 * 4, offsetof(struct aau_desc_4, d_nda), 0, "aaud4pl",
	    NULL);
	pool_init(&aau_desc_8_pool, sizeof(struct aau_desc_8),
	    8 * 4, offsetof(struct aau_desc_8, d_nda), 0, "aaud8pl",
	    NULL);

	pool_cache_init(&iopaau_desc_4_cache, &aau_desc_4_pool,
	    iopaau_desc_ctor, NULL, NULL);
	pool_cache_init(&iopaau_desc_8_cache, &aau_desc_8_pool,
	    iopaau_desc_ctor, NULL, NULL);

	/* Register us with dmover. */
	dmover_backend_register(&sc->sc_dmb);
}