/*	$NetBSD: iopaau.c,v 1.18 2019/03/17 06:36:22 maxv Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Common code for XScale-based I/O Processor Application Accelerator
 * Unit support.
 *
 * The AAU provides a back-end for the dmover(9) facility.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iopaau.c,v 1.18 2019/03/17 06:36:22 maxv Exp $");

#include <sys/param.h>
#include <sys/pool.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/uio.h>
#include <sys/bus.h>

#include <uvm/uvm.h>

#include <arm/xscale/iopaaureg.h>
#include <arm/xscale/iopaauvar.h>

#ifdef AAU_DEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)	/* nothing */
#endif

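/*
 * Global descriptor caches, initialized in iopaau_attach(): one for the
 * 4-source hardware descriptor format (struct aau_desc_4) and one for the
 * extended 8-source format (struct aau_desc_8).
 */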
pool_cache_t iopaau_desc_4_cache;
pool_cache_t iopaau_desc_8_cache;

/*
 * iopaau_desc_ctor:
 *
 *	Constructor for all types of descriptors.
 */
static int
iopaau_desc_ctor(void *arg, void *object, int flags)
{
	struct aau_desc_4 *d = object;

	/*
	 * Cache the physical address of the hardware portion of
	 * the descriptor in the software portion of the descriptor
	 * for quick reference later.
	 */
	d->d_pa = vtophys((vaddr_t)d) + SYNC_DESC_4_OFFSET;
	KASSERT((d->d_pa & 31) == 0);
	return (0);
}

/*
 * iopaau_desc_free:
 *
 *	Free a chain of AAU descriptors.
 */
void
iopaau_desc_free(struct pool_cache *dc, void *firstdesc)
{
	struct aau_desc_4 *d, *next;

	for (d = firstdesc; d != NULL; d = next) {
		next = d->d_next;
		pool_cache_put(dc, d);
	}
}

/*
 * iopaau_start:
 *
 *	Start an AAU request.  Must be called at splbio().
 */
static void
iopaau_start(struct iopaau_softc *sc)
{
	struct dmover_backend *dmb = &sc->sc_dmb;
	struct dmover_request *dreq;
	struct iopaau_function *af;
	int error;

	for (;;) {

		KASSERT(sc->sc_running == NULL);

		dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
		if (dreq == NULL)
			return;

		dmover_backend_remque(dmb, dreq);
		dreq->dreq_flags |= DMOVER_REQ_RUNNING;

		sc->sc_running = dreq;

		/* XXXUNLOCK */

		af = dreq->dreq_assignment->das_algdesc->dad_data;
		error = (*af->af_setup)(sc, dreq);

		/* XXXLOCK */

		if (error) {
			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			dreq->dreq_error = error;
			sc->sc_running = NULL;
			/* XXXUNLOCK */
			dmover_done(dreq);
			/* XXXLOCK */
			continue;
		}

#ifdef DIAGNOSTIC
		if (bus_space_read_4(sc->sc_st, sc->sc_sh, AAU_ASR) &
		    AAU_ASR_AAF)
			panic("iopaau_start: AAU already active");
#endif

		DPRINTF(("%s: starting dreq %p\n", device_xname(sc->sc_dev),
		    dreq));

		bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ANDAR,
		    sc->sc_firstdesc_pa);
		bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ACR,
		    AAU_ACR_AAE);

		break;
	}
}

/*
 * iopaau_finish:
 *
 *	Finish the current operation.  AAU must be stopped.
 */
static void
iopaau_finish(struct iopaau_softc *sc)
{
	struct dmover_request *dreq = sc->sc_running;
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	void *firstdesc = sc->sc_firstdesc;
	int i, ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;

	sc->sc_running = NULL;

	/* If the function has inputs, unmap them. */
	for (i = 0; i < ninputs; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_map_in[i], 0,
		    sc->sc_map_in[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_map_in[i]);
	}

	/* Unload the output buffer DMA map. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map_out, 0,
	    sc->sc_map_out->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);

	/* Get the next transfer started. */
	iopaau_start(sc);

	/* Now free descriptors for last transfer. */
	iopaau_desc_free(af->af_desc_cache, firstdesc);

	dmover_done(dreq);
}

/*
 * iopaau_process:
 *
 *	Dmover back-end entry point.
 */
void
iopaau_process(struct dmover_backend *dmb)
{
	struct iopaau_softc *sc = dmb->dmb_cookie;
	int s;

	s = splbio();
	/* XXXLOCK */

	if (sc->sc_running == NULL)
		iopaau_start(sc);

	/* XXXUNLOCK */
	splx(s);
}

/*
 * iopaau_func_fill_immed_setup:
 *
 *	Common code shared by the zero and fillN setup routines.
 */
static int
iopaau_func_fill_immed_setup(struct iopaau_softc *sc,
    struct dmover_request *dreq, uint32_t immed)
{
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	struct pool_cache *dc = af->af_desc_cache;
	bus_dmamap_t dmamap = sc->sc_map_out;
	uint32_t *prevpa;
	struct aau_desc_4 **prevp, *cur;
	int error, seg;

	switch (dreq->dreq_outbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dreq->dreq_outbuf.dmbuf_linear.l_addr,
		    dreq->dreq_outbuf.dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;

		if (uio->uio_rw != UIO_READ)
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;
	    }

	default:
		error = EINVAL;
	}

	if (__predict_false(error != 0))
		return (error);

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

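	/*
	 * Build one descriptor per DMA segment.  prevp/prevpa track the
	 * previous descriptor's software next pointer (d_next) and
	 * hardware next-descriptor-address field (d_nda), starting with
	 * the softc's sc_firstdesc/sc_firstdesc_pa, so each new
	 * descriptor is linked into both chains as it is filled in.
	 */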
	prevp = (struct aau_desc_4 **) &sc->sc_firstdesc;
	prevpa = &sc->sc_firstdesc_pa;

	cur = NULL;	/* XXX: gcc */
	for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
		cur = pool_cache_get(dc, PR_NOWAIT);
		if (cur == NULL) {
			*prevp = NULL;
			error = ENOMEM;
			goto bad;
		}

		*prevp = cur;
		*prevpa = cur->d_pa;

		prevp = &cur->d_next;
		prevpa = &cur->d_nda;

		/*
		 * We don't actually enforce the page alignment
		 * constraint, here, because there is only one
		 * data stream to worry about.
		 */

		cur->d_sar[0] = immed;
		cur->d_dar = dmamap->dm_segs[seg].ds_addr;
		cur->d_bc = dmamap->dm_segs[seg].ds_len;
		cur->d_dc = AAU_DC_B1_CC(AAU_DC_CC_FILL) | AAU_DC_DWE;
		SYNC_DESC(cur, sizeof(struct aau_desc_4));
	}

	*prevp = NULL;
	*prevpa = 0;

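	/*
	 * Enable the interrupt on the last descriptor only, so the AAU
	 * interrupts once when the whole chain has completed.
	 */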
	cur->d_dc |= AAU_DC_IE;
	SYNC_DESC(cur, sizeof(struct aau_desc_4));

	sc->sc_lastdesc = cur;

	return (0);

 bad:
	iopaau_desc_free(dc, sc->sc_firstdesc);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);
	sc->sc_firstdesc = NULL;

	return (error);
}

/*
 * iopaau_func_zero_setup:
 *
 *	Setup routine for the "zero" function.
 */
int
iopaau_func_zero_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{

	return (iopaau_func_fill_immed_setup(sc, dreq, 0));
}

/*
 * iopaau_func_fill8_setup:
 *
 *	Setup routine for the "fill8" function.
 */
int
iopaau_func_fill8_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{

	return (iopaau_func_fill_immed_setup(sc, dreq,
	    dreq->dreq_immediate[0] |
	    (dreq->dreq_immediate[0] << 8) |
	    (dreq->dreq_immediate[0] << 16) |
	    (dreq->dreq_immediate[0] << 24)));
}

/*
 * Descriptor command words for varying numbers of inputs.  For 1 input,
 * this does a copy.  For multiple inputs, we're doing an XOR.  In this
 * case, the first block is a "direct fill" to load the store queue, and
 * the remaining blocks are XOR'd to the store queue.
 */
static const uint32_t iopaau_dc_inputs[] = {
	0,					/* 0 */

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL),	/* 1 */

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|	/* 2 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR),

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|	/* 3 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR),

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|	/* 4 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|			/* 5 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|			/* 6 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR)|
	AAU_DC_B6_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|			/* 7 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR)|
	AAU_DC_B6_CC(AAU_DC_CC_XOR)|
	AAU_DC_B7_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|			/* 8 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR)|
	AAU_DC_B6_CC(AAU_DC_CC_XOR)|
	AAU_DC_B7_CC(AAU_DC_CC_XOR)|
	AAU_DC_B8_CC(AAU_DC_CC_XOR),
};

/*
 * iopaau_func_xor_setup:
 *
 *	Setup routine for the "copy", "xor2".."xor8" functions.
 */
int
iopaau_func_xor_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	struct pool_cache *dc = af->af_desc_cache;
	bus_dmamap_t dmamap = sc->sc_map_out;
	bus_dmamap_t *inmap = sc->sc_map_in;
	uint32_t *prevpa;
	struct aau_desc_8 **prevp, *cur;
	int ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
	int i, error, seg;
	size_t descsz = AAU_DESC_SIZE(ninputs);

	KASSERT(ninputs <= AAU_MAX_INPUTS);

	switch (dreq->dreq_outbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dreq->dreq_outbuf.dmbuf_linear.l_addr,
		    dreq->dreq_outbuf.dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;

		if (uio->uio_rw != UIO_READ)
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;
	    }

	default:
		error = EINVAL;
	}

	if (__predict_false(error != 0))
		return (error);

	switch (dreq->dreq_inbuf_type) {
	case DMOVER_BUF_LINEAR:
		for (i = 0; i < ninputs; i++) {
			error = bus_dmamap_load(sc->sc_dmat, inmap[i],
			    dreq->dreq_inbuf[i].dmbuf_linear.l_addr,
			    dreq->dreq_inbuf[i].dmbuf_linear.l_len, NULL,
			    BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
			if (__predict_false(error != 0))
				break;
			if (dmamap->dm_nsegs != inmap[i]->dm_nsegs) {
				error = EFAULT;	/* "address error", sort of. */
				bus_dmamap_unload(sc->sc_dmat, inmap[i]);
				break;
			}
		}
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio;

		for (i = 0; i < ninputs; i++) {
			uio = dreq->dreq_inbuf[i].dmbuf_uio;

			if (uio->uio_rw != UIO_WRITE) {
				error = EINVAL;
				break;
			}

			error = bus_dmamap_load_uio(sc->sc_dmat, inmap[i], uio,
			    BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
			if (__predict_false(error != 0)) {
				break;
			}
			if (dmamap->dm_nsegs != inmap[i]->dm_nsegs) {
				error = EFAULT;	/* "address error", sort of. */
				bus_dmamap_unload(sc->sc_dmat, inmap[i]);
				break;
			}
		}
		break;
	    }

	default:
		i = 0;	/* XXX: gcc */
		error = EINVAL;
	}

	if (__predict_false(error != 0)) {
		for (--i; i >= 0; i--)
			bus_dmamap_unload(sc->sc_dmat, inmap[i]);
		bus_dmamap_unload(sc->sc_dmat, dmamap);
		return (error);
	}

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	for (i = 0; i < ninputs; i++) {
		bus_dmamap_sync(sc->sc_dmat, inmap[i], 0, inmap[i]->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}

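	/*
	 * Build one descriptor per output DMA segment, linking each into
	 * the software (d_next) and hardware (d_nda) chains via
	 * prevp/prevpa, as in iopaau_func_fill_immed_setup().  The input
	 * maps were checked above to have the same number of segments as
	 * the output map; matching segment lengths are verified below.
	 */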
	prevp = (struct aau_desc_8 **) &sc->sc_firstdesc;
	prevpa = &sc->sc_firstdesc_pa;

	cur = NULL;	/* XXX: gcc */
	for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
		cur = pool_cache_get(dc, PR_NOWAIT);
		if (cur == NULL) {
			*prevp = NULL;
			error = ENOMEM;
			goto bad;
		}

		*prevp = cur;
		*prevpa = cur->d_pa;

		prevp = &cur->d_next;
		prevpa = &cur->d_nda;

		for (i = 0; i < ninputs; i++) {
			if (dmamap->dm_segs[seg].ds_len !=
			    inmap[i]->dm_segs[seg].ds_len) {
				*prevp = NULL;
				error = EFAULT;	/* "address" error, sort of. */
				goto bad;
			}
			if (i < 4) {
				cur->d_sar[i] =
				    inmap[i]->dm_segs[seg].ds_addr;
			} else if (i < 8) {
				cur->d_sar5_8[i - 4] =
				    inmap[i]->dm_segs[seg].ds_addr;
			}
		}
		cur->d_dar = dmamap->dm_segs[seg].ds_addr;
		cur->d_bc = dmamap->dm_segs[seg].ds_len;
		cur->d_dc = iopaau_dc_inputs[ninputs] | AAU_DC_DWE;
		SYNC_DESC(cur, descsz);
	}

	*prevp = NULL;
	*prevpa = 0;

	cur->d_dc |= AAU_DC_IE;
	SYNC_DESC(cur, descsz);

	sc->sc_lastdesc = cur;

	return (0);

 bad:
	iopaau_desc_free(dc, sc->sc_firstdesc);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);
	for (i = 0; i < ninputs; i++)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_map_in[i]);
	sc->sc_firstdesc = NULL;

	return (error);
}

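/*
 * iopaau_intr:
 *
 *	Interrupt handler for the AAU.  Completes the running request
 *	and starts the next one.
 */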
int
iopaau_intr(void *arg)
{
	struct iopaau_softc *sc = arg;
	struct dmover_request *dreq;
	uint32_t asr;

	/* Clear the interrupt. */
	asr = bus_space_read_4(sc->sc_st, sc->sc_sh, AAU_ASR);
	if (asr == 0)
		return (0);
	bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ASR, asr);

	/* XXX -- why does this happen? */
	if (sc->sc_running == NULL) {
		printf("%s: unexpected interrupt, ASR = 0x%08x\n",
		    device_xname(sc->sc_dev), asr);
		return (1);
	}
	dreq = sc->sc_running;

	/* Stop the AAU. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ACR, 0);

	DPRINTF(("%s: got interrupt for dreq %p\n", device_xname(sc->sc_dev),
	    dreq));

	if (__predict_false((asr & AAU_ASR_ETIF) != 0)) {
		/*
		 * We expect to get end-of-chain interrupts, not
		 * end-of-transfer interrupts, so panic if we get
		 * one of these.
		 */
		panic("aau_intr: got EOT interrupt");
	}

	if (__predict_false((asr & AAU_ASR_MA) != 0)) {
		aprint_error_dev(sc->sc_dev, "WARNING: got master abort\n");
		dreq->dreq_flags |= DMOVER_REQ_ERROR;
		dreq->dreq_error = EFAULT;
	}

	/* Finish this transfer, start next one. */
	iopaau_finish(sc);

	return (1);
}

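/*
 * iopaau_attach:
 *
 *	Common attachment code for AAU front-ends: creates the output
 *	and input DMA maps, initializes the shared descriptor caches,
 *	and registers the unit with dmover(9).
 */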
void
iopaau_attach(struct iopaau_softc *sc)
{
	int error, i;

	error = bus_dmamap_create(sc->sc_dmat, AAU_MAX_XFER, AAU_MAX_SEGS,
	    AAU_MAX_XFER, AAU_IO_BOUNDARY, 0, &sc->sc_map_out);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create output DMA map, error = %d\n", error);
		return;
	}

	for (i = 0; i < AAU_MAX_INPUTS; i++) {
		error = bus_dmamap_create(sc->sc_dmat, AAU_MAX_XFER,
		    AAU_MAX_SEGS, AAU_MAX_XFER, AAU_IO_BOUNDARY, 0,
		    &sc->sc_map_in[i]);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create input %d DMA map, error = %d\n",
			    i, error);
			return;
		}
	}

	/*
	 * Initialize global resources.  Ok to do here, since there's
	 * only one AAU.  The structures are 32-byte aligned.
	 */
	iopaau_desc_4_cache = pool_cache_init(sizeof(struct aau_desc_4),
	    8 * 4, 0, 0, "aaud4pl",
	    NULL, IPL_VM, iopaau_desc_ctor, NULL, NULL);
	iopaau_desc_8_cache = pool_cache_init(sizeof(struct aau_desc_8),
	    8 * 4, 0, 0, "aaud8pl",
	    NULL, IPL_VM, iopaau_desc_ctor, NULL, NULL);

	/* Register us with dmover. */
	dmover_backend_register(&sc->sc_dmb);
}