/*	$NetBSD: iopaau.c,v 1.14 2007/11/07 00:23:16 ad Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Common code for XScale-based I/O Processor Application Accelerator
 * Unit support.
 *
 * The AAU provides a back-end for the dmover(9) facility.
 */
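
/*
 * For orientation only: this back-end is not called directly, but
 * through the generic dmover(9) interface.  The sketch below shows
 * one plausible way a consumer would issue a two-input XOR request
 * to the "xor2" function registered by this driver; the dmover
 * session/request calls are part of the generic dmover(9) API and
 * are not defined in this file, and completion handling is omitted.
 *
 *	struct dmover_session *dses;
 *	struct dmover_request *dreq;
 *
 *	if (dmover_session_create("xor2", &dses) != 0)
 *		return;
 *	dreq = dmover_request_alloc(dses, NULL);
 *	dreq->dreq_outbuf_type = DMOVER_BUF_LINEAR;
 *	dreq->dreq_outbuf.dmbuf_linear.l_addr = dst;
 *	dreq->dreq_outbuf.dmbuf_linear.l_len = len;
 *	dreq->dreq_inbuf_type = DMOVER_BUF_LINEAR;
 *	dreq->dreq_inbuf[0].dmbuf_linear.l_addr = src0;
 *	dreq->dreq_inbuf[0].dmbuf_linear.l_len = len;
 *	dreq->dreq_inbuf[1].dmbuf_linear.l_addr = src1;
 *	dreq->dreq_inbuf[1].dmbuf_linear.l_len = len;
 *	dmover_process(dreq);
 */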

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iopaau.c,v 1.14 2007/11/07 00:23:16 ad Exp $");

#include <sys/param.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/uio.h>

#include <uvm/uvm.h>

#include <machine/bus.h>

#include <arm/xscale/iopaaureg.h>
#include <arm/xscale/iopaauvar.h>

#ifdef AAU_DEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)	/* nothing */
#endif

pool_cache_t iopaau_desc_4_cache;
pool_cache_t iopaau_desc_8_cache;

/*
 * iopaau_desc_ctor:
 *
 *	Constructor for all types of descriptors.
 */
static int
iopaau_desc_ctor(void *arg, void *object, int flags)
{
	struct aau_desc_4 *d = object;

	/*
	 * Cache the physical address of the hardware portion of
	 * the descriptor in the software portion of the descriptor
	 * for quick reference later.
	 */
	d->d_pa = vtophys((vaddr_t)d) + SYNC_DESC_4_OFFSET;
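	/*
	 * The AAU requires its descriptors to be 32-byte aligned;
	 * the descriptor pool caches are created with matching
	 * alignment in iopaau_attach(), so assert that here.
	 */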
	KASSERT((d->d_pa & 31) == 0);
	return (0);
}

/*
 * iopaau_desc_free:
 *
 *	Free a chain of AAU descriptors.
 */
void
iopaau_desc_free(struct pool_cache *dc, void *firstdesc)
{
	struct aau_desc_4 *d, *next;

	for (d = firstdesc; d != NULL; d = next) {
		next = d->d_next;
		pool_cache_put(dc, d);
	}
}

/*
 * iopaau_start:
 *
 *	Start an AAU request.  Must be called at splbio().
 */
static void
iopaau_start(struct iopaau_softc *sc)
{
	struct dmover_backend *dmb = &sc->sc_dmb;
	struct dmover_request *dreq;
	struct iopaau_function *af;
	int error;

	for (;;) {

		KASSERT(sc->sc_running == NULL);

		dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
		if (dreq == NULL)
			return;

		dmover_backend_remque(dmb, dreq);
		dreq->dreq_flags |= DMOVER_REQ_RUNNING;

		sc->sc_running = dreq;

		/* XXXUNLOCK */

		af = dreq->dreq_assignment->das_algdesc->dad_data;
		error = (*af->af_setup)(sc, dreq);

		/* XXXLOCK */

		if (error) {
			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			dreq->dreq_error = error;
			sc->sc_running = NULL;
			/* XXXUNLOCK */
			dmover_done(dreq);
			/* XXXLOCK */
			continue;
		}

#ifdef DIAGNOSTIC
		if (bus_space_read_4(sc->sc_st, sc->sc_sh, AAU_ASR) &
		    AAU_ASR_AAF)
			panic("iopaau_start: AAU already active");
#endif

		DPRINTF(("%s: starting dreq %p\n", sc->sc_dev.dv_xname,
		    dreq));

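		/*
		 * Point the AAU at the head of the new descriptor
		 * chain and set the enable bit to start processing it.
		 */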
		bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ANDAR,
		    sc->sc_firstdesc_pa);
		bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ACR,
		    AAU_ACR_AAE);

		break;
	}
}

/*
 * iopaau_finish:
 *
 *	Finish the current operation.  AAU must be stopped.
 */
static void
iopaau_finish(struct iopaau_softc *sc)
{
	struct dmover_request *dreq = sc->sc_running;
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	void *firstdesc = sc->sc_firstdesc;
	int i, ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;

	sc->sc_running = NULL;

	/* If the function has inputs, unmap them. */
	for (i = 0; i < ninputs; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_map_in[i], 0,
		    sc->sc_map_in[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_map_in[i]);
	}

	/* Unload the output buffer DMA map. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map_out, 0,
	    sc->sc_map_out->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);

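	/*
	 * Start the next transfer before freeing the old chain and
	 * completing the request: the AAU has already been stopped by
	 * our caller, so the old descriptors are no longer referenced
	 * and the cleanup below can overlap the new transfer.
	 */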
	/* Get the next transfer started. */
	iopaau_start(sc);

	/* Now free descriptors for last transfer. */
	iopaau_desc_free(af->af_desc_cache, firstdesc);

	dmover_done(dreq);
}

/*
 * iopaau_process:
 *
 *	Dmover back-end entry point.
 */
void
iopaau_process(struct dmover_backend *dmb)
{
	struct iopaau_softc *sc = dmb->dmb_cookie;
	int s;

	s = splbio();
	/* XXXLOCK */

	if (sc->sc_running == NULL)
		iopaau_start(sc);

	/* XXXUNLOCK */
	splx(s);
}

/*
 * iopaau_func_fill_immed_setup:
 *
 *	Common code shared by the zero and fillN setup routines.
 */
static int
iopaau_func_fill_immed_setup(struct iopaau_softc *sc,
    struct dmover_request *dreq, uint32_t immed)
{
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	struct pool_cache *dc = af->af_desc_cache;
	bus_dmamap_t dmamap = sc->sc_map_out;
	uint32_t *prevpa;
	struct aau_desc_4 **prevp, *cur;
	int error, seg;

	switch (dreq->dreq_outbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dreq->dreq_outbuf.dmbuf_linear.l_addr,
		    dreq->dreq_outbuf.dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;

		if (uio->uio_rw != UIO_READ)
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;
	    }

	default:
		error = EINVAL;
	}

	if (__predict_false(error != 0))
		return (error);

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	prevp = (struct aau_desc_4 **) &sc->sc_firstdesc;
	prevpa = &sc->sc_firstdesc_pa;

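	/*
	 * Walk the output DMA segments, allocating one descriptor per
	 * segment.  prevp/prevpa track the previous descriptor's
	 * software next pointer and hardware next-descriptor-address
	 * field (initially the chain head in the softc), so each new
	 * descriptor is linked into both the virtual and the physical
	 * chain as it is filled in.
	 */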
	cur = NULL;	/* XXX: gcc */
	for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
		cur = pool_cache_get(dc, PR_NOWAIT);
		if (cur == NULL) {
			*prevp = NULL;
			error = ENOMEM;
			goto bad;
		}

		*prevp = cur;
		*prevpa = cur->d_pa;

		prevp = &cur->d_next;
		prevpa = &cur->d_nda;

		/*
		 * We don't actually enforce the page alignment
		 * constraint, here, because there is only one
		 * data stream to worry about.
		 */

		cur->d_sar[0] = immed;
		cur->d_dar = dmamap->dm_segs[seg].ds_addr;
		cur->d_bc = dmamap->dm_segs[seg].ds_len;
		cur->d_dc = AAU_DC_B1_CC(AAU_DC_CC_FILL) | AAU_DC_DWE;
		SYNC_DESC(cur, sizeof(struct aau_desc_4));
	}

	*prevp = NULL;
	*prevpa = 0;

	cur->d_dc |= AAU_DC_IE;
	SYNC_DESC(cur, sizeof(struct aau_desc_4));

	sc->sc_lastdesc = cur;

	return (0);

 bad:
	iopaau_desc_free(dc, sc->sc_firstdesc);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);
	sc->sc_firstdesc = NULL;

	return (error);
}

/*
 * iopaau_func_zero_setup:
 *
 *	Setup routine for the "zero" function.
 */
int
iopaau_func_zero_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{

	return (iopaau_func_fill_immed_setup(sc, dreq, 0));
}

/*
 * iopaau_func_fill8_setup:
 *
 *	Setup routine for the "fill8" function.
 */
int
iopaau_func_fill8_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{

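	/*
	 * Replicate the single fill byte into all four byte lanes of
	 * the 32-bit immediate value, e.g. 0xab becomes 0xabababab.
	 */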
	return (iopaau_func_fill_immed_setup(sc, dreq,
	    dreq->dreq_immediate[0] |
	    (dreq->dreq_immediate[0] << 8) |
	    (dreq->dreq_immediate[0] << 16) |
	    (dreq->dreq_immediate[0] << 24)));
}

/*
 * Descriptor command words for varying numbers of inputs.  For 1 input,
 * this does a copy.  For multiple inputs, we're doing an XOR.  In this
 * case, the first block is a "direct fill" to load the store queue, and
 * the remaining blocks are XOR'd to the store queue.
 */
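/*
 * For example, with two inputs each descriptor computes
 * dst[i] = src1[i] ^ src2[i]: block 1 direct-fills the store queue
 * from the first source and block 2 XORs the second source into it
 * before the result is written out.  With a single input, the direct
 * fill alone yields a plain copy.
 */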
static const uint32_t iopaau_dc_inputs[] = {
	0,						/* 0 */

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL),		/* 1 */

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|		/* 2 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR),

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|		/* 3 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR),

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|		/* 4 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|				/* 5 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|				/* 6 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR)|
	AAU_DC_B6_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|				/* 7 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR)|
	AAU_DC_B6_CC(AAU_DC_CC_XOR)|
	AAU_DC_B7_CC(AAU_DC_CC_XOR),

	AAU_DC_SBCI_5_8|				/* 8 */
	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR)|
	AAU_DC_B5_CC(AAU_DC_CC_XOR)|
	AAU_DC_B6_CC(AAU_DC_CC_XOR)|
	AAU_DC_B7_CC(AAU_DC_CC_XOR)|
	AAU_DC_B8_CC(AAU_DC_CC_XOR),
};

/*
 * iopaau_func_xor_setup:
 *
 *	Setup routine for the "copy", "xor2".."xor8" functions.
 */
int
iopaau_func_xor_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	struct pool_cache *dc = af->af_desc_cache;
	bus_dmamap_t dmamap = sc->sc_map_out;
	bus_dmamap_t *inmap = sc->sc_map_in;
	uint32_t *prevpa;
	struct aau_desc_8 **prevp, *cur;
	int ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
	int i, error, seg;
	size_t descsz = AAU_DESC_SIZE(ninputs);

	KASSERT(ninputs <= AAU_MAX_INPUTS);

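	/*
	 * Each descriptor carries a single byte count that applies to
	 * the destination and to every source, so the output map and
	 * all of the input maps must end up with the same number of
	 * segments and matching segment lengths.  Mismatches are
	 * caught below and reported as EFAULT.
	 */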
	switch (dreq->dreq_outbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dreq->dreq_outbuf.dmbuf_linear.l_addr,
		    dreq->dreq_outbuf.dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;

		if (uio->uio_rw != UIO_READ)
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;
	    }

	default:
		error = EINVAL;
	}

	if (__predict_false(error != 0))
		return (error);

	switch (dreq->dreq_inbuf_type) {
	case DMOVER_BUF_LINEAR:
		for (i = 0; i < ninputs; i++) {
			error = bus_dmamap_load(sc->sc_dmat, inmap[i],
			    dreq->dreq_inbuf[i].dmbuf_linear.l_addr,
			    dreq->dreq_inbuf[i].dmbuf_linear.l_len, NULL,
			    BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
			if (__predict_false(error != 0))
				break;
			if (dmamap->dm_nsegs != inmap[i]->dm_nsegs) {
				error = EFAULT;	/* "address error", sort of. */
				bus_dmamap_unload(sc->sc_dmat, inmap[i]);
				break;
			}
		}
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio;

		for (i = 0; i < ninputs; i++) {
			uio = dreq->dreq_inbuf[i].dmbuf_uio;

			if (uio->uio_rw != UIO_WRITE) {
				error = EINVAL;
				break;
			}

			error = bus_dmamap_load_uio(sc->sc_dmat, inmap[i], uio,
			    BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
			if (__predict_false(error != 0)) {
				break;
			}
			if (dmamap->dm_nsegs != inmap[i]->dm_nsegs) {
				error = EFAULT;	/* "address error", sort of. */
				bus_dmamap_unload(sc->sc_dmat, inmap[i]);
				break;
			}
		}
		break;
	    }

	default:
		i = 0;	/* XXX: gcc */
		error = EINVAL;
	}

	if (__predict_false(error != 0)) {
		for (--i; i >= 0; i--)
			bus_dmamap_unload(sc->sc_dmat, inmap[i]);
		bus_dmamap_unload(sc->sc_dmat, dmamap);
		return (error);
	}

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	for (i = 0; i < ninputs; i++) {
		bus_dmamap_sync(sc->sc_dmat, inmap[i], 0, inmap[i]->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}

	prevp = (struct aau_desc_8 **) &sc->sc_firstdesc;
	prevpa = &sc->sc_firstdesc_pa;

	cur = NULL;	/* XXX: gcc */
	for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
		cur = pool_cache_get(dc, PR_NOWAIT);
		if (cur == NULL) {
			*prevp = NULL;
			error = ENOMEM;
			goto bad;
		}

		*prevp = cur;
		*prevpa = cur->d_pa;

		prevp = &cur->d_next;
		prevpa = &cur->d_nda;

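		/*
		 * Sources 1-4 go into the descriptor's d_sar array;
		 * sources 5-8, when present, go into the extended
		 * d_sar5_8 array (descsz ensures the larger descriptor
		 * is synced in that case).
		 */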
		for (i = 0; i < ninputs; i++) {
			if (dmamap->dm_segs[seg].ds_len !=
			    inmap[i]->dm_segs[seg].ds_len) {
				*prevp = NULL;
				error = EFAULT;	/* "address" error, sort of. */
				goto bad;
			}
			if (i < 4) {
				cur->d_sar[i] =
				    inmap[i]->dm_segs[seg].ds_addr;
			} else if (i < 8) {
				cur->d_sar5_8[i - 4] =
				    inmap[i]->dm_segs[seg].ds_addr;
			}
		}
		cur->d_dar = dmamap->dm_segs[seg].ds_addr;
		cur->d_bc = dmamap->dm_segs[seg].ds_len;
		cur->d_dc = iopaau_dc_inputs[ninputs] | AAU_DC_DWE;
		SYNC_DESC(cur, descsz);
	}

	*prevp = NULL;
	*prevpa = 0;

	cur->d_dc |= AAU_DC_IE;
	SYNC_DESC(cur, descsz);

	sc->sc_lastdesc = cur;

	return (0);

 bad:
	iopaau_desc_free(dc, sc->sc_firstdesc);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);
	for (i = 0; i < ninputs; i++)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_map_in[i]);
	sc->sc_firstdesc = NULL;

	return (error);
}

int
iopaau_intr(void *arg)
{
	struct iopaau_softc *sc = arg;
	struct dmover_request *dreq;
	uint32_t asr;

	/* Clear the interrupt. */
	asr = bus_space_read_4(sc->sc_st, sc->sc_sh, AAU_ASR);
	if (asr == 0)
		return (0);
	bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ASR, asr);

	/* XXX -- why does this happen? */
	if (sc->sc_running == NULL) {
		printf("%s: unexpected interrupt, ASR = 0x%08x\n",
		    sc->sc_dev.dv_xname, asr);
		return (1);
	}
	dreq = sc->sc_running;

	/* Stop the AAU. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ACR, 0);

	DPRINTF(("%s: got interrupt for dreq %p\n", sc->sc_dev.dv_xname,
	    dreq));

	if (__predict_false((asr & AAU_ASR_ETIF) != 0)) {
		/*
		 * We expect to get end-of-chain interrupts, not
		 * end-of-transfer interrupts, so panic if we get
		 * one of these.
		 */
		panic("aau_intr: got EOT interrupt");
	}

	if (__predict_false((asr & AAU_ASR_MA) != 0)) {
		printf("%s: WARNING: got master abort\n", sc->sc_dev.dv_xname);
		dreq->dreq_flags |= DMOVER_REQ_ERROR;
		dreq->dreq_error = EFAULT;
	}

	/* Finish this transfer, start next one. */
	iopaau_finish(sc);

	return (1);
}

void
iopaau_attach(struct iopaau_softc *sc)
{
	int error, i;

	error = bus_dmamap_create(sc->sc_dmat, AAU_MAX_XFER, AAU_MAX_SEGS,
	    AAU_MAX_XFER, AAU_IO_BOUNDARY, 0, &sc->sc_map_out);
	if (error) {
		aprint_error(
		    "%s: unable to create output DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	for (i = 0; i < AAU_MAX_INPUTS; i++) {
		error = bus_dmamap_create(sc->sc_dmat, AAU_MAX_XFER,
		    AAU_MAX_SEGS, AAU_MAX_XFER, AAU_IO_BOUNDARY, 0,
		    &sc->sc_map_in[i]);
		if (error) {
			aprint_error("%s: unable to create input %d DMA map, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
	}

	/*
	 * Initialize global resources.  Ok to do here, since there's
	 * only one AAU.
	 */
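	/*
	 * The 32-byte (8 * 4) alignment is applied at the offset of
	 * d_nda, so the hardware-visible portion of each descriptor
	 * satisfies the alignment asserted in iopaau_desc_ctor().
	 */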
	iopaau_desc_4_cache = pool_cache_init(sizeof(struct aau_desc_4),
	    8 * 4, offsetof(struct aau_desc_4, d_nda), 0, "aaud4pl",
	    NULL, IPL_VM, iopaau_desc_ctor, NULL, NULL);
	iopaau_desc_8_cache = pool_cache_init(sizeof(struct aau_desc_8),
	    8 * 4, offsetof(struct aau_desc_8, d_nda), 0, "aaud8pl",
	    NULL, IPL_VM, iopaau_desc_ctor, NULL, NULL);

	/* Register us with dmover. */
	dmover_backend_register(&sc->sc_dmb);
}