/*	$NetBSD: iopaau.c,v 1.6 2002/08/03 21:58:55 thorpej Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Common code for XScale-based I/O Processor Application Accelerator
 * Unit support.
 *
 * The AAU provides a back-end for the dmover(9) facility.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iopaau.c,v 1.6 2002/08/03 21:58:55 thorpej Exp $");

#include <sys/param.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/uio.h>

#include <uvm/uvm.h>

#include <machine/bus.h>

#include <arm/xscale/iopaaureg.h>
#include <arm/xscale/iopaauvar.h>

#ifdef AAU_DEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)	/* nothing */
#endif

static struct pool aau_desc_4_pool;

struct pool_cache iopaau_desc_4_cache;

/*
 * iopaau_desc_ctor:
 *
 *	Constructor for all types of descriptors.
 */
static int
iopaau_desc_ctor(void *arg, void *object, int flags)
{
	struct aau_desc_4 *d = object;

	/*
	 * Cache the physical address of the hardware portion of
	 * the descriptor in the software portion of the descriptor
	 * for quick reference later.
	 */
	d->d_pa = vtophys(d) + SYNC_DESC_4_OFFSET;
	KASSERT((d->d_pa & 31) == 0);
	return (0);
}

/*
 * iopaau_desc_free:
 *
 *	Free a chain of AAU descriptors.
 */
void
iopaau_desc_free(struct pool_cache *dc, void *firstdesc)
{
	struct aau_desc_4 *d, *next;

	for (d = firstdesc; d != NULL; d = next) {
		next = d->d_next;
		pool_cache_put(dc, d);
	}
}

/*
 * iopaau_start:
 *
 *	Start an AAU request.  Must be called at splbio().
 */
static void
iopaau_start(struct iopaau_softc *sc)
{
	struct dmover_backend *dmb = &sc->sc_dmb;
	struct dmover_request *dreq;
	struct iopaau_function *af;
	int error;

	for (;;) {

		KASSERT(sc->sc_running == NULL);

		dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
		if (dreq == NULL)
			return;

		dmover_backend_remque(dmb, dreq);
		dreq->dreq_flags |= DMOVER_REQ_RUNNING;

		sc->sc_running = dreq;

		/* XXXUNLOCK */

		af = dreq->dreq_assignment->das_algdesc->dad_data;
		error = (*af->af_setup)(sc, dreq);

		/* XXXLOCK */

		if (error) {
			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			dreq->dreq_error = error;
			sc->sc_running = NULL;
			/* XXXUNLOCK */
			dmover_done(dreq);
			/* XXXLOCK */
			continue;
		}

#ifdef DIAGNOSTIC
		if (bus_space_read_4(sc->sc_st, sc->sc_sh, AAU_ASR) &
		    AAU_ASR_AAF)
			panic("iopaau_start: AAU already active");
#endif

		DPRINTF(("%s: starting dreq %p\n", sc->sc_dev.dv_xname,
		    dreq));

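		/*
		 * Point the AAU at the first descriptor in the chain
		 * and enable it; completion is reported back to us
		 * via interrupt.
		 */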
		bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ANDAR,
		    sc->sc_firstdesc_pa);
		bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ACR,
		    AAU_ACR_AAE);

		break;
	}
}

/*
 * iopaau_finish:
 *
 *	Finish the current operation.  AAU must be stopped.
 */
static void
iopaau_finish(struct iopaau_softc *sc)
{
	struct dmover_request *dreq = sc->sc_running;
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	void *firstdesc = sc->sc_firstdesc;
	int i, ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;

	sc->sc_running = NULL;

	/* If the function has inputs, unmap them. */
	for (i = 0; i < ninputs; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_map_in[i], 0,
		    sc->sc_map_in[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_map_in[i]);
	}

	/* Unload the output buffer DMA map. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map_out, 0,
	    sc->sc_map_out->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);

	/* Get the next transfer started. */
	iopaau_start(sc);

	/* Now free descriptors for last transfer. */
	iopaau_desc_free(af->af_desc_cache, firstdesc);

	dmover_done(dreq);
}

/*
 * iopaau_process:
 *
 *	Dmover back-end entry point.
 */
void
iopaau_process(struct dmover_backend *dmb)
{
	struct iopaau_softc *sc = dmb->dmb_cookie;
	int s;

	s = splbio();
	/* XXXLOCK */

	if (sc->sc_running == NULL)
		iopaau_start(sc);

	/* XXXUNLOCK */
	splx(s);
}

/*
 * iopaau_func_fill_immed_setup:
 *
 *	Common code shared by the zero and fillN setup routines.
 */
static int
iopaau_func_fill_immed_setup(struct iopaau_softc *sc,
    struct dmover_request *dreq, uint32_t immed)
{
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	struct pool_cache *dc = af->af_desc_cache;
	bus_dmamap_t dmamap = sc->sc_map_out;
	uint32_t *prevpa;
	struct aau_desc_4 **prevp, *cur;
	int error, seg;

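	/*
	 * The output buffer is the target of the AAU's writes, so it
	 * is mapped for device-to-memory ("read") DMA.
	 */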
	switch (dreq->dreq_outbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dreq->dreq_outbuf.dmbuf_linear.l_addr,
		    dreq->dreq_outbuf.dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;

		if (uio->uio_rw != UIO_READ)
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;
	    }

	default:
		/* Unknown buffer type; nothing was mapped. */
		return (EINVAL);
	}

	if (__predict_false(error != 0))
		return (error);

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	prevp = (struct aau_desc_4 **) &sc->sc_firstdesc;
	prevpa = &sc->sc_firstdesc_pa;

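	/*
	 * Build one descriptor per DMA segment of the output buffer,
	 * linking the chain through both the virtual (d_next) and
	 * physical (d_nda) next pointers.
	 */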
	for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
		cur = pool_cache_get(dc, PR_NOWAIT);
		if (cur == NULL) {
			*prevp = NULL;
			error = ENOMEM;
			goto bad;
		}

		*prevp = cur;
		*prevpa = cur->d_pa;

		prevp = &cur->d_next;
		prevpa = &cur->d_nda;

		/*
		 * We don't actually enforce the page alignment
		 * constraint, here, because there is only one
		 * data stream to worry about.
		 */

		cur->d_sar[0] = immed;
		cur->d_dar = dmamap->dm_segs[seg].ds_addr;
		cur->d_bc = dmamap->dm_segs[seg].ds_len;
		cur->d_dc = AAU_DC_B1_CC(AAU_DC_CC_FILL) | AAU_DC_DWE;
		SYNC_DESC(cur, sizeof(struct aau_desc_4));
	}

	*prevp = NULL;
	*prevpa = 0;

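	/*
	 * Terminate the chain and enable the end-of-chain interrupt
	 * on the final descriptor.
	 */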
	cur->d_dc |= AAU_DC_IE;
	SYNC_DESC(cur, sizeof(struct aau_desc_4));

	sc->sc_lastdesc = cur;

	return (0);

 bad:
	iopaau_desc_free(dc, sc->sc_firstdesc);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);
	sc->sc_firstdesc = NULL;

	return (error);
}

/*
 * iopaau_func_zero_setup:
 *
 *	Setup routine for the "zero" function.
 */
int
iopaau_func_zero_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{

	return (iopaau_func_fill_immed_setup(sc, dreq, 0));
}

/*
 * iopaau_func_fill8_setup:
 *
 *	Setup routine for the "fill8" function.
 */
int
iopaau_func_fill8_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{

	return (iopaau_func_fill_immed_setup(sc, dreq,
	    dreq->dreq_immediate[0] |
	    (dreq->dreq_immediate[0] << 8) |
	    (dreq->dreq_immediate[0] << 16) |
	    (dreq->dreq_immediate[0] << 24)));
}

/*
 * Descriptor command words for varying numbers of inputs.  For 1 input,
 * this does a copy.  For multiple inputs, we're doing an XOR.  In this
 * case, the first block is a "direct fill" to load the store queue, and
 * the remaining blocks are XOR'd to the store queue.
 */
static const uint32_t iopaau_dc_inputs[] = {
	0,						/* 0 */

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL),		/* 1 */

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|		/* 2 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR),

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|		/* 3 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR),

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|		/* 4 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR),
};

/*
 * iopaau_func_xor_1_4_setup:
 *
 *	Setup routine for the "copy", "xor2".."xor4" functions.
 */
int
iopaau_func_xor_1_4_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	struct pool_cache *dc = af->af_desc_cache;
	bus_dmamap_t dmamap = sc->sc_map_out;
	bus_dmamap_t *inmap = sc->sc_map_in;
	uint32_t *prevpa;
	struct aau_desc_4 **prevp, *cur;
	int ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
	int i, error, seg;
	size_t descsz = AAU_DESC_SIZE(ninputs);

	KASSERT(ninputs <= AAU_MAX_INPUTS);

	switch (dreq->dreq_outbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dreq->dreq_outbuf.dmbuf_linear.l_addr,
		    dreq->dreq_outbuf.dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;

		if (uio->uio_rw != UIO_READ)
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;
	    }

	default:
		/* Unknown buffer type; nothing was mapped. */
		return (EINVAL);
	}

	if (__predict_false(error != 0))
		return (error);

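	/*
	 * Each input must map into the same number of DMA segments
	 * as the output, and corresponding segments must be the same
	 * length, since each descriptor covers one segment of every
	 * stream.
	 */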
	switch (dreq->dreq_inbuf_type) {
	case DMOVER_BUF_LINEAR:
		for (i = 0; i < ninputs; i++) {
			error = bus_dmamap_load(sc->sc_dmat, inmap[i],
			    dreq->dreq_inbuf[i].dmbuf_linear.l_addr,
			    dreq->dreq_inbuf[i].dmbuf_linear.l_len, NULL,
			    BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
			if (__predict_false(error != 0))
				break;
			if (dmamap->dm_nsegs != inmap[i]->dm_nsegs) {
				error = EFAULT;	/* "address error", sort of. */
				bus_dmamap_unload(sc->sc_dmat, inmap[i]);
				break;
			}
		}
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio;

		for (i = 0; i < ninputs; i++) {
			uio = dreq->dreq_inbuf[i].dmbuf_uio;

			if (uio->uio_rw != UIO_WRITE) {
				error = EINVAL;
				break;
			}

			error = bus_dmamap_load_uio(sc->sc_dmat, inmap[i], uio,
			    BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
			if (__predict_false(error != 0)) {
				break;
			}
			if (dmamap->dm_nsegs != inmap[i]->dm_nsegs) {
				error = EFAULT;	/* "address error", sort of. */
				bus_dmamap_unload(sc->sc_dmat, inmap[i]);
				break;
			}
		}
		break;
	    }

	default:
		/* Unknown buffer type; fail and unload the output map. */
		i = 0;
		error = EINVAL;
		break;
	}

	if (__predict_false(error != 0)) {
		for (--i; i >= 0; i--)
			bus_dmamap_unload(sc->sc_dmat, inmap[i]);
		bus_dmamap_unload(sc->sc_dmat, dmamap);
		return (error);
	}

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	for (i = 0; i < ninputs; i++) {
		bus_dmamap_sync(sc->sc_dmat, inmap[i], 0, inmap[i]->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}

	prevp = (struct aau_desc_4 **) &sc->sc_firstdesc;
	prevpa = &sc->sc_firstdesc_pa;

	for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
		cur = pool_cache_get(dc, PR_NOWAIT);
		if (cur == NULL) {
			*prevp = NULL;
			error = ENOMEM;
			goto bad;
		}

		*prevp = cur;
		*prevpa = cur->d_pa;

		prevp = &cur->d_next;
		prevpa = &cur->d_nda;

		for (i = 0; i < ninputs; i++) {
			if (dmamap->dm_segs[seg].ds_len !=
			    inmap[i]->dm_segs[seg].ds_len) {
				*prevp = NULL;
				error = EFAULT;	/* "address" error, sort of. */
				goto bad;
			}
			cur->d_sar[i] = inmap[i]->dm_segs[seg].ds_addr;
		}
		cur->d_dar = dmamap->dm_segs[seg].ds_addr;
		cur->d_bc = dmamap->dm_segs[seg].ds_len;
		cur->d_dc = iopaau_dc_inputs[ninputs] | AAU_DC_DWE;
		SYNC_DESC(cur, descsz);
	}

	*prevp = NULL;
	*prevpa = 0;

	cur->d_dc |= AAU_DC_IE;
	SYNC_DESC(cur, descsz);

	sc->sc_lastdesc = cur;

	return (0);

 bad:
	iopaau_desc_free(dc, sc->sc_firstdesc);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);
	for (i = 0; i < ninputs; i++)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_map_in[i]);
	sc->sc_firstdesc = NULL;

	return (error);
}

int
iopaau_intr(void *arg)
{
	struct iopaau_softc *sc = arg;
	struct dmover_request *dreq;
	uint32_t asr;

	/* Clear the interrupt. */
	asr = bus_space_read_4(sc->sc_st, sc->sc_sh, AAU_ASR);
	if (asr == 0)
		return (0);
	bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ASR, asr);

	/* XXX -- why does this happen? */
	if (sc->sc_running == NULL) {
		printf("%s: unexpected interrupt, ASR = 0x%08x\n",
		    sc->sc_dev.dv_xname, asr);
		return (1);
	}
	dreq = sc->sc_running;

	/* Stop the AAU. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ACR, 0);

	DPRINTF(("%s: got interrupt for dreq %p\n", sc->sc_dev.dv_xname,
	    dreq));

	if (__predict_false((asr & AAU_ASR_ETIF) != 0)) {
		/*
		 * We expect to get end-of-chain interrupts, not
		 * end-of-transfer interrupts, so panic if we get
		 * one of these.
		 */
		panic("aau_intr: got EOT interrupt");
	}

	if (__predict_false((asr & AAU_ASR_MA) != 0)) {
		printf("%s: WARNING: got master abort\n", sc->sc_dev.dv_xname);
		dreq->dreq_flags |= DMOVER_REQ_ERROR;
		dreq->dreq_error = EFAULT;
	}

	/* Finish this transfer, start next one. */
	iopaau_finish(sc);

	return (1);
}

void
iopaau_attach(struct iopaau_softc *sc)
{
	int error, i;

	error = bus_dmamap_create(sc->sc_dmat, AAU_MAX_XFER, AAU_MAX_SEGS,
	    AAU_MAX_XFER, AAU_IO_BOUNDARY, 0, &sc->sc_map_out);
	if (error) {
		printf("%s: unable to create output DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	for (i = 0; i < AAU_MAX_INPUTS; i++) {
		error = bus_dmamap_create(sc->sc_dmat, AAU_MAX_XFER,
		    AAU_MAX_SEGS, AAU_MAX_XFER, AAU_IO_BOUNDARY, 0,
		    &sc->sc_map_in[i]);
		if (error) {
			printf("%s: unable to create input %d DMA map, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
	}

	/*
	 * Initialize global resources.  Ok to do here, since there's
	 * only one AAU.
	 */
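	/*
	 * The hardware portion of each descriptor must be 32-byte
	 * aligned (the constructor asserts this), hence the alignment
	 * and offset arguments passed to pool_init().
	 */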
	pool_init(&aau_desc_4_pool, sizeof(struct aau_desc_4),
	    8 * 4, offsetof(struct aau_desc_4, d_nda), 0, "aaud4pl",
	    NULL);
	pool_cache_init(&iopaau_desc_4_cache, &aau_desc_4_pool,
	    iopaau_desc_ctor, NULL, NULL);

	/* Register us with dmover. */
	dmover_backend_register(&sc->sc_dmb);
}