/*	$NetBSD: iopaau.c,v 1.5 2002/08/03 21:31:16 thorpej Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Common code for XScale-based I/O Processor Application Accelerator
 * Unit support.
 *
 * The AAU provides a back-end for the dmover(9) facility.
 */
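
/*
 * For context, a minimal dmover(9) client of this back-end might look
 * roughly like the sketch below.  This is illustrative only, not part
 * of this file: `buf' and `len' are assumed to describe the caller's
 * output buffer, error handling is omitted, and the entry points are
 * those documented in dmover(9).
 *
 *	struct dmover_session *dses;
 *	struct dmover_request *dreq;
 *
 *	dmover_session_create("zero", &dses);
 *	dreq = dmover_request_alloc(dses, NULL);
 *	dreq->dreq_outbuf_type = DMOVER_BUF_LINEAR;
 *	dreq->dreq_outbuf.dmbuf_linear.l_addr = buf;
 *	dreq->dreq_outbuf.dmbuf_linear.l_len = len;
 *	dmover_process(dreq);
 *
 * Completion is reported asynchronously through the request's callback
 * once iopaau_intr() calls dmover_done().
 */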

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iopaau.c,v 1.5 2002/08/03 21:31:16 thorpej Exp $");

#include <sys/param.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/uio.h>

#include <uvm/uvm.h>

#include <machine/bus.h>

#include <arm/xscale/iopaaureg.h>
#include <arm/xscale/iopaauvar.h>

#ifdef AAU_DEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)	/* nothing */
#endif

static struct pool aau_desc_4_pool;

struct pool_cache iopaau_desc_4_cache;

/*
 * iopaau_desc_ctor:
 *
 *	Constructor for all types of descriptors.
 */
static int
iopaau_desc_ctor(void *arg, void *object, int flags)
{
	struct aau_desc_4 *d = object;

	/*
	 * Cache the physical address of the hardware portion of
	 * the descriptor in the software portion of the descriptor
	 * for quick reference later.
	 */
	d->d_pa = vtophys(d) + SYNC_DESC_4_OFFSET;
	KASSERT((d->d_pa & 31) == 0);
	return (0);
}

/*
 * iopaau_desc_free:
 *
 *	Free a chain of AAU descriptors.
 */
void
iopaau_desc_free(struct pool_cache *dc, void *firstdesc)
{
	struct aau_desc_4 *d, *next;

	for (d = firstdesc; d != NULL; d = next) {
		next = d->d_next;
		pool_cache_put(dc, d);
	}
}

/*
 * iopaau_start:
 *
 *	Start an AAU request.  Must be called at splbio().
 */
static void
iopaau_start(struct iopaau_softc *sc)
{
	struct dmover_backend *dmb = &sc->sc_dmb;
	struct dmover_request *dreq;
	struct iopaau_function *af;
	int error;

	for (;;) {

		KASSERT(sc->sc_running == NULL);

		dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
		if (dreq == NULL)
			return;

		dmover_backend_remque(dmb, dreq);
		dreq->dreq_flags |= DMOVER_REQ_RUNNING;

		sc->sc_running = dreq;

		/* XXXUNLOCK */

		af = dreq->dreq_assignment->das_algdesc->dad_data;
		error = (*af->af_setup)(sc, dreq);

		/* XXXLOCK */

		if (error) {
			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			dreq->dreq_error = error;
			sc->sc_running = NULL;
			/* XXXUNLOCK */
			dmover_done(dreq);
			/* XXXLOCK */
			continue;
		}

#ifdef DIAGNOSTIC
		if (bus_space_read_4(sc->sc_st, sc->sc_sh, AAU_ASR) &
		    AAU_ASR_AAF)
			panic("iopaau_start: AAU already active");
#endif

		DPRINTF(("%s: starting dreq %p\n", sc->sc_dev.dv_xname,
		    dreq));

		bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ANDAR,
		    sc->sc_firstdesc_pa);
		bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ACR,
		    AAU_ACR_AAE);

		break;
	}
}

/*
 * iopaau_finish:
 *
 *	Finish the current operation.  AAU must be stopped.
 */
static void
iopaau_finish(struct iopaau_softc *sc)
{
	struct dmover_request *dreq = sc->sc_running;
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	void *firstdesc = sc->sc_firstdesc;
	int i, ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;

	sc->sc_running = NULL;

	/* If the function has inputs, unmap them. */
	for (i = 0; i < ninputs; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_map_in[i], 0,
		    sc->sc_map_in[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_map_in[i]);
	}

	/* Unload the output buffer DMA map. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map_out, 0,
	    sc->sc_map_out->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);

	/* Get the next transfer started. */
	iopaau_start(sc);

	/* Now free descriptors for last transfer. */
	iopaau_desc_free(af->af_desc_cache, firstdesc);

	dmover_done(dreq);
}

/*
 * iopaau_process:
 *
 *	Dmover back-end entry point.
 */
void
iopaau_process(struct dmover_backend *dmb)
{
	struct iopaau_softc *sc = dmb->dmb_cookie;
	int s;

	s = splbio();
	/* XXXLOCK */

	if (sc->sc_running == NULL)
		iopaau_start(sc);

	/* XXXUNLOCK */
	splx(s);
}

/*
 * iopaau_func_fill_immed_setup:
 *
 *	Common code shared by the zero and fillN setup routines.
 */
static int
iopaau_func_fill_immed_setup(struct iopaau_softc *sc,
    struct dmover_request *dreq, uint32_t immed)
{
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	struct pool_cache *dc = af->af_desc_cache;
	bus_dmamap_t dmamap = sc->sc_map_out;
	uint32_t *prevpa;
	struct aau_desc_4 **prevp, *cur;
	int error, seg;

	switch (dreq->dreq_outbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dreq->dreq_outbuf.dmbuf_linear.l_addr,
		    dreq->dreq_outbuf.dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;

		if (uio->uio_rw != UIO_READ)
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;
	    }

	default:
		/*
		 * Unknown buffer type; fail rather than using
		 * `error' uninitialized.
		 */
		return (EINVAL);
	}

	if (__predict_false(error != 0))
		return (error);

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	prevp = (struct aau_desc_4 **) &sc->sc_firstdesc;
	prevpa = &sc->sc_firstdesc_pa;

	for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
		cur = pool_cache_get(dc, PR_NOWAIT);
		if (cur == NULL) {
			*prevp = NULL;
			error = ENOMEM;
			goto bad;
		}

		*prevp = cur;
		*prevpa = cur->d_pa;

		prevp = &cur->d_next;
		prevpa = &cur->d_nda;

		/*
		 * We don't actually enforce the page alignment
		 * constraint here, because there is only one
		 * data stream to worry about.
		 */

		cur->d_sar[0] = immed;
		cur->d_dar = dmamap->dm_segs[seg].ds_addr;
		cur->d_bc = dmamap->dm_segs[seg].ds_len;
		cur->d_dc = AAU_DC_B1_CC(AAU_DC_CC_FILL) | AAU_DC_DWE;
		SYNC_DESC_4(cur);
	}

	*prevp = NULL;
	*prevpa = 0;

	cur->d_dc |= AAU_DC_IE;
	SYNC_DESC_4(cur);

	sc->sc_lastdesc = cur;

	return (0);

 bad:
	iopaau_desc_free(dc, sc->sc_firstdesc);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);
	sc->sc_firstdesc = NULL;

	return (error);
}
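
/*
 * The chain built above contains one struct aau_desc_4 per DMA segment
 * of the output map.  The CPU follows the d_next (virtual) links, the
 * AAU follows the d_nda (physical) links, and only the final descriptor
 * has AAU_DC_IE set, so a single end-of-chain interrupt is delivered:
 *
 *	sc_firstdesc -> desc[0] -> desc[1] -> ... -> desc[n-1] -> NULL
 */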

/*
 * iopaau_func_zero_setup:
 *
 *	Setup routine for the "zero" function.
 */
int
iopaau_func_zero_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{

	return (iopaau_func_fill_immed_setup(sc, dreq, 0));
}

/*
 * iopaau_func_fill8_setup:
 *
 *	Setup routine for the "fill8" function.
 */
int
iopaau_func_fill8_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{

	/* Replicate the fill byte across all four bytes of the immediate. */
	return (iopaau_func_fill_immed_setup(sc, dreq,
	    dreq->dreq_immediate[0] |
	    (dreq->dreq_immediate[0] << 8) |
	    (dreq->dreq_immediate[0] << 16) |
	    (dreq->dreq_immediate[0] << 24)));
}

/*
 * Descriptor command words for varying numbers of inputs.  For 1 input,
 * this does a copy.  For multiple inputs, we're doing an XOR.  In this
 * case, the first block is a "direct fill" to load the store queue, and
 * the remaining blocks are XOR'd to the store queue.
 */
static const uint32_t iopaau_dc_inputs[] = {
	0,						/* 0 */

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL),		/* 1 */

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|		/* 2 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR),

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|		/* 3 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR),

	AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL)|		/* 4 */
	AAU_DC_B2_CC(AAU_DC_CC_XOR)|
	AAU_DC_B3_CC(AAU_DC_CC_XOR)|
	AAU_DC_B4_CC(AAU_DC_CC_XOR),
};
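
/*
 * Illustrative note: with iopaau_dc_inputs[3], each descriptor asks the
 * AAU to compute, for every word of the block,
 *
 *	*dar = *sar[0] ^ *sar[1] ^ *sar[2]
 *
 * (parity-style XOR), while iopaau_dc_inputs[1] degenerates to a plain
 * copy from sar[0] to dar.
 */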

/*
 * iopaau_func_xor_1_4_setup:
 *
 *	Setup routine for the "copy", "xor2".."xor4" functions.
 */
int
iopaau_func_xor_1_4_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	struct pool_cache *dc = af->af_desc_cache;
	bus_dmamap_t dmamap = sc->sc_map_out;
	bus_dmamap_t *inmap = sc->sc_map_in;
	uint32_t *prevpa;
	struct aau_desc_4 **prevp, *cur;
	int ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
	int i, error, seg;

	KASSERT(ninputs <= AAU_MAX_INPUTS);

	switch (dreq->dreq_outbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dreq->dreq_outbuf.dmbuf_linear.l_addr,
		    dreq->dreq_outbuf.dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;

		if (uio->uio_rw != UIO_READ)
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;
	    }

	default:
		/*
		 * Unknown buffer type; fail rather than using
		 * `error' uninitialized.
		 */
		return (EINVAL);
	}

	if (__predict_false(error != 0))
		return (error);

	switch (dreq->dreq_inbuf_type) {
	case DMOVER_BUF_LINEAR:
		for (i = 0; i < ninputs; i++) {
			error = bus_dmamap_load(sc->sc_dmat, inmap[i],
			    dreq->dreq_inbuf[i].dmbuf_linear.l_addr,
			    dreq->dreq_inbuf[i].dmbuf_linear.l_len, NULL,
			    BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
			if (__predict_false(error != 0))
				break;
			if (dmamap->dm_nsegs != inmap[i]->dm_nsegs) {
				error = EFAULT;	/* "address error", sort of. */
				bus_dmamap_unload(sc->sc_dmat, inmap[i]);
				break;
			}
		}
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio;

		for (i = 0; i < ninputs; i++) {
			uio = dreq->dreq_inbuf[i].dmbuf_uio;

			if (uio->uio_rw != UIO_WRITE) {
				error = EINVAL;
				break;
			}

			error = bus_dmamap_load_uio(sc->sc_dmat, inmap[i], uio,
			    BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
			if (__predict_false(error != 0))
				break;
			if (dmamap->dm_nsegs != inmap[i]->dm_nsegs) {
				error = EFAULT;	/* "address error", sort of. */
				bus_dmamap_unload(sc->sc_dmat, inmap[i]);
				break;
			}
		}
		break;
	    }

	default:
		/*
		 * Unknown buffer type; set `i' and `error' so that the
		 * cleanup below unloads only the output map.
		 */
		i = 0;
		error = EINVAL;
		break;
	}

	if (__predict_false(error != 0)) {
		for (--i; i >= 0; i--)
			bus_dmamap_unload(sc->sc_dmat, inmap[i]);
		bus_dmamap_unload(sc->sc_dmat, dmamap);
		return (error);
	}

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	for (i = 0; i < ninputs; i++) {
		bus_dmamap_sync(sc->sc_dmat, inmap[i], 0, inmap[i]->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}

	prevp = (struct aau_desc_4 **) &sc->sc_firstdesc;
	prevpa = &sc->sc_firstdesc_pa;

	for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
		cur = pool_cache_get(dc, PR_NOWAIT);
		if (cur == NULL) {
			*prevp = NULL;
			error = ENOMEM;
			goto bad;
		}

		*prevp = cur;
		*prevpa = cur->d_pa;

		prevp = &cur->d_next;
		prevpa = &cur->d_nda;

		for (i = 0; i < ninputs; i++) {
			if (dmamap->dm_segs[seg].ds_len !=
			    inmap[i]->dm_segs[seg].ds_len) {
				*prevp = NULL;
				error = EFAULT;	/* "address" error, sort of. */
				goto bad;
			}
			cur->d_sar[i] = inmap[i]->dm_segs[seg].ds_addr;
		}
		cur->d_dar = dmamap->dm_segs[seg].ds_addr;
		cur->d_bc = dmamap->dm_segs[seg].ds_len;
		cur->d_dc = iopaau_dc_inputs[ninputs] | AAU_DC_DWE;
		SYNC_DESC_4(cur);
	}

	*prevp = NULL;
	*prevpa = 0;

	cur->d_dc |= AAU_DC_IE;
	SYNC_DESC_4(cur);

	sc->sc_lastdesc = cur;

	return (0);

 bad:
	iopaau_desc_free(dc, sc->sc_firstdesc);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);
	for (i = 0; i < ninputs; i++)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_map_in[i]);
	sc->sc_firstdesc = NULL;

	return (error);
}

int
iopaau_intr(void *arg)
{
	struct iopaau_softc *sc = arg;
	struct dmover_request *dreq;
	uint32_t asr;

	/* Clear the interrupt. */
	asr = bus_space_read_4(sc->sc_st, sc->sc_sh, AAU_ASR);
	if (asr == 0)
		return (0);
	bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ASR, asr);

	/* XXX -- why does this happen? */
	if (sc->sc_running == NULL) {
		printf("%s: unexpected interrupt, ASR = 0x%08x\n",
		    sc->sc_dev.dv_xname, asr);
		return (1);
	}
	dreq = sc->sc_running;

	/* Stop the AAU. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ACR, 0);

	DPRINTF(("%s: got interrupt for dreq %p\n", sc->sc_dev.dv_xname,
	    dreq));

	if (__predict_false((asr & AAU_ASR_ETIF) != 0)) {
		/*
		 * We expect to get end-of-chain interrupts, not
		 * end-of-transfer interrupts, so panic if we get
		 * one of these.
		 */
		panic("iopaau_intr: got EOT interrupt");
	}

	if (__predict_false((asr & AAU_ASR_MA) != 0)) {
		printf("%s: WARNING: got master abort\n", sc->sc_dev.dv_xname);
		dreq->dreq_flags |= DMOVER_REQ_ERROR;
		dreq->dreq_error = EFAULT;
	}

	/* Finish this transfer, start next one. */
	iopaau_finish(sc);

	return (1);
}

void
iopaau_attach(struct iopaau_softc *sc)
{
	int error, i;

	error = bus_dmamap_create(sc->sc_dmat, AAU_MAX_XFER, AAU_MAX_SEGS,
	    AAU_MAX_XFER, AAU_IO_BOUNDARY, 0, &sc->sc_map_out);
	if (error) {
		printf("%s: unable to create output DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	for (i = 0; i < AAU_MAX_INPUTS; i++) {
		error = bus_dmamap_create(sc->sc_dmat, AAU_MAX_XFER,
		    AAU_MAX_SEGS, AAU_MAX_XFER, AAU_IO_BOUNDARY, 0,
		    &sc->sc_map_in[i]);
		if (error) {
			printf("%s: unable to create input %d DMA map, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
	}

	/*
	 * Initialize global resources.  Ok to do here, since there's
	 * only one AAU.
	 */
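	/*
	 * Note: the pool aligns the hardware portion of each descriptor
	 * (beginning at d_nda) to 32 bytes (8 * 4); iopaau_desc_ctor()
	 * asserts this alignment when it caches d_pa.
	 */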
	pool_init(&aau_desc_4_pool, sizeof(struct aau_desc_4),
	    8 * 4, offsetof(struct aau_desc_4, d_nda), 0, "aaud4pl",
	    NULL);
	pool_cache_init(&iopaau_desc_4_cache, &aau_desc_4_pool,
	    iopaau_desc_ctor, NULL, NULL);

	/* Register us with dmover. */
	dmover_backend_register(&sc->sc_dmb);
}