/*	$NetBSD: iopaau.c,v 1.1 2002/08/02 00:35:48 thorpej Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Common code for XScale-based I/O Processor Application Accelerator
 * Unit support.
 *
 * The AAU provides a back-end for the dmover(9) facility.
 */
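
/*
 * For reference, a rough sketch of how a dmover(9) client might drive
 * one of the functions this back-end provides (names per dmover(9);
 * the "mydone" callback, "buf"/"buflen", and the error handling are
 * illustrative only):
 *
 *	struct dmover_session *dses;
 *	struct dmover_request *dreq;
 *
 *	if (dmover_session_create("zero", &dses) != 0)
 *		return;
 *	dreq = dmover_request_alloc(dses, NULL);
 *	dreq->dreq_outbuf_type = DMOVER_BUF_LINEAR;
 *	dreq->dreq_outbuf.dmbuf_linear.l_addr = buf;
 *	dreq->dreq_outbuf.dmbuf_linear.l_len = buflen;
 *	dreq->dreq_callback = mydone;
 *	dmover_process(dreq);
 *
 * Completion (or failure) is reported asynchronously through the
 * callback.
 */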

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iopaau.c,v 1.1 2002/08/02 00:35:48 thorpej Exp $");

#include <sys/param.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/uio.h>

#include <uvm/uvm.h>

#include <machine/bus.h>

#include <arm/xscale/iopaaureg.h>
#include <arm/xscale/iopaauvar.h>

#ifdef AAU_DEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)	/* nothing */
#endif

static struct pool aau_desc_4_pool;
static struct pool_cache aau_desc_4_cache;

/*
 * iopaau_desc_ctor:
 *
 *	Constructor for all types of descriptors.
 */
static int
iopaau_desc_ctor(void *arg, void *object, int flags)
{
	struct aau_desc_4 *d = object;

	/*
	 * Cache the physical address of the hardware portion of
	 * the descriptor in the software portion of the descriptor
	 * for quick reference later.
	 */
	d->d_pa = vtophys(d) + SYNC_DESC_4_OFFSET;
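	/* The hardware portion must be 32-byte (8-word) aligned. */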
	KASSERT((d->d_pa & 31) == 0);
	return (0);
}

/*
 * iopaau_desc_4_free:
 *
 *	Free a chain of aau_desc_4 structures.
 */
void
iopaau_desc_4_free(struct iopaau_softc *sc, void *firstdesc)
{
	struct aau_desc_4 *d, *next;

	for (d = firstdesc; d != NULL; d = next) {
		next = d->d_next;
		pool_cache_put(&aau_desc_4_cache, d);
	}
}

/*
 * iopaau_start:
 *
 *	Start an AAU request.  Must be called at splbio().
 */
static void
iopaau_start(struct iopaau_softc *sc)
{
	struct dmover_backend *dmb = &sc->sc_dmb;
	struct dmover_request *dreq;
	struct iopaau_function *af;
	int error;

	for (;;) {

		KASSERT(sc->sc_running == NULL);

		dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
		if (dreq == NULL)
			return;

		dmover_backend_remque(dmb, dreq);
		dreq->dreq_flags |= DMOVER_REQ_RUNNING;

		sc->sc_running = dreq;

		/* XXXUNLOCK */

		af = dreq->dreq_assignment->das_algdesc->dad_data;
		error = (*af->af_setup)(sc, dreq);

		/* XXXLOCK */

		if (error) {
			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			dreq->dreq_error = error;
			sc->sc_running = NULL;
			/* XXXUNLOCK */
			dmover_done(dreq);
			/* XXXLOCK */
			continue;
		}

#ifdef DIAGNOSTIC
		if (bus_space_read_4(sc->sc_st, sc->sc_sh, AAU_ASR) &
		    AAU_ASR_AAF)
			panic("iopaau_start: AAU already active");
#endif

		DPRINTF(("%s: starting dreq %p\n", sc->sc_dev.dv_xname,
		    dreq));

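		/*
		 * Point the AAU at the physical address of the first
		 * descriptor, then set the enable bit to start
		 * processing the chain.
		 */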
		bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ANDAR,
		    sc->sc_firstdesc_pa);
		bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ACR,
		    AAU_ACR_AAE);

		break;
	}
}

/*
 * iopaau_finish:
 *
 *	Finish the current operation.  AAU must be stopped.
 */
static void
iopaau_finish(struct iopaau_softc *sc)
{
	struct dmover_request *dreq = sc->sc_running;
	struct iopaau_function *af =
	    dreq->dreq_assignment->das_algdesc->dad_data;
	void *firstdesc = sc->sc_firstdesc;
	int i, ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;

	sc->sc_running = NULL;

	/* If the function has inputs, unmap them. */
	for (i = 0; i < ninputs; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_map_in[i], 0,
		    sc->sc_map_in[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_map_in[i]);
	}

	/* Unload the output buffer DMA map. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map_out, 0,
	    sc->sc_map_out->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);

	/* Get the next transfer started. */
	iopaau_start(sc);

	/* Now free descriptors for last transfer. */
	(*af->af_free)(sc, firstdesc);

	dmover_done(dreq);
}

/*
 * iopaau_process:
 *
 *	Dmover back-end entry point.
 */
void
iopaau_process(struct dmover_backend *dmb)
{
	struct iopaau_softc *sc = dmb->dmb_cookie;
	int s;

	s = splbio();
	/* XXXLOCK */

	if (sc->sc_running == NULL)
		iopaau_start(sc);

	/* XXXUNLOCK */
	splx(s);
}

/*
 * iopaau_func_zero_setup:
 *
 *	Setup routine for the "zero" function.
 */
int
iopaau_func_zero_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{
	bus_dmamap_t dmamap = sc->sc_map_out;
	uint32_t *prevpa;
	struct aau_desc_4 **prevp, *cur;
	int error, seg;

	switch (dreq->dreq_outbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dreq->dreq_outbuf.dmbuf_linear.l_addr,
		    dreq->dreq_outbuf.dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;

		if (uio->uio_rw != UIO_READ)
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
		break;
	    }

	default:
		error = EINVAL;	/* unknown buffer type */
		break;
	}

	if (__predict_false(error != 0))
		return (error);

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	prevp = (struct aau_desc_4 **) &sc->sc_firstdesc;
	prevpa = &sc->sc_firstdesc_pa;

	for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
		cur = pool_cache_get(&aau_desc_4_cache, PR_NOWAIT);
		if (cur == NULL) {
			*prevp = NULL;
			error = ENOMEM;
			goto bad;
		}

		*prevp = cur;
		*prevpa = cur->d_pa;

		prevp = &cur->d_next;
		prevpa = &cur->d_nda;

		/*
		 * We don't actually enforce the page-alignment
		 * constraint here, because there is only one
		 * data stream to worry about.
		 */

		cur->d_sar1 = 0;	/* immediate value */
		cur->d_dar = dmamap->dm_segs[seg].ds_addr;
		cur->d_bc = dmamap->dm_segs[seg].ds_len;
		cur->d_dc = AAU_DC_B1_CC(AAU_DC_CC_FILL) | AAU_DC_DWE;
		SYNC_DESC_4(cur);
	}

	*prevp = NULL;
	*prevpa = 0;

	cur->d_dc |= AAU_DC_IE;
	SYNC_DESC_4(cur);

	sc->sc_lastdesc = cur;

	return (0);

 bad:
	iopaau_desc_4_free(sc, sc->sc_firstdesc);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);
	sc->sc_firstdesc = NULL;

	return (error);
}
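
/*
 * The setup routines in this file build one descriptor per DMA
 * segment of the output map, chained two ways (a sketch):
 *
 *	software:  sc_firstdesc    -> d_next -> d_next -> NULL
 *	hardware:  sc_firstdesc_pa -> d_nda  -> d_nda  -> 0
 *
 * The AAU follows the physical d_nda links, while the driver walks
 * the virtual d_next links when freeing the chain.  Only the final
 * descriptor has AAU_DC_IE set, so a single end-of-chain interrupt
 * is raised per transfer.
 */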

/*
 * iopaau_func_fill8_setup:
 *
 *	Setup routine for the "fill8" function.
 */
int
iopaau_func_fill8_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{
	bus_dmamap_t dmamap = sc->sc_map_out;
	uint32_t *prevpa;
	struct aau_desc_4 **prevp, *cur;
	int error, seg;
	uint32_t immed;

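	/* Replicate the fill byte into each byte lane of a 32-bit word. */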
	immed = dreq->dreq_immediate[0] |
	    (dreq->dreq_immediate[0] << 8) |
	    (dreq->dreq_immediate[0] << 16) |
	    (dreq->dreq_immediate[0] << 24);

	switch (dreq->dreq_outbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dreq->dreq_outbuf.dmbuf_linear.l_addr,
		    dreq->dreq_outbuf.dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;

		if (uio->uio_rw != UIO_READ)
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
		break;
	    }

	default:
		error = EINVAL;	/* unknown buffer type */
		break;
	}

	if (__predict_false(error != 0))
		return (error);

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	prevp = (struct aau_desc_4 **) &sc->sc_firstdesc;
	prevpa = &sc->sc_firstdesc_pa;

	for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
		cur = pool_cache_get(&aau_desc_4_cache, PR_NOWAIT);
		if (cur == NULL) {
			*prevp = NULL;
			error = ENOMEM;
			goto bad;
		}

		*prevp = cur;
		*prevpa = cur->d_pa;

		prevp = &cur->d_next;
		prevpa = &cur->d_nda;

		/*
		 * We don't actually enforce the page-alignment
		 * constraint here, because there is only one
		 * data stream to worry about.
		 */

		cur->d_sar1 = immed;
		cur->d_dar = dmamap->dm_segs[seg].ds_addr;
		cur->d_bc = dmamap->dm_segs[seg].ds_len;
		cur->d_dc = AAU_DC_B1_CC(AAU_DC_CC_FILL) | AAU_DC_DWE;
		SYNC_DESC_4(cur);
	}

	*prevp = NULL;
	*prevpa = 0;

	cur->d_dc |= AAU_DC_IE;
	SYNC_DESC_4(cur);

	sc->sc_lastdesc = cur;

	return (0);

 bad:
	iopaau_desc_4_free(sc, sc->sc_firstdesc);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);
	sc->sc_firstdesc = NULL;

	return (error);
}

/*
 * iopaau_func_copy_setup:
 *
 *	Setup routine for the "copy" function.
 */
int
iopaau_func_copy_setup(struct iopaau_softc *sc, struct dmover_request *dreq)
{
	bus_dmamap_t dmamap = sc->sc_map_out;
	bus_dmamap_t inmap = sc->sc_map_in[0];
	uint32_t *prevpa;
	struct aau_desc_4 **prevp, *cur;
	int error, seg;

	switch (dreq->dreq_outbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dreq->dreq_outbuf.dmbuf_linear.l_addr,
		    dreq->dreq_outbuf.dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;

		if (uio->uio_rw != UIO_READ)
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_WRITE|BUS_DMA_STREAMING);
		break;
	    }

	default:
		error = EINVAL;	/* unknown buffer type */
		break;
	}

	if (__predict_false(error != 0))
		return (error);

	switch (dreq->dreq_inbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, inmap,
		    dreq->dreq_inbuf[0].dmbuf_linear.l_addr,
		    dreq->dreq_inbuf[0].dmbuf_linear.l_len, NULL,
		    BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;

	case DMOVER_BUF_UIO:
	    {
		struct uio *uio = dreq->dreq_inbuf[0].dmbuf_uio;

		if (uio->uio_rw != UIO_WRITE) {
			error = EINVAL;
			break;
		}

		error = bus_dmamap_load_uio(sc->sc_dmat, inmap,
		    uio, BUS_DMA_NOWAIT|BUS_DMA_READ|BUS_DMA_STREAMING);
		break;
	    }

	default:
		error = EINVAL;	/* unknown buffer type */
		break;
	}

	if (__predict_false(error != 0)) {
		bus_dmamap_unload(sc->sc_dmat, dmamap);
		return (error);
	}

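	/*
	 * Each descriptor pairs one input segment with one output
	 * segment, so the two maps must have a 1:1 segment
	 * correspondence (segment lengths are checked below).
	 */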
	if (dmamap->dm_nsegs != inmap->dm_nsegs) {
		bus_dmamap_unload(sc->sc_dmat, dmamap);
		bus_dmamap_unload(sc->sc_dmat, inmap);
		return (EFAULT);	/* "address" error, sort of. */
	}

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, inmap, 0, inmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	prevp = (struct aau_desc_4 **) &sc->sc_firstdesc;
	prevpa = &sc->sc_firstdesc_pa;

	for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
		cur = pool_cache_get(&aau_desc_4_cache, PR_NOWAIT);
		if (cur == NULL) {
			*prevp = NULL;
			error = ENOMEM;
			goto bad;
		}

		*prevp = cur;
		*prevpa = cur->d_pa;

		prevp = &cur->d_next;
		prevpa = &cur->d_nda;

		if (dmamap->dm_segs[seg].ds_len !=
		    inmap->dm_segs[seg].ds_len) {
			*prevp = NULL;
			error = EFAULT;	/* "address" error, sort of. */
			goto bad;
		}

		cur->d_sar1 = inmap->dm_segs[seg].ds_addr;
		cur->d_dar = dmamap->dm_segs[seg].ds_addr;
		cur->d_bc = dmamap->dm_segs[seg].ds_len;
		cur->d_dc = AAU_DC_B1_CC(AAU_DC_CC_DIRECT_FILL) | AAU_DC_DWE;
		SYNC_DESC_4(cur);
	}

	*prevp = NULL;
	*prevpa = 0;

	cur->d_dc |= AAU_DC_IE;
	SYNC_DESC_4(cur);

	sc->sc_lastdesc = cur;

	return (0);

 bad:
	iopaau_desc_4_free(sc, sc->sc_firstdesc);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_out);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map_in[0]);
	sc->sc_firstdesc = NULL;

	return (error);
}

int
iopaau_intr(void *arg)
{
	struct iopaau_softc *sc = arg;
	struct dmover_request *dreq;
	uint32_t asr;

	/* Clear the interrupt. */
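	/* (Status bits are write-one-to-clear, so write back what we read.) */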
	asr = bus_space_read_4(sc->sc_st, sc->sc_sh, AAU_ASR);
	if (asr == 0)
		return (0);
	bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ASR, asr);

	/* XXX -- why does this happen? */
	if (sc->sc_running == NULL) {
		printf("%s: unexpected interrupt, ASR = 0x%08x\n",
		    sc->sc_dev.dv_xname, asr);
		return (1);
	}
	dreq = sc->sc_running;

	/* Stop the AAU. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, AAU_ACR, 0);

	DPRINTF(("%s: got interrupt for dreq %p\n", sc->sc_dev.dv_xname,
	    dreq));

	if (__predict_false((asr & AAU_ASR_ETIF) != 0)) {
		/*
		 * We expect to get end-of-chain interrupts, not
		 * end-of-transfer interrupts, so panic if we get
		 * one of these.
		 */
		panic("iopaau_intr: got EOT interrupt");
	}

	if (__predict_false((asr & AAU_ASR_MA) != 0)) {
		printf("%s: WARNING: got master abort\n", sc->sc_dev.dv_xname);
		dreq->dreq_flags |= DMOVER_REQ_ERROR;
		dreq->dreq_error = EFAULT;
	}

	/* Finish this transfer, start next one. */
	iopaau_finish(sc);

	return (1);
}

void
iopaau_attach(struct iopaau_softc *sc)
{
	int error, i;

	error = bus_dmamap_create(sc->sc_dmat, AAU_MAX_XFER, AAU_MAX_SEGS,
	    AAU_MAX_XFER, AAU_IO_BOUNDARY, 0, &sc->sc_map_out);
	if (error) {
		printf("%s: unable to create output DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	for (i = 0; i < AAU_MAX_INPUTS; i++) {
		error = bus_dmamap_create(sc->sc_dmat, AAU_MAX_XFER,
		    AAU_MAX_SEGS, AAU_MAX_XFER, AAU_IO_BOUNDARY, 0,
		    &sc->sc_map_in[i]);
		if (error) {
			printf("%s: unable to create input %d DMA map, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
	}

	/*
	 * Initialize global resources.  Ok to do here, since there's
	 * only one AAU.
	 */
	pool_init(&aau_desc_4_pool, sizeof(struct aau_desc_4),
	    8 * 4, offsetof(struct aau_desc_4, d_nda), 0, "aaud4pl",
	    NULL);
	pool_cache_init(&aau_desc_4_cache, &aau_desc_4_pool, iopaau_desc_ctor,
	    NULL, NULL);

	/* Register us with dmover. */
	dmover_backend_register(&sc->sc_dmb);
}