/*	$NetBSD: gtidmac.c,v 1.7 2012/01/30 23:31:28 matt Exp $	*/
/*
 * Copyright (c) 2008 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gtidmac.c,v 1.7 2012/01/30 23:31:28 matt Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/endian.h>
#include <sys/kmem.h>

#include <uvm/uvm_param.h>	/* For PAGE_SIZE */

#include <dev/dmover/dmovervar.h>

#include <dev/marvell/gtidmacreg.h>
#include <dev/marvell/gtidmacvar.h>
#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>

#include <prop/proplib.h>

#include "locators.h"

#ifdef GTIDMAC_DEBUG
#define DPRINTF(x)	if (gtidmac_debug) printf x
int gtidmac_debug = 0;
#else
#define DPRINTF(x)
#endif

#define GTIDMAC_NDESC		64
#define GTIDMAC_MAXCHAN		8
#define MVXORE_NDESC		128
#define MVXORE_MAXCHAN		2

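/*
 * Maximum number of scatter/gather segments per transfer map: the
 * page-granularity ceiling of each engine's maximum transfer size.
 */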
#define GTIDMAC_NSEGS		((GTIDMAC_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
#define MVXORE_NSEGS		((MVXORE_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)


struct gtidmac_softc;

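/*
 * Per-engine operation vector used by the dmover(9) glue below; one
 * instance is defined for the IDMA controller and one for the XOR engine.
 */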
struct gtidmac_function {
	int (*chan_alloc)(void *, bus_dmamap_t **, bus_dmamap_t **, void *);
	void (*chan_free)(void *, int);
	int (*dma_setup)(void *, int, int, bus_dmamap_t *, bus_dmamap_t *,
			 bus_size_t);
	void (*dma_start)(void *, int,
			  void (*dma_done_cb)(void *, int, bus_dmamap_t *,
					      bus_dmamap_t *, int));
	uint32_t (*dma_finish)(void *, int, int);
};

struct gtidmac_dma_desc {
	int dd_index;
	union {
		struct gtidmac_desc *idmac_vaddr;
		struct mvxore_desc *xore_vaddr;
	} dd_vaddr;
#define dd_idmac_vaddr	dd_vaddr.idmac_vaddr
#define dd_xore_vaddr	dd_vaddr.xore_vaddr
	paddr_t dd_paddr;
	SLIST_ENTRY(gtidmac_dma_desc) dd_next;
};

struct gtidmac_softc {
	device_t sc_dev;

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;

	bus_dma_tag_t sc_dmat;
	struct gtidmac_dma_desc *sc_dd_buffer;
	bus_dma_segment_t sc_pattern_segment;
	struct {
		u_char pbuf[16];	/* 16 bytes per pattern */
	} *sc_pbuf;			/* 256 patterns */

	int sc_gtidmac_nchan;
	struct gtidmac_desc *sc_dbuf;
	bus_dmamap_t sc_dmap;
	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist;
	struct {
		bus_dmamap_t chan_in;		/* In dmamap */
		bus_dmamap_t chan_out;		/* Out dmamap */
		uint64_t chan_totalcnt;		/* total transferred bytes */
		int chan_ddidx;
		void *chan_running;		/* opaque object data */
		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
				      bus_dmamap_t *, int);
	} sc_cdesc[GTIDMAC_MAXCHAN];
	struct gtidmac_intr_arg {
		struct gtidmac_softc *ia_sc;
		uint32_t ia_cause;
		uint32_t ia_mask;
		uint32_t ia_eaddr;
		uint32_t ia_eselect;
	} sc_intrarg[GTIDMAC_NINTRRUPT];

	int sc_mvxore_nchan;
	struct mvxore_desc *sc_dbuf_xore;
	bus_dmamap_t sc_dmap_xore;
	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist_xore;
	struct {
		bus_dmamap_t chan_in[MVXORE_NSRC];	/* In dmamap */
		bus_dmamap_t chan_out;			/* Out dmamap */
		uint64_t chan_totalcnt;			/* total transferred */
		int chan_ddidx;
		void *chan_running;			/* opaque object data */
		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
				      bus_dmamap_t *, int);
	} sc_cdesc_xore[MVXORE_MAXCHAN];

	struct dmover_backend sc_dmb;
	struct dmover_backend sc_dmb_xore;
	int sc_dmb_busy;
};
struct gtidmac_softc *gtidmac_softc = NULL;

static int gtidmac_match(device_t, struct cfdata *, void *);
static void gtidmac_attach(device_t, device_t, void *);

static int gtidmac_intr(void *);
static int mvxore_intr(void *);

static void gtidmac_process(struct dmover_backend *);
static void gtidmac_dmover_run(struct dmover_backend *);
static void gtidmac_dmover_done(void *, int, bus_dmamap_t *, bus_dmamap_t *,
				int);
__inline int gtidmac_dmmap_load(struct gtidmac_softc *, bus_dmamap_t,
				dmover_buffer_type, dmover_buffer *, int);
__inline void gtidmac_dmmap_unload(struct gtidmac_softc *, bus_dmamap_t, int);

static uint32_t gtidmac_finish(void *, int, int);
static uint32_t mvxore_finish(void *, int, int);

static void gtidmac_wininit(struct gtidmac_softc *);
static void mvxore_wininit(struct gtidmac_softc *);

#ifdef GTIDMAC_DEBUG
static void gtidmac_dump_idmacreg(struct gtidmac_softc *, int);
static void gtidmac_dump_idmacdesc(struct gtidmac_softc *,
				   struct gtidmac_dma_desc *, uint32_t, int);
static void gtidmac_dump_xorereg(struct gtidmac_softc *, int);
static void gtidmac_dump_xoredesc(struct gtidmac_softc *,
				  struct gtidmac_dma_desc *, uint32_t, int);
#endif


static struct gtidmac_function gtidmac_functions = {
	.chan_alloc = gtidmac_chan_alloc,
	.chan_free = gtidmac_chan_free,
	.dma_setup = gtidmac_setup,
	.dma_start = gtidmac_start,
	.dma_finish = gtidmac_finish,
};

static struct gtidmac_function mvxore_functions = {
	.chan_alloc = mvxore_chan_alloc,
	.chan_free = mvxore_chan_free,
	.dma_setup = mvxore_setup,
	.dma_start = mvxore_start,
	.dma_finish = mvxore_finish,
};

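/*
 * Capability tables advertised to dmover(9): zero, fill8 and copy are
 * served by the IDMA channels; copy, iscsi-crc32c and 2- to 8-input
 * xor by the XOR engine.
 */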
static const struct dmover_algdesc gtidmac_algdescs[] = {
	{
		.dad_name = DMOVER_FUNC_ZERO,
		.dad_data = &gtidmac_functions,
		.dad_ninputs = 0
	},
	{
		.dad_name = DMOVER_FUNC_FILL8,
		.dad_data = &gtidmac_functions,
		.dad_ninputs = 0
	},
	{
		.dad_name = DMOVER_FUNC_COPY,
		.dad_data = &gtidmac_functions,
		.dad_ninputs = 1
	},
};

static const struct dmover_algdesc mvxore_algdescs[] = {
#if 0
	/*
	 * These operations carry many restrictions on the XOR engine;
	 * the IDMAC must be used for them instead.
	 */
	{
		.dad_name = DMOVER_FUNC_ZERO,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 0
	},
	{
		.dad_name = DMOVER_FUNC_FILL8,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 0
	},
#endif
	{
		.dad_name = DMOVER_FUNC_COPY,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 1
	},
	{
		.dad_name = DMOVER_FUNC_ISCSI_CRC32C,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 1
	},
	{
		.dad_name = DMOVER_FUNC_XOR2,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 2
	},
	{
		.dad_name = DMOVER_FUNC_XOR3,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 3
	},
	{
		.dad_name = DMOVER_FUNC_XOR4,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 4
	},
	{
		.dad_name = DMOVER_FUNC_XOR5,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 5
	},
	{
		.dad_name = DMOVER_FUNC_XOR6,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 6
	},
	{
		.dad_name = DMOVER_FUNC_XOR7,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 7
	},
	{
		.dad_name = DMOVER_FUNC_XOR8,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 8
	},
};

CFATTACH_DECL_NEW(gtidmac_gt, sizeof(struct gtidmac_softc),
    gtidmac_match, gtidmac_attach, NULL, NULL);
CFATTACH_DECL_NEW(gtidmac_mbus, sizeof(struct gtidmac_softc),
    gtidmac_match, gtidmac_attach, NULL, NULL);


/* ARGSUSED */
static int
gtidmac_match(device_t parent, struct cfdata *match, void *aux)
{
	struct marvell_attach_args *mva = aux;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;
	if (mva->mva_offset == MVA_OFFSET_DEFAULT ||
	    mva->mva_irq == MVA_IRQ_DEFAULT)
		return 0;

	mva->mva_size = GTIDMAC_SIZE;
	return 1;
}

/* ARGSUSED */
static void
gtidmac_attach(device_t parent, device_t self, void *aux)
{
	struct gtidmac_softc *sc = device_private(self);
	struct marvell_attach_args *mva = aux;
	bus_dma_segment_t segs, segs_xore;
	struct gtidmac_dma_desc *dd;
	prop_dictionary_t dict = device_properties(self);
	uint32_t mask, dmb_speed, xore_irq;
	int idmac_nchan, xore_nchan, nsegs, nsegs_xore, i, j, k, n;

	xore_irq = 0;
	idmac_nchan = 8;
	xore_nchan = 0;
	switch (mva->mva_model) {
	case MARVELL_DISCOVERY:
	case MARVELL_DISCOVERY_II:
	case MARVELL_DISCOVERY_III:
		break;

	case MARVELL_ORION_1_88F1181:
	case MARVELL_ORION_1_88F5082:
	case MARVELL_ORION_1_88F5180N:
	case MARVELL_ORION_1_88F5181:
	case MARVELL_ORION_1_88W8660:
	case MARVELL_ORION_2_88F1281:
	case MARVELL_ORION_2_88F5281:
		idmac_nchan = 4;
		break;

#if 0
	case MARVELL_DISCOVERY_LT:
	case MARVELL_DISCOVERY_V:
	case MARVELL_DISCOVERY_VI:	????
#endif
	case MARVELL_ORION_1_88F5182:
		idmac_nchan = 4;
		xore_nchan = 2;
		break;
	}
	if (xore_nchan != 0)
		if (!prop_dictionary_get_uint32(dict, "xore-irq-begin",
		    &xore_irq)) {
			aprint_error(": no xore-irq-begin property\n");
			return;
		}

	aprint_naive("\n");
	aprint_normal(": Marvell IDMA Controller%s\n",
	    xore_nchan ? "/XOR Engine" : "");

	sc->sc_dev = self;
	sc->sc_iot = mva->mva_iot;

	/* Map I/O registers */
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
	    mva->mva_size, &sc->sc_ioh)) {
		aprint_error_dev(self, "can't map registers\n");
		return;
	}

	/*
	 * Initialise DMA descriptors and associated metadata
	 */
	sc->sc_dmat = mva->mva_dmat;
	n = idmac_nchan * GTIDMAC_NDESC + xore_nchan * MVXORE_NDESC;
	sc->sc_dd_buffer =
	    kmem_alloc(sizeof(struct gtidmac_dma_desc) * n, KM_SLEEP);
	if (sc->sc_dd_buffer == NULL) {
		aprint_error_dev(self, "can't allocate memory\n");
		goto fail1;
	}
	/* pattern buffer */
	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &sc->sc_pattern_segment, 1, &nsegs, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self,
		    "bus_dmamem_alloc failed: pattern buffer\n");
		goto fail2;
	}
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_pattern_segment, 1, PAGE_SIZE,
	    (void **)&sc->sc_pbuf, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self,
		    "bus_dmamem_map failed: pattern buffer\n");
		goto fail3;
	}
	for (i = 0; i < 0x100; i++)
		for (j = 0; j < sizeof(sc->sc_pbuf[i].pbuf); j++)
			sc->sc_pbuf[i].pbuf[j] = i;

	/* IDMAC DMA descriptor buffer */
	sc->sc_gtidmac_nchan = idmac_nchan;
	if (bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan,
	    PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self,
		    "bus_dmamem_alloc failed: descriptor buffer\n");
		goto fail4;
	}
	if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan,
	    (void **)&sc->sc_dbuf, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self,
		    "bus_dmamem_map failed: descriptor buffer\n");
		goto fail5;
	}
	if (bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan, 1,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmap)) {
		aprint_error_dev(self,
		    "bus_dmamap_create failed: descriptor buffer\n");
		goto fail6;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, sc->sc_dbuf,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan, NULL,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(self,
		    "bus_dmamap_load failed: descriptor buffer\n");
		goto fail7;
	}
	SLIST_INIT(&sc->sc_dlist);
	for (i = 0; i < GTIDMAC_NDESC * idmac_nchan; i++) {
		dd = &sc->sc_dd_buffer[i];
		dd->dd_index = i;
		dd->dd_idmac_vaddr = &sc->sc_dbuf[i];
		dd->dd_paddr = sc->sc_dmap->dm_segs[0].ds_addr +
		    (sizeof(struct gtidmac_desc) * i);
		SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
	}

	/* Initialize IDMAC DMA channels */
	mask = 0;
	for (i = 0; i < idmac_nchan; i++) {
		if (i > 0 &&
		    ((i * GTIDMAC_I_BITS) & 31 /*bit*/) == 0) {
			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			    GTIDMAC_IMR(i - 1), mask);
			mask = 0;
		}

		if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
		    GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
		    &sc->sc_cdesc[i].chan_in)) {
			aprint_error_dev(self,
			    "bus_dmamap_create failed: chan%d in\n", i);
			goto fail8;
		}
		if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
		    GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
		    &sc->sc_cdesc[i].chan_out)) {
			aprint_error_dev(self,
			    "bus_dmamap_create failed: chan%d out\n", i);
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_cdesc[i].chan_in);
			goto fail8;
		}
		sc->sc_cdesc[i].chan_totalcnt = 0;
		sc->sc_cdesc[i].chan_running = NULL;

		/* Ignore bit overflow; the mask is 32-bit. */
		mask |= GTIDMAC_I(i,
		    GTIDMAC_I_COMP	|
		    GTIDMAC_I_ADDRMISS	|
		    GTIDMAC_I_ACCPROT	|
		    GTIDMAC_I_WRPROT	|
		    GTIDMAC_I_OWN);
	}
	if (i > 0)
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_IMR(i - 1),
		    mask);

	/* Setup interrupt */
	for (j = 0; j < GTIDMAC_NINTRRUPT; j++) {
		int c = j * idmac_nchan / __arraycount(sc->sc_intrarg);

		sc->sc_intrarg[j].ia_sc = sc;
		sc->sc_intrarg[j].ia_cause = GTIDMAC_ICR(c);
		sc->sc_intrarg[j].ia_eaddr = GTIDMAC_EAR(c);
		sc->sc_intrarg[j].ia_eselect = GTIDMAC_ESR(c);
		marvell_intr_establish(mva->mva_irq + j, IPL_BIO,
		    gtidmac_intr, &sc->sc_intrarg[j]);
	}

	if (mva->mva_model != MARVELL_DISCOVERY)
		gtidmac_wininit(sc);

	/* Register us with dmover. */
	sc->sc_dmb.dmb_name = device_xname(self);
	if (!prop_dictionary_get_uint32(dict, "dmb_speed", &dmb_speed)) {
		aprint_error_dev(self, "no dmb_speed property\n");
		dmb_speed = 10;		/* Probably faster than swdmover. */
	}
	sc->sc_dmb.dmb_speed = dmb_speed;
	sc->sc_dmb.dmb_cookie = sc;
	sc->sc_dmb.dmb_algdescs = gtidmac_algdescs;
	sc->sc_dmb.dmb_nalgdescs = __arraycount(gtidmac_algdescs);
	sc->sc_dmb.dmb_process = gtidmac_process;
	dmover_backend_register(&sc->sc_dmb);
	sc->sc_dmb_busy = 0;

	if (xore_nchan) {
		/* XORE DMA descriptor buffer */
		sc->sc_mvxore_nchan = xore_nchan;
		if (bus_dmamem_alloc(sc->sc_dmat,
		    sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan,
		    PAGE_SIZE, 0, &segs_xore, 1, &nsegs_xore, BUS_DMA_NOWAIT)) {
			aprint_error_dev(self, "bus_dmamem_alloc failed:"
			    " xore descriptor buffer\n");
			goto fail8;
		}
		if (bus_dmamem_map(sc->sc_dmat, &segs_xore, 1,
		    sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan,
		    (void **)&sc->sc_dbuf_xore, BUS_DMA_NOWAIT)) {
			aprint_error_dev(self,
			    "bus_dmamem_map failed: xore descriptor buffer\n");
			goto fail9;
		}
		if (bus_dmamap_create(sc->sc_dmat,
		    sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan, 1,
		    sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan, 0,
		    BUS_DMA_NOWAIT, &sc->sc_dmap_xore)) {
			aprint_error_dev(self, "bus_dmamap_create failed:"
			    " xore descriptor buffer\n");
			goto fail10;
		}
		if (bus_dmamap_load(
		    sc->sc_dmat, sc->sc_dmap_xore, sc->sc_dbuf_xore,
		    sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan,
		    NULL, BUS_DMA_NOWAIT)) {
			aprint_error_dev(self,
			    "bus_dmamap_load failed: xore descriptor buffer\n");
			goto fail11;
		}
		SLIST_INIT(&sc->sc_dlist_xore);
		for (j = 0; j < MVXORE_NDESC * xore_nchan; j++) {
			dd = &sc->sc_dd_buffer[j + GTIDMAC_NDESC * idmac_nchan];
			dd->dd_index = j;
			dd->dd_xore_vaddr = &sc->sc_dbuf_xore[j];
			dd->dd_paddr = sc->sc_dmap_xore->dm_segs[0].ds_addr +
			    (sizeof(struct mvxore_desc) * j);
			SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
		}

		/* Initialize XORE DMA channels */
		mask = 0;
		for (j = 0; j < xore_nchan; j++) {
			for (k = 0; k < MVXORE_NSRC; k++) {
				if (bus_dmamap_create(sc->sc_dmat,
				    MVXORE_MAXXFER, MVXORE_NSEGS,
				    MVXORE_MAXXFER, 0, BUS_DMA_NOWAIT,
				    &sc->sc_cdesc_xore[j].chan_in[k])) {
					aprint_error_dev(self,
					    "bus_dmamap_create failed:"
					    " xore chan%d in[%d]\n", j, k);
					goto fail12;
				}
			}
			if (bus_dmamap_create(sc->sc_dmat, MVXORE_MAXXFER,
			    MVXORE_NSEGS, MVXORE_MAXXFER, 0,
			    BUS_DMA_NOWAIT, &sc->sc_cdesc_xore[j].chan_out)) {
				aprint_error_dev(self,
				    "bus_dmamap_create failed: chan%d out\n",
				    j);
				goto fail13;
			}
			sc->sc_cdesc_xore[j].chan_totalcnt = 0;
			sc->sc_cdesc_xore[j].chan_running = NULL;

			mask |= MVXORE_I(j,
			    MVXORE_I_EOC	|
			    MVXORE_I_ADDRDECODE	|
			    MVXORE_I_ACCPROT	|
			    MVXORE_I_WRPROT	|
			    MVXORE_I_OWN	|
			    MVXORE_I_INTPARITY	|
			    MVXORE_I_XBAR);
		}
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIMR, mask);

		marvell_intr_establish(xore_irq + 0, IPL_BIO, mvxore_intr, sc);
		marvell_intr_establish(xore_irq + 1, IPL_BIO, mvxore_intr, sc);

		mvxore_wininit(sc);

		/* Register us with dmover. */
		sc->sc_dmb_xore.dmb_name = device_xname(sc->sc_dev);
		sc->sc_dmb_xore.dmb_speed = dmb_speed;
		sc->sc_dmb_xore.dmb_cookie = sc;
		sc->sc_dmb_xore.dmb_algdescs = mvxore_algdescs;
		sc->sc_dmb_xore.dmb_nalgdescs =
		    __arraycount(mvxore_algdescs);
		sc->sc_dmb_xore.dmb_process = gtidmac_process;
		dmover_backend_register(&sc->sc_dmb_xore);
	}

	gtidmac_softc = sc;

	return;

	for (; j-- > 0;) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc_xore[j].chan_out);

fail13:
		k = MVXORE_NSRC;
fail12:
		for (; k-- > 0;)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_cdesc_xore[j].chan_in[k]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap_xore);
fail11:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap_xore);
fail10:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf_xore,
	    sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan);
fail9:
	bus_dmamem_free(sc->sc_dmat, &segs_xore, 1);
fail8:
	for (; i-- > 0;) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
fail7:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
fail6:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan);
fail5:
	bus_dmamem_free(sc->sc_dmat, &segs, 1);
fail4:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_pbuf, PAGE_SIZE);
fail3:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_pattern_segment, 1);
fail2:
	kmem_free(sc->sc_dd_buffer, sizeof(struct gtidmac_dma_desc) * n);
fail1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, mva->mva_size);
	return;
}


static int
gtidmac_intr(void *arg)
{
	struct gtidmac_intr_arg *ia = arg;
	struct gtidmac_softc *sc = ia->ia_sc;
	uint32_t cause;
	int handled = 0, chan, error;

	cause = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause);
	DPRINTF(("IDMAC intr: cause=0x%x\n", cause));
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause, ~cause);

	chan = 0;
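	/*
	 * The cause register packs GTIDMAC_I_BITS of status per channel;
	 * shift one channel's group out of the cause word per iteration.
	 */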
	while (cause) {
		error = 0;
		if (cause & GTIDMAC_I_ADDRMISS) {
			aprint_error_dev(sc->sc_dev, "Address Miss");
			error = EINVAL;
		}
		if (cause & GTIDMAC_I_ACCPROT) {
			aprint_error_dev(sc->sc_dev,
			    "Access Protect Violation");
			error = EACCES;
		}
		if (cause & GTIDMAC_I_WRPROT) {
			aprint_error_dev(sc->sc_dev, "Write Protect");
			error = EACCES;
		}
		if (cause & GTIDMAC_I_OWN) {
			aprint_error_dev(sc->sc_dev, "Ownership Violation");
			error = EINVAL;
		}

#define GTIDMAC_I_ERROR		  \
	   (GTIDMAC_I_ADDRMISS	| \
	    GTIDMAC_I_ACCPROT	| \
	    GTIDMAC_I_WRPROT	| \
	    GTIDMAC_I_OWN)
		if (cause & GTIDMAC_I_ERROR) {
			uint32_t sel;
			int select;

			sel = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    ia->ia_eselect) & GTIDMAC_ESR_SEL;
			select = sel - chan * GTIDMAC_I_BITS;
			if (select >= 0 && select < GTIDMAC_I_BITS) {
				uint32_t ear;

				ear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				    ia->ia_eaddr);
				aprint_error(": Error Address 0x%x\n", ear);
			} else
				aprint_error(": lost Error Address\n");
		}

		if (cause & (GTIDMAC_I_COMP | GTIDMAC_I_ERROR)) {
			sc->sc_cdesc[chan].chan_dma_done(
			    sc->sc_cdesc[chan].chan_running, chan,
			    &sc->sc_cdesc[chan].chan_in,
			    &sc->sc_cdesc[chan].chan_out, error);
			handled++;
		}

		cause >>= GTIDMAC_I_BITS;
		chan++;
	}
	DPRINTF(("IDMAC intr: %shandled\n", handled ? "" : "not "));

	return handled;
}

static int
mvxore_intr(void *arg)
{
	struct gtidmac_softc *sc = arg;
	uint32_t cause;
	int handled = 0, chan, error;

	cause = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEICR);
	DPRINTF(("XORE intr: cause=0x%x\n", cause));
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEICR, ~cause);

	chan = 0;
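	/* As in gtidmac_intr(), one MVXORE_I_BITS group per channel. */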
	while (cause) {
		error = 0;
		if (cause & MVXORE_I_ADDRDECODE) {
			aprint_error_dev(sc->sc_dev, "Failed address decoding");
			error = EINVAL;
		}
		if (cause & MVXORE_I_ACCPROT) {
			aprint_error_dev(sc->sc_dev,
			    "Access Protect Violation");
			error = EACCES;
		}
		if (cause & MVXORE_I_WRPROT) {
			aprint_error_dev(sc->sc_dev, "Write Protect");
			error = EACCES;
		}
		if (cause & MVXORE_I_OWN) {
			aprint_error_dev(sc->sc_dev, "Ownership Violation");
			error = EINVAL;
		}
		if (cause & MVXORE_I_INTPARITY) {
			aprint_error_dev(sc->sc_dev, "Parity Error");
			error = EIO;
		}
		if (cause & MVXORE_I_XBAR) {
			aprint_error_dev(sc->sc_dev, "Crossbar Parity Error");
			error = EINVAL;
		}

#define MVXORE_I_ERROR		  \
	   (MVXORE_I_ADDRDECODE	| \
	    MVXORE_I_ACCPROT	| \
	    MVXORE_I_WRPROT	| \
	    MVXORE_I_OWN	| \
	    MVXORE_I_INTPARITY	| \
	    MVXORE_I_XBAR)
		if (cause & MVXORE_I_ERROR) {
			uint32_t type;
			int event;

			type = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    MVXORE_XEECR) & MVXORE_XEECR_ERRORTYPE_MASK;
			event = type - chan * MVXORE_I_BITS;
			if (event >= 0 && event < MVXORE_I_BITS) {
				uint32_t xeear;

				xeear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				    MVXORE_XEEAR);
				aprint_error(": Error Address 0x%x\n", xeear);
			} else
				aprint_error(": lost Error Address\n");
		}

		if (cause & (MVXORE_I_EOC | MVXORE_I_ERROR)) {
			sc->sc_cdesc_xore[chan].chan_dma_done(
			    sc->sc_cdesc_xore[chan].chan_running, chan,
			    sc->sc_cdesc_xore[chan].chan_in,
			    &sc->sc_cdesc_xore[chan].chan_out, error);
			handled++;
		}

		cause >>= MVXORE_I_BITS;
		chan++;
	}
	DPRINTF(("XORE intr: %shandled\n", handled ? "" : "not "));

	return handled;
}


/*
 * dmover(9) backend function.
 */
static void
gtidmac_process(struct dmover_backend *dmb)
{
	struct gtidmac_softc *sc = dmb->dmb_cookie;
	int s;

	/* If the backend is currently idle, go process the queue. */
	s = splbio();
	if (!sc->sc_dmb_busy)
		gtidmac_dmover_run(dmb);
	splx(s);
}
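
/*
 * For reference, a client reaches gtidmac_process() through the generic
 * dmover(9) API along roughly these lines (sketch only, not compiled here;
 * "src", "dst" and "len" are placeholders -- see dmover(9) for the
 * authoritative interface):
 */
#if 0
	struct dmover_session *dses;
	struct dmover_request *dreq;

	if (dmover_session_create(DMOVER_FUNC_COPY, &dses) != 0)
		return;
	dreq = dmover_request_alloc(dses, NULL);
	dreq->dreq_flags = DMOVER_REQ_WAIT;	/* synchronous completion */
	dreq->dreq_inbuf_type = DMOVER_BUF_LINEAR;
	dreq->dreq_inbuf[0].dmbuf_linear.l_addr = src;
	dreq->dreq_inbuf[0].dmbuf_linear.l_len = len;
	dreq->dreq_outbuf_type = DMOVER_BUF_LINEAR;
	dreq->dreq_outbuf.dmbuf_linear.l_addr = dst;
	dreq->dreq_outbuf.dmbuf_linear.l_len = len;
	dmover_process(dreq);	/* queues to a backend such as this one */
#endif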

static void
gtidmac_dmover_run(struct dmover_backend *dmb)
{
	struct gtidmac_softc *sc = dmb->dmb_cookie;
	struct dmover_request *dreq;
	const struct dmover_algdesc *algdesc;
	struct gtidmac_function *df;
	bus_dmamap_t *dmamap_in, *dmamap_out;
	int chan, ninputs, error, i;

	sc->sc_dmb_busy = 1;

	for (;;) {
		dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
		if (dreq == NULL)
			break;
		algdesc = dreq->dreq_assignment->das_algdesc;
		df = algdesc->dad_data;
		chan = (*df->chan_alloc)(sc, &dmamap_in, &dmamap_out, dreq);
		if (chan == -1)
			return;

		dmover_backend_remque(dmb, dreq);
		dreq->dreq_flags |= DMOVER_REQ_RUNNING;

		/* XXXUNLOCK */

		error = 0;

		/* Load dmover's in/out buffers into the bus_dmamaps. */
		ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
		if (ninputs == 0) {
			int pno = 0;

			if (algdesc->dad_name == DMOVER_FUNC_FILL8)
				pno = dreq->dreq_immediate[0];

			i = 0;
			error = bus_dmamap_load(sc->sc_dmat, *dmamap_in,
			    &sc->sc_pbuf[pno], sizeof(sc->sc_pbuf[pno]), NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE);
			if (error == 0) {
				bus_dmamap_sync(sc->sc_dmat, *dmamap_in, 0,
				    sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
				/*
				 * Count this map as loaded so that
				 * gtidmac_dmmap_unload() is called on error.
				 */
				i = 1;
			}
		} else
			for (i = 0; i < ninputs; i++) {
				error = gtidmac_dmmap_load(sc,
				    *(dmamap_in + i), dreq->dreq_inbuf_type,
				    &dreq->dreq_inbuf[i], 0/*write*/);
				if (error != 0)
					break;
			}
		if (algdesc->dad_name != DMOVER_FUNC_ISCSI_CRC32C) {
			if (error == 0)
				error = gtidmac_dmmap_load(sc, *dmamap_out,
				    dreq->dreq_outbuf_type, &dreq->dreq_outbuf,
				    1/*read*/);

			if (error == 0) {
				/*
				 * In a dmover request, the output buffer size
				 * is always taken as the DMA transfer size.
				 */
				error = (*df->dma_setup)(sc, chan, ninputs,
				    dmamap_in, dmamap_out,
				    (*dmamap_out)->dm_mapsize);
				if (error != 0)
					gtidmac_dmmap_unload(sc, *dmamap_out,
					    1);
			}
		} else
			if (error == 0)
				error = (*df->dma_setup)(sc, chan, ninputs,
				    dmamap_in, dmamap_out,
				    (*dmamap_in)->dm_mapsize);

		/* XXXLOCK */

		if (error != 0) {
			for (; i-- > 0;)
				gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
			(*df->chan_free)(sc, chan);

			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			dreq->dreq_error = error;
			/* XXXUNLOCK */
			dmover_done(dreq);
			/* XXXLOCK */
			continue;
		}

		(*df->dma_start)(sc, chan, gtidmac_dmover_done);
		break;
	}

	/* All done */
	sc->sc_dmb_busy = 0;
}

static void
gtidmac_dmover_done(void *object, int chan, bus_dmamap_t *dmamap_in,
		    bus_dmamap_t *dmamap_out, int error)
{
	struct gtidmac_softc *sc;
	struct dmover_request *dreq = object;
	struct dmover_backend *dmb;
	struct gtidmac_function *df;
	uint32_t result;
	int ninputs, i;

	KASSERT(dreq != NULL);

	dmb = dreq->dreq_assignment->das_backend;
	df = dreq->dreq_assignment->das_algdesc->dad_data;
	ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
	sc = dmb->dmb_cookie;

	result = (*df->dma_finish)(sc, chan, error);
	for (i = 0; i < ninputs; i++)
		gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
	if (dreq->dreq_assignment->das_algdesc->dad_name ==
	    DMOVER_FUNC_ISCSI_CRC32C)
		memcpy(dreq->dreq_immediate, &result, sizeof(result));
	else
		gtidmac_dmmap_unload(sc, *dmamap_out, 1);

	(*df->chan_free)(sc, chan);

	if (error) {
		dreq->dreq_error = error;
		dreq->dreq_flags |= DMOVER_REQ_ERROR;
	}

	dmover_done(dreq);

	/*
	 * See if we can start some more dmover(9) requests.
	 *
	 * Note: We're already at splbio() here.
	 */
	if (!sc->sc_dmb_busy)
		gtidmac_dmover_run(dmb);
}

__inline int
gtidmac_dmmap_load(struct gtidmac_softc *sc, bus_dmamap_t dmamap,
		   dmover_buffer_type dmbuf_type, dmover_buffer *dmbuf,
		   int read)
{
	int error, flags;

	flags = BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    (read ? BUS_DMA_READ : BUS_DMA_WRITE);

	switch (dmbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dmbuf->dmbuf_linear.l_addr, dmbuf->dmbuf_linear.l_len,
		    NULL, flags);
		break;

	case DMOVER_BUF_UIO:
		if ((read && dmbuf->dmbuf_uio->uio_rw != UIO_READ) ||
		    (!read && dmbuf->dmbuf_uio->uio_rw == UIO_READ))
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    dmbuf->dmbuf_uio, flags);
		break;

	default:
		error = EINVAL;
	}

	if (error == 0)
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    read ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	return error;
}

__inline void
gtidmac_dmmap_unload(struct gtidmac_softc *sc, bus_dmamap_t dmamap, int read)
{

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    read ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->sc_dmat, dmamap);
}


void *
gtidmac_tag_get(void)
{

	return gtidmac_softc;
}
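
/*
 * A kernel consumer could also drive an IDMA channel directly, along
 * these lines (sketch only; "cookie" and "done_cb" are placeholders, and
 * the in/out dmamaps must be loaded before gtidmac_setup() is called):
 */
#if 0
	void *tag = gtidmac_tag_get();
	bus_dmamap_t *in, *out;
	int chan;

	chan = gtidmac_chan_alloc(tag, &in, &out, cookie);
	if (chan >= 0) {
		/* bus_dmamap_load() *in and *out here, then: */
		if (gtidmac_setup(tag, chan, 1, in, out,
		    (*out)->dm_mapsize) == 0)
			gtidmac_start(tag, chan, done_cb);
		else
			gtidmac_chan_free(tag, chan);
	}
#endif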

/*
 * IDMAC functions
 */
int
gtidmac_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
		   bus_dmamap_t **dmamap_out, void *object)
{
	struct gtidmac_softc *sc = tag;
	int chan;

/* maybe need lock */

	for (chan = 0; chan < sc->sc_gtidmac_nchan; chan++)
		if (sc->sc_cdesc[chan].chan_running == NULL)
			break;
	if (chan >= sc->sc_gtidmac_nchan)
		return -1;


	sc->sc_cdesc[chan].chan_running = object;

/* unlock */

	*dmamap_in = &sc->sc_cdesc[chan].chan_in;
	*dmamap_out = &sc->sc_cdesc[chan].chan_out;

	return chan;
}

void
gtidmac_chan_free(void *tag, int chan)
{
	struct gtidmac_softc *sc = tag;

/* maybe need lock */

	sc->sc_cdesc[chan].chan_running = NULL;

/* unlock */
}

/* ARGSUSED */
int
gtidmac_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
	      bus_dmamap_t *dmamap_out, bus_size_t size)
{
	struct gtidmac_softc *sc = tag;
	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
	struct gtidmac_desc *desc;
	uint32_t ccl, bcnt, ires, ores;
	int n = 0, iidx, oidx;

	KASSERT(ninputs == 0 || ninputs == 1);

	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
#ifdef DIAGNOSTIC
	if (ccl & GTIDMAC_CCLR_CHANACT)
		panic("gtidmac_setup: chan%d already active", chan);
#endif

	/* We always use chain mode, with at most (16M - 1) bytes/desc */
	ccl = (GTIDMAC_CCLR_DESCMODE_16M				|
#ifdef GTIDMAC_DEBUG
	    GTIDMAC_CCLR_CDEN						|
#endif
	    GTIDMAC_CCLR_TRANSFERMODE_B /* Transfer Mode: Block */	|
	    GTIDMAC_CCLR_INTMODE_NULL   /* Intr Mode: Next Desc NULL */	|
	    GTIDMAC_CCLR_CHAINMODE_C    /* Chain Mode: Chained */);
	if (size != (*dmamap_in)->dm_mapsize) {
		ccl |= GTIDMAC_CCLR_SRCHOLD;
		if ((*dmamap_in)->dm_mapsize == 8)
			ccl |= GTIDMAC_CCLR_SBL_8B;
		else if ((*dmamap_in)->dm_mapsize == 16)
			ccl |= GTIDMAC_CCLR_SBL_16B;
		else if ((*dmamap_in)->dm_mapsize == 32)
			ccl |= GTIDMAC_CCLR_SBL_32B;
		else if ((*dmamap_in)->dm_mapsize == 64)
			ccl |= GTIDMAC_CCLR_SBL_64B;
		else if ((*dmamap_in)->dm_mapsize == 128)
			ccl |= GTIDMAC_CCLR_SBL_128B;
		else
			panic("gtidmac_setup: chan%d source:"
			    " unsupported hold size", chan);
	} else
		ccl |= GTIDMAC_CCLR_SBL_128B;
	if (size != (*dmamap_out)->dm_mapsize) {
		ccl |= GTIDMAC_CCLR_DESTHOLD;
		if ((*dmamap_out)->dm_mapsize == 8)
			ccl |= GTIDMAC_CCLR_DBL_8B;
		else if ((*dmamap_out)->dm_mapsize == 16)
			ccl |= GTIDMAC_CCLR_DBL_16B;
		else if ((*dmamap_out)->dm_mapsize == 32)
			ccl |= GTIDMAC_CCLR_DBL_32B;
		else if ((*dmamap_out)->dm_mapsize == 64)
			ccl |= GTIDMAC_CCLR_DBL_64B;
		else if ((*dmamap_out)->dm_mapsize == 128)
			ccl |= GTIDMAC_CCLR_DBL_128B;
		else
			panic("gtidmac_setup: chan%d destination:"
			    " unsupported hold size", chan);
	} else
		ccl |= GTIDMAC_CCLR_DBL_128B;

	fstdd = SLIST_FIRST(&sc->sc_dlist);
	if (fstdd == NULL) {
		aprint_error_dev(sc->sc_dev, "no descriptor\n");
		return ENOMEM;
	}
	SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
	sc->sc_cdesc[chan].chan_ddidx = fstdd->dd_index;

	dd = fstdd;
	ires = ores = 0;
	iidx = oidx = 0;
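	/*
	 * Build the descriptor chain: each descriptor covers the largest
	 * run that fits both the current input and output segments (or
	 * the whole transfer when the corresponding address is held).
	 */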
	while (1 /*CONSTCOND*/) {
		if (ccl & GTIDMAC_CCLR_SRCHOLD) {
			if (ccl & GTIDMAC_CCLR_DESTHOLD)
				bcnt = size;	/* src/dst hold */
			else
				bcnt = (*dmamap_out)->dm_segs[oidx].ds_len;
		} else if (ccl & GTIDMAC_CCLR_DESTHOLD)
			bcnt = (*dmamap_in)->dm_segs[iidx].ds_len;
		else
			bcnt = min((*dmamap_in)->dm_segs[iidx].ds_len - ires,
			    (*dmamap_out)->dm_segs[oidx].ds_len - ores);

		desc = dd->dd_idmac_vaddr;
		desc->bc.mode16m.bcnt =
		    bcnt | GTIDMAC_CIDMABCR_BCLEFT | GTIDMAC_CIDMABCR_OWN;
		desc->srcaddr = (*dmamap_in)->dm_segs[iidx].ds_addr + ires;
		desc->dstaddr = (*dmamap_out)->dm_segs[oidx].ds_addr + ores;

		n += bcnt;
		if (n >= size)
			break;
		if (!(ccl & GTIDMAC_CCLR_SRCHOLD)) {
			ires += bcnt;
			if (ires >= (*dmamap_in)->dm_segs[iidx].ds_len) {
				ires = 0;
				iidx++;
				KASSERT(iidx < (*dmamap_in)->dm_nsegs);
			}
		}
		if (!(ccl & GTIDMAC_CCLR_DESTHOLD)) {
			ores += bcnt;
			if (ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
				ores = 0;
				oidx++;
				KASSERT(oidx < (*dmamap_out)->dm_nsegs);
			}
		}

		nxtdd = SLIST_FIRST(&sc->sc_dlist);
		if (nxtdd == NULL) {
			aprint_error_dev(sc->sc_dev, "no descriptor\n");
			return ENOMEM;
		}
		SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);

		desc->nextdp = (uint32_t)nxtdd->dd_paddr;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
		    dd->dd_index * sizeof(*desc), sizeof(*desc),
#ifdef GTIDMAC_DEBUG
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#else
		    BUS_DMASYNC_PREWRITE);
#endif

		SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
		dd = nxtdd;
	}
	desc->nextdp = (uint32_t)NULL;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, dd->dd_index * sizeof(*desc),
#ifdef GTIDMAC_DEBUG
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#else
	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
#endif

	/* Set paddr of descriptor to Channel Next Descriptor Pointer */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan),
	    fstdd->dd_paddr);

#if BYTE_ORDER == LITTLE_ENDIAN
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_LE);
#else
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_BE);
#endif
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan), ccl);

#ifdef GTIDMAC_DEBUG
	gtidmac_dump_idmacdesc(sc, fstdd, ccl, 0/*pre*/);
#endif

	sc->sc_cdesc[chan].chan_totalcnt += size;

	return 0;
}

void
gtidmac_start(void *tag, int chan,
	      void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
				  int))
{
	struct gtidmac_softc *sc = tag;
	uint32_t ccl;

	DPRINTF(("%s:%d: starting\n", device_xname(sc->sc_dev), chan));

#ifdef GTIDMAC_DEBUG
	gtidmac_dump_idmacreg(sc, chan);
#endif

	sc->sc_cdesc[chan].chan_dma_done = dma_done_cb;

	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
	/* Start and 'Fetch Next Descriptor' */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan),
	    ccl | GTIDMAC_CCLR_CHANEN | GTIDMAC_CCLR_FETCHND);
}

static uint32_t
gtidmac_finish(void *tag, int chan, int error)
{
	struct gtidmac_softc *sc = tag;
	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
	struct gtidmac_desc *desc;

	fstdd = &sc->sc_dd_buffer[sc->sc_cdesc[chan].chan_ddidx];

#ifdef GTIDMAC_DEBUG
	if (error || gtidmac_debug > 1) {
		uint32_t ccl;

		gtidmac_dump_idmacreg(sc, chan);
		ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    GTIDMAC_CCLR(chan));
		gtidmac_dump_idmacdesc(sc, fstdd, ccl, 1/*post*/);
	}
#endif

	dd = fstdd;
	do {
		desc = dd->dd_idmac_vaddr;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
		    dd->dd_index * sizeof(*desc), sizeof(*desc),
#ifdef GTIDMAC_DEBUG
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#else
		    BUS_DMASYNC_POSTWRITE);
#endif

		nxtdd = SLIST_NEXT(dd, dd_next);
		SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
		dd = nxtdd;
	} while (desc->nextdp);

	return 0;
}

/*
 * XORE functions
 */
int
mvxore_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
		  bus_dmamap_t **dmamap_out, void *object)
{
	struct gtidmac_softc *sc = tag;
	int chan;

/* maybe need lock */

	for (chan = 0; chan < sc->sc_mvxore_nchan; chan++)
		if (sc->sc_cdesc_xore[chan].chan_running == NULL)
			break;
	if (chan >= sc->sc_mvxore_nchan)
		return -1;


	sc->sc_cdesc_xore[chan].chan_running = object;

/* unlock */

	*dmamap_in = sc->sc_cdesc_xore[chan].chan_in;
	*dmamap_out = &sc->sc_cdesc_xore[chan].chan_out;

	return chan;
}

void
mvxore_chan_free(void *tag, int chan)
{
	struct gtidmac_softc *sc = tag;

/* maybe need lock */

	sc->sc_cdesc_xore[chan].chan_running = NULL;

/* unlock */
}

/* ARGSUSED */
int
mvxore_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
	     bus_dmamap_t *dmamap_out, bus_size_t size)
{
	struct gtidmac_softc *sc = tag;
	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
	struct mvxore_desc *desc;
	uint32_t xexc, bcnt, cmd, lastcmd;
	int n = 0, i;
	uint32_t ires[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, ores = 0;
	int iidx[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, oidx = 0;

#ifdef DIAGNOSTIC
	uint32_t xexact;

	xexact = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(chan));
	if ((xexact & MVXORE_XEXACTR_XESTATUS_MASK) ==
	    MVXORE_XEXACTR_XESTATUS_ACT)
		panic("mvxore_setup: chan%d already active."
		    " mvxore does not support hot insertion", chan);
#endif

	xexc =
	    (MVXORE_XEXCR_REGACCPROTECT	|
	     MVXORE_XEXCR_DBL_128B	|
	     MVXORE_XEXCR_SBL_128B);
	cmd = lastcmd = 0;
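	/*
	 * Pick the operation mode from the number of inputs: two or more
	 * inputs is XOR, one input is CRC32 (no output map) or plain DMA,
	 * and zero inputs selects the descriptor-less ECC/MemInit path
	 * handled immediately below.
	 */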
	if (ninputs > 1) {
		xexc |= MVXORE_XEXCR_OM_XOR;
		lastcmd = cmd = (1 << ninputs) - 1;
	} else if (ninputs == 1) {
		if ((*dmamap_out)->dm_nsegs == 0) {
			xexc |= MVXORE_XEXCR_OM_CRC32;
			lastcmd = MVXORE_DESC_CMD_CRCLAST;
		} else
			xexc |= MVXORE_XEXCR_OM_DMA;
	} else if (ninputs == 0) {
		if ((*dmamap_out)->dm_nsegs != 1) {
			aprint_error_dev(sc->sc_dev,
			    "XORE does not support %d DMA segments\n",
			    (*dmamap_out)->dm_nsegs);
			return EINVAL;
		}

		if ((*dmamap_in)->dm_mapsize == 0) {
			xexc |= MVXORE_XEXCR_OM_ECC;

			/* XXXXX: Maybe need to set Timer Mode registers? */

#if 0
		} else if ((*dmamap_in)->dm_mapsize == 8 ||
		    (*dmamap_in)->dm_mapsize == 16) { /* in case dmover */
			uint64_t pattern;

			/* XXXX: Get pattern data */

			KASSERT((*dmamap_in)->dm_mapsize == 8 ||
			    (void *)((uint32_t)(*dmamap_in)->_dm_origbuf &
						~PAGE_MASK) == sc->sc_pbuf);
			pattern = *(uint64_t *)(*dmamap_in)->_dm_origbuf;

			/* XXXXX: XORE has an IVR.  We should get this first. */
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRL,
			    pattern);
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRH,
			    pattern >> 32);

			xexc |= MVXORE_XEXCR_OM_MEMINIT;
#endif
		} else {
			aprint_error_dev(sc->sc_dev,
			    "XORE does not support DMA mapsize %zd\n",
			    (*dmamap_in)->dm_mapsize);
			return EINVAL;
		}
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXDPR(chan),
		    (*dmamap_out)->dm_segs[0].ds_addr);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXBSR(chan),
		    (*dmamap_out)->dm_mapsize);

		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(chan),
		    xexc);
		sc->sc_cdesc_xore[chan].chan_totalcnt += size;

		return 0;
	}

	/* Make descriptor for DMA/CRC32/XOR */

	fstdd = SLIST_FIRST(&sc->sc_dlist_xore);
	if (fstdd == NULL) {
		aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
		return ENOMEM;
	}
	SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
	sc->sc_cdesc_xore[chan].chan_ddidx =
	    fstdd->dd_index + GTIDMAC_NDESC * sc->sc_gtidmac_nchan;

	dd = fstdd;
	while (1 /*CONSTCOND*/) {
		desc = dd->dd_xore_vaddr;
		desc->stat = MVXORE_DESC_STAT_OWN;
		desc->cmd = cmd;
		if ((*dmamap_out)->dm_nsegs != 0) {
			desc->dstaddr =
			    (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
			bcnt = (*dmamap_out)->dm_segs[oidx].ds_len - ores;
		} else {
			desc->dstaddr = 0;
			bcnt = MVXORE_MAXXFER;	/* XXXXX */
		}
		for (i = 0; i < ninputs; i++) {
			desc->srcaddr[i] =
			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_addr + ires[i];
			bcnt = min(bcnt,
			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len - ires[i]);
		}
		desc->bcnt = bcnt;

		n += bcnt;
		if (n >= size)
			break;
		ores += bcnt;
		if ((*dmamap_out)->dm_nsegs != 0 &&
		    ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
			ores = 0;
			oidx++;
			KASSERT(oidx < (*dmamap_out)->dm_nsegs);
		}
		for (i = 0; i < ninputs; i++) {
			ires[i] += bcnt;
			if (ires[i] >=
			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len) {
				ires[i] = 0;
				iidx[i]++;
				KASSERT(iidx[i] < (*dmamap_in[i]).dm_nsegs);
			}
		}

		nxtdd = SLIST_FIRST(&sc->sc_dlist_xore);
		if (nxtdd == NULL) {
			aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
			return ENOMEM;
		}
		SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);

		desc->nextda = (uint32_t)nxtdd->dd_paddr;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
		    dd->dd_index * sizeof(*desc), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
		dd = nxtdd;
	}
	desc->cmd = lastcmd;
	desc->nextda = (uint32_t)NULL;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
	    dd->dd_index * sizeof(*desc), sizeof(*desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Set paddr of descriptor to Channel Next Descriptor Pointer */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXNDPR(chan),
	    fstdd->dd_paddr);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(chan), xexc);

#ifdef GTIDMAC_DEBUG
	gtidmac_dump_xoredesc(sc, fstdd, xexc, 0/*pre*/);
#endif

	sc->sc_cdesc_xore[chan].chan_totalcnt += size;

	return 0;
}

void
mvxore_start(void *tag, int chan,
	     void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
				 int))
{
	struct gtidmac_softc *sc = tag;
	uint32_t xexact;

	DPRINTF(("%s:%d: xore starting\n", device_xname(sc->sc_dev), chan));

#ifdef GTIDMAC_DEBUG
	gtidmac_dump_xorereg(sc, chan);
#endif

	sc->sc_cdesc_xore[chan].chan_dma_done = dma_done_cb;

	xexact = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(chan));
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(chan),
	    xexact | MVXORE_XEXACTR_XESTART);
}

static uint32_t
mvxore_finish(void *tag, int chan, int error)
{
	struct gtidmac_softc *sc = tag;
	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
	struct mvxore_desc *desc;
	uint32_t xexc;

#ifdef GTIDMAC_DEBUG
	if (error || gtidmac_debug > 1)
		gtidmac_dump_xorereg(sc, chan);
#endif

	xexc = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(chan));
	if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_ECC ||
	    (xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_MEMINIT)
		return 0;

	fstdd = &sc->sc_dd_buffer[sc->sc_cdesc_xore[chan].chan_ddidx];

#ifdef GTIDMAC_DEBUG
	if (error || gtidmac_debug > 1)
		gtidmac_dump_xoredesc(sc, fstdd, xexc, 1/*post*/);
#endif

	dd = fstdd;
	do {
		desc = dd->dd_xore_vaddr;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
		    dd->dd_index * sizeof(*desc), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		nxtdd = SLIST_NEXT(dd, dd_next);
		SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
		dd = nxtdd;
	} while (desc->nextda);

	if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_CRC32)
		return desc->result;
	return 0;
}

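/*
 * Program the address-decoding windows so that the engines can reach
 * the SDRAM chip selects; the windows actually mapped are granted full
 * access rights.
 */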
static void
gtidmac_wininit(struct gtidmac_softc *sc)
{
	device_t pdev = device_parent(sc->sc_dev);
	uint64_t base;
	uint32_t size, cxap, en;
	int window, target, attr, rv, i;
	struct {
		int tag;
		int winacc;
	} targets[] = {
		{ MARVELL_TAG_SDRAM_CS0,	GTIDMAC_CXAPR_WINACC_FA },
		{ MARVELL_TAG_SDRAM_CS1,	GTIDMAC_CXAPR_WINACC_FA },
		{ MARVELL_TAG_SDRAM_CS2,	GTIDMAC_CXAPR_WINACC_FA },
		{ MARVELL_TAG_SDRAM_CS3,	GTIDMAC_CXAPR_WINACC_FA },

		/* The following targets could also be set: */
		/*   Devices       = 0x1(ORION_TARGETID_DEVICE_*) */
		/*   PCI           = 0x3(ORION_TARGETID_PCI0_*) */
		/*   PCI Express   = 0x4(ORION_TARGETID_PEX?_*) */
		/*   Tunit SRAM(?) = 0x5(???) */

   1577 		{ MARVELL_TAG_UNDEFINED,	GTIDMAC_CXAPR_WINACC_NOAA }
   1578 	};
   1579 
   1580 	en = 0xff;
   1581 	cxap = 0;
   1582 	for (window = 0, i = 0;
   1583 	    targets[i].tag != MARVELL_TAG_UNDEFINED && window < GTIDMAC_NWINDOW;
   1584 	    i++) {
   1585 		rv = marvell_winparams_by_tag(pdev, targets[i].tag,
   1586 		    &target, &attr, &base, &size);
   1587 		if (rv != 0 || size == 0)
   1588 			continue;
   1589 
   1590 		if (base > 0xffffffffULL) {
   1591 			if (window >= GTIDMAC_NREMAP) {
   1592 				aprint_error_dev(sc->sc_dev,
   1593 				    "can't remap window %d\n", window);
   1594 				continue;
   1595 			}
   1596 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
   1597 			    GTIDMAC_HARXR(window), (base >> 32) & 0xffffffff);
   1598 		}
   1599 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BARX(window),
   1600 		    GTIDMAC_BARX_TARGET(target)	|
   1601 		    GTIDMAC_BARX_ATTR(attr)	|
   1602 		    GTIDMAC_BARX_BASE(base));
   1603 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_SRX(window),
   1604 		    GTIDMAC_SRX_SIZE(size));
   1605 		en &= ~GTIDMAC_BAER_EN(window);
   1606 		cxap |= GTIDMAC_CXAPR_WINACC(window, targets[i].winacc);
   1607 		window++;
   1608 	}
   1609 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BAER, en);
   1610 
   1611 	for (i = 0; i < GTIDMAC_NACCPROT; i++)
   1612 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CXAPR(i),
   1613 		    cxap);
   1614 }
   1615 
   1616 static void
   1617 mvxore_wininit(struct gtidmac_softc *sc)
   1618 {
   1619 	device_t pdev = device_parent(sc->sc_dev);
   1620 	uint64_t base;
   1621 	uint32_t target, attr, size, xexwc;
   1622 	int window, rv, i;
   1623 	struct {
   1624 		int tag;
   1625 		int winacc;
   1626 	} targets[] = {
   1627 		{ MARVELL_TAG_SDRAM_CS0,	MVXORE_XEXWCR_WINACC_FA },
   1628 		{ MARVELL_TAG_SDRAM_CS1,	MVXORE_XEXWCR_WINACC_FA },
   1629 		{ MARVELL_TAG_SDRAM_CS2,	MVXORE_XEXWCR_WINACC_FA },
   1630 		{ MARVELL_TAG_SDRAM_CS3,	MVXORE_XEXWCR_WINACC_FA },
   1631 
   1632 		{ MARVELL_TAG_UNDEFINED,	MVXORE_XEXWCR_WINACC_NOAA }
   1633 	};
   1634 
   1635 	xexwc = 0;
   1636 	for (window = 0, i = 0;
   1637 	    targets[i].tag != MARVELL_TAG_UNDEFINED && window < MVXORE_NWINDOW;
   1638 	    i++) {
   1639 		rv = marvell_winparams_by_tag(pdev, targets[i].tag,
   1640 		    &target, &attr, &base, &size);
   1641 		if (rv != 0 || size == 0)
   1642 			continue;
   1643 
   1644 		if (base > 0xffffffffULL) {
   1645 			if (window >= MVXORE_NREMAP) {
   1646 				aprint_error_dev(sc->sc_dev,
   1647 				    "can't remap window %d\n", window);
   1648 				continue;
   1649 			}
   1650 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
   1651 			    MVXORE_XEHARRX(window), (base >> 32) & 0xffffffff);
   1652 		}
   1653 
   1654 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEBARX(window),
   1655 		    MVXORE_XEBARX_TARGET(target) |
   1656 		    MVXORE_XEBARX_ATTR(attr) |
   1657 		    MVXORE_XEBARX_BASE(base));
   1658 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
   1659 		    MVXORE_XESMRX(window), MVXORE_XESMRX_SIZE(size));
   1660 		xexwc |= (MVXORE_XEXWCR_WINEN(window) |
   1661 		    MVXORE_XEXWCR_WINACC(window, targets[i].winacc));
   1662 		window++;
   1663 	}
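	/* Both XOR channels share the same window configuration. */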
   1664 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXWCR(0), xexwc);
   1665 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXWCR(1), xexwc);
   1666 
    1667 	/* XXX: reset the address override control; 0 disables overrides. */
   1668 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXAOCR(0), 0);
   1669 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXAOCR(1), 0);
   1670 }
   1671 
   1672 
   1673 #ifdef GTIDMAC_DEBUG
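/*
 * Debug helpers, compiled in only with GTIDMAC_DEBUG: dump the IDMAC and
 * XOR engine registers and descriptor chains in a human-readable form.
 */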
   1674 static void
   1675 gtidmac_dump_idmacreg(struct gtidmac_softc *sc, int chan)
   1676 {
   1677 	uint32_t val;
   1678 	char buf[256];
   1679 
   1680 	printf("IDMAC Registers\n");
   1681 
   1682 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMABCR(chan));
   1683 	snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036BCLeft\0", val);
   1684 	printf("  Byte Count                 : %s\n", buf);
   1685 	printf("    ByteCnt                  :   0x%06x\n",
   1686 	    val & GTIDMAC_CIDMABCR_BYTECNT_MASK);
   1687 	printf("  Source Address             : 0x%08x\n",
   1688 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMASAR(chan)));
   1689 	printf("  Destination Address        : 0x%08x\n",
   1690 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMADAR(chan)));
   1691 	printf("  Next Descriptor Pointer    : 0x%08x\n",
   1692 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan)));
   1693 	printf("  Current Descriptor Pointer : 0x%08x\n",
   1694 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCDPR(chan)));
   1695 
   1696 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
   1697 	snprintb(buf, sizeof(buf),
   1698 	    "\177\020b\024Abr\0b\021CDEn\0b\016ChanAct\0b\015FetchND\0"
   1699 	    "b\014ChanEn\0b\012IntMode\0b\005DestHold\0b\003SrcHold\0",
   1700 	    val);
   1701 	printf("  Channel Control (Low)      : %s\n", buf);
   1702 	printf("    SrcBurstLimit            : %s Bytes\n",
   1703 	  (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_128B ? "128" :
   1704 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_64B ? "64" :
   1705 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_32B ? "32" :
   1706 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_16B ? "16" :
   1707 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_8B ? "8" :
    1708 	    "unknown");
   1709 	printf("    DstBurstLimit            : %s Bytes\n",
   1710 	  (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_128B ? "128" :
   1711 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_64B ? "64" :
   1712 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_32B ? "32" :
   1713 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_16B ? "16" :
   1714 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_8B ? "8" :
    1715 	    "unknown");
   1716 	printf("    ChainMode                : %sChained\n",
   1717 	    val & GTIDMAC_CCLR_CHAINMODE_NC ? "Non-" : "");
   1718 	printf("    TransferMode             : %s\n",
   1719 	    val & GTIDMAC_CCLR_TRANSFERMODE_B ? "Block" : "Demand");
   1720 	printf("    DescMode                 : %s\n",
   1721 	    val & GTIDMAC_CCLR_DESCMODE_16M ? "16M" : "64k");
   1722 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan));
   1723 	snprintb(buf, sizeof(buf),
   1724 	    "\177\020b\001DescByteSwap\0b\000Endianness\0", val);
   1725 	printf("  Channel Control (High)     : %s\n", buf);
   1726 }
   1727 
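/*
 * Dump an IDMAC descriptor chain.  With post set, each descriptor is
 * synced from the device before printing; otherwise the pre-transfer
 * contents are shown and the sync state is restored afterwards.
 */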
   1728 static void
   1729 gtidmac_dump_idmacdesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
   1730 		       uint32_t mode, int post)
   1731 {
   1732 	struct gtidmac_desc *desc;
   1733 	int i;
   1734 	char buf[256];
   1735 
   1736 	printf("IDMAC Descriptor\n");
   1737 
   1738 	i = 0;
   1739 	while (1 /*CONSTCOND*/) {
   1740 		if (post)
   1741 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
   1742 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
   1743 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1744 
   1745 		desc = dd->dd_idmac_vaddr;
   1746 
   1747 		printf("%d (0x%lx)\n", i, dd->dd_paddr);
   1748 		if (mode & GTIDMAC_CCLR_DESCMODE_16M) {
   1749 			snprintb(buf, sizeof(buf),
   1750 			    "\177\020b\037Own\0b\036BCLeft\0",
   1751 			    desc->bc.mode16m.bcnt);
   1752 			printf("  Byte Count              : %s\n", buf);
   1753 			printf("    ByteCount             :   0x%06x\n",
   1754 			    desc->bc.mode16m.bcnt &
   1755 			    GTIDMAC_CIDMABCR_BYTECNT_MASK);
   1756 		} else {
   1757 			printf("  Byte Count              :     0x%04x\n",
   1758 			    desc->bc.mode64k.bcnt);
    1759 			printf("  Remaining Byte Count    :     0x%04x\n",
   1760 			    desc->bc.mode64k.rbc);
   1761 		}
   1762 		printf("  Source Address          : 0x%08x\n", desc->srcaddr);
   1763 		printf("  Destination Address     : 0x%08x\n", desc->dstaddr);
   1764 		printf("  Next Descriptor Pointer : 0x%08x\n", desc->nextdp);
   1765 
    1766 		if (desc->nextdp == 0)
   1767 			break;
   1768 
   1769 		if (!post)
   1770 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
   1771 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
   1772 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1773 
   1774 		i++;
   1775 		dd = SLIST_NEXT(dd, dd_next);
   1776 	}
   1777 	if (!post)
   1778 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
   1779 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
   1780 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1781 }
   1782 
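/*
 * Dump the per-channel XOR engine registers; which registers are shown
 * depends on the operating mode currently selected in XEXCR.
 */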
   1783 static void
   1784 gtidmac_dump_xorereg(struct gtidmac_softc *sc, int chan)
   1785 {
   1786 	uint32_t val, opmode;
   1787 	char buf[64];
   1788 
   1789 	printf("XORE Registers\n");
   1790 
   1791 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(chan));
   1792 	snprintb(buf, sizeof(buf),
   1793 	    "\177\020"
   1794 	    "b\017RegAccProtect\0b\016DesSwp\0b\015DwrReqSwp\0b\014DrdResSwp\0",
   1795 	    val);
    1796 	printf("  Configuration   : %s\n", buf);
   1797 	opmode = val & MVXORE_XEXCR_OM_MASK;
   1798 	printf("    OperationMode : %s operation\n",
   1799 	  opmode == MVXORE_XEXCR_OM_XOR ? "XOR calculate" :
   1800 	  opmode == MVXORE_XEXCR_OM_CRC32 ? "CRC-32 calculate" :
   1801 	  opmode == MVXORE_XEXCR_OM_DMA ? "DMA" :
   1802 	  opmode == MVXORE_XEXCR_OM_ECC ? "ECC cleanup" :
   1803 	  opmode == MVXORE_XEXCR_OM_MEMINIT ? "Memory Initialization" :
   1804 	  "unknown");
   1805 	printf("    SrcBurstLimit : %s Bytes\n",
   1806 	  (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
   1807 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
   1808 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
    1809 	    "unknown");
    1810 	printf("    DstBurstLimit : %s Bytes\n",
    1811 	  (val & MVXORE_XEXCR_DBL_MASK) == MVXORE_XEXCR_DBL_128B ? "128" :
    1812 	    (val & MVXORE_XEXCR_DBL_MASK) == MVXORE_XEXCR_DBL_64B ? "64" :
    1813 	    (val & MVXORE_XEXCR_DBL_MASK) == MVXORE_XEXCR_DBL_32B ? "32" :
    1814 	    "unknown");
   1815 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(chan));
   1816 	printf("  Activation      : 0x%08x\n", val);
   1817 	val &= MVXORE_XEXACTR_XESTATUS_MASK;
   1818 	printf("    XEstatus      : %s\n",
   1819 	    val == MVXORE_XEXACTR_XESTATUS_NA ? "Channel not active" :
   1820 	    val == MVXORE_XEXACTR_XESTATUS_ACT ? "Channel active" :
   1821 	    val == MVXORE_XEXACTR_XESTATUS_P ? "Channel paused" : "???");
   1822 
   1823 	if (opmode == MVXORE_XEXCR_OM_XOR ||
   1824 	    opmode == MVXORE_XEXCR_OM_CRC32 ||
   1825 	    opmode == MVXORE_XEXCR_OM_DMA) {
   1826 		printf("  NextDescPtr     : 0x%08x\n",
   1827 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1828 		    MVXORE_XEXNDPR(chan)));
   1829 		printf("  CurrentDescPtr  : 0x%08x\n",
   1830 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1831 		    MVXORE_XEXCDPR(chan)));
   1832 	}
   1833 	printf("  ByteCnt         : 0x%08x\n",
   1834 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXBCR(chan)));
   1835 
   1836 	if (opmode == MVXORE_XEXCR_OM_ECC ||
   1837 	    opmode == MVXORE_XEXCR_OM_MEMINIT) {
   1838 		printf("  DstPtr          : 0x%08x\n",
   1839 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1840 		    MVXORE_XEXDPR(chan)));
   1841 		printf("  BlockSize       : 0x%08x\n",
   1842 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1843 		    MVXORE_XEXBSR(chan)));
   1844 
   1845 		if (opmode == MVXORE_XEXCR_OM_ECC) {
   1846 			val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1847 			    MVXORE_XETMCR);
   1848 			if (val & MVXORE_XETMCR_TIMEREN) {
   1849 				val >>= MVXORE_XETMCR_SECTIONSIZECTRL_SHIFT;
   1850 				val &= MVXORE_XETMCR_SECTIONSIZECTRL_MASK;
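				/* SectionSizeCtrl encodes a power of two, */
				/* hence the shift. */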
    1851 				printf("  SectionSizeCtrl : 0x%08x\n",
    1852 				    1 << val);
   1852 				printf("  TimerInitVal    : 0x%08x\n",
   1853 				    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1854 				    MVXORE_XETMIVR));
   1855 				printf("  TimerCrntVal    : 0x%08x\n",
   1856 				    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1857 				    MVXORE_XETMCVR));
   1858 			}
   1859 		} else	/* MVXORE_XEXCR_OM_MEMINIT */
   1860 			printf("  InitVal         : 0x%08x%08x\n",
   1861 			    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1862 			    MVXORE_XEIVRH),
   1863 			    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1864 			    MVXORE_XEIVRL));
   1865 	}
   1866 }
   1867 
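/*
 * Dump an XOR engine descriptor chain.  In XOR mode each descriptor can
 * reference up to MVXORE_NSRC sources; in the other modes only
 * srcaddr[0] is meaningful.
 */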
   1868 static void
   1869 gtidmac_dump_xoredesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
   1870 		      uint32_t mode, int post)
   1871 {
   1872 	struct mvxore_desc *desc;
   1873 	int i, j;
   1874 	char buf[256];
   1875 
   1876 	printf("XORE Descriptor\n");
   1877 
   1878 	mode &= MVXORE_XEXCR_OM_MASK;
   1879 
   1880 	i = 0;
   1881 	while (1 /*CONSTCOND*/) {
   1882 		if (post)
   1883 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
   1884 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
   1885 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1886 
   1887 		desc = dd->dd_xore_vaddr;
   1888 
   1889 		printf("%d (0x%lx)\n", i, dd->dd_paddr);
   1890 
   1891 		snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036Success\0",
   1892 		    desc->stat);
    1893 		printf("  Status                  : %s\n", buf);
    1894 		if ((desc->cmd & MVXORE_DESC_CMD_CRCLAST) && post)
   1895 			printf("  CRC-32 Result           : 0x%08x\n",
   1896 			    desc->result);
   1897 		snprintb(buf, sizeof(buf),
   1898 		    "\177\020b\037EODIntEn\0b\036CRCLast\0"
   1899 		    "b\007Src7Cmd\0b\006Src6Cmd\0b\005Src5Cmd\0b\004Src4Cmd\0"
   1900 		    "b\003Src3Cmd\0b\002Src2Cmd\0b\001Src1Cmd\0b\000Src0Cmd\0",
   1901 		    desc->cmd);
    1902 		printf("  Command                 : %s\n", buf);
   1903 		printf("  Next Descriptor Address : 0x%08x\n", desc->nextda);
   1904 		printf("  Byte Count              :   0x%06x\n", desc->bcnt);
   1905 		printf("  Destination Address     : 0x%08x\n", desc->dstaddr);
   1906 		if (mode == MVXORE_XEXCR_OM_XOR) {
   1907 			for (j = 0; j < MVXORE_NSRC; j++)
   1908 				if (desc->cmd & MVXORE_DESC_CMD_SRCCMD(j))
   1909 					printf("  Source Address#%d        :"
   1910 					    " 0x%08x\n", j, desc->srcaddr[j]);
   1911 		} else
   1912 			printf("  Source Address          : 0x%08x\n",
   1913 			    desc->srcaddr[0]);
   1914 
    1915 		if (desc->nextda == 0)
   1916 			break;
   1917 
   1918 		if (!post)
   1919 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
   1920 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
   1921 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1922 
   1923 		i++;
   1924 		dd = SLIST_NEXT(dd, dd_next);
   1925 	}
   1926 	if (!post)
   1927 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
   1928 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
   1929 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1930 }
   1931 #endif
   1932