/*	$NetBSD: gtidmac.c,v 1.5 2010/07/20 11:47:59 kiyohara Exp $	*/
/*
 * Copyright (c) 2008 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gtidmac.c,v 1.5 2010/07/20 11:47:59 kiyohara Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/endian.h>
#include <sys/kmem.h>

#include <uvm/uvm_param.h>	/* For PAGE_SIZE */

#include <dev/dmover/dmovervar.h>

#include <dev/marvell/gtidmacreg.h>
#include <dev/marvell/gtidmacvar.h>
#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>

#include <prop/proplib.h>

#include "locators.h"

#ifdef GTIDMAC_DEBUG
#define DPRINTF(x)	if (gtidmac_debug) printf x
int gtidmac_debug = 0;
#else
#define DPRINTF(x)
#endif

#define GTIDMAC_NDESC		64
#define GTIDMAC_MAXCHAN		8
#define MVXORE_NDESC		128
#define MVXORE_MAXCHAN		2

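/*
 * Worst-case number of bus_dma segments per transfer: one segment for
 * each page of the maximum transfer size.
 */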
#define GTIDMAC_NSEGS		((GTIDMAC_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
#define MVXORE_NSEGS		((MVXORE_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)


struct gtidmac_softc;

struct gtidmac_function {
	int (*chan_alloc)(void *, bus_dmamap_t **, bus_dmamap_t **, void *);
	void (*chan_free)(void *, int);
	int (*dma_setup)(void *, int, int, bus_dmamap_t *, bus_dmamap_t *,
			 bus_size_t);
	void (*dma_start)(void *, int,
			  void (*dma_done_cb)(void *, int, bus_dmamap_t *,
					      bus_dmamap_t *, int));
	uint32_t (*dma_finish)(void *, int, int);
};

struct gtidmac_dma_desc {
	int dd_index;
	union {
		struct gtidmac_desc *idmac_vaddr;
		struct mvxore_desc *xore_vaddr;
	} dd_vaddr;
#define dd_idmac_vaddr	dd_vaddr.idmac_vaddr
#define dd_xore_vaddr	dd_vaddr.xore_vaddr
	paddr_t dd_paddr;
	SLIST_ENTRY(gtidmac_dma_desc) dd_next;
};

struct gtidmac_softc {
	device_t sc_dev;

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;

	bus_dma_tag_t sc_dmat;
	struct gtidmac_dma_desc *sc_dd_buffer;
	bus_dma_segment_t sc_pattern_segment;
	struct {
		u_char pbuf[16];	/* 16 bytes per pattern */
	} *sc_pbuf;			/*   x 256 patterns */

	int sc_gtidmac_nchan;
	struct gtidmac_desc *sc_dbuf;
	bus_dmamap_t sc_dmap;
	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist;
	struct {
		bus_dmamap_t chan_in;		/* In dmamap */
		bus_dmamap_t chan_out;		/* Out dmamap */
		uint64_t chan_totalcnt;		/* total transferred bytes */
		int chan_ddidx;
		void *chan_running;		/* opaque object data */
		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
				      bus_dmamap_t *, int);
	} sc_cdesc[GTIDMAC_MAXCHAN];
	struct gtidmac_intr_arg {
		struct gtidmac_softc *ia_sc;
		uint32_t ia_cause;
		uint32_t ia_mask;
		uint32_t ia_eaddr;
		uint32_t ia_eselect;
	} sc_intrarg[GTIDMAC_NINTRRUPT];

	int sc_mvxore_nchan;
	struct mvxore_desc *sc_dbuf_xore;
	bus_dmamap_t sc_dmap_xore;
	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist_xore;
	struct {
		bus_dmamap_t chan_in[MVXORE_NSRC];	/* In dmamap */
		bus_dmamap_t chan_out;			/* Out dmamap */
		uint64_t chan_totalcnt;			/* total transferred */
		int chan_ddidx;
		void *chan_running;			/* opaque object data */
		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
				      bus_dmamap_t *, int);
	} sc_cdesc_xore[MVXORE_MAXCHAN];

	struct dmover_backend sc_dmb;
	struct dmover_backend sc_dmb_xore;
	int sc_dmb_busy;
};
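/* Most recently attached softc; exported to clients via gtidmac_tag_get(). */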
struct gtidmac_softc *gtidmac_softc = NULL;

static int gtidmac_match(device_t, struct cfdata *, void *);
static void gtidmac_attach(device_t, device_t, void *);

static int gtidmac_intr(void *);
static int mvxore_intr(void *);

static void gtidmac_process(struct dmover_backend *);
static void gtidmac_dmover_run(struct dmover_backend *);
static void gtidmac_dmover_done(void *, int, bus_dmamap_t *, bus_dmamap_t *,
				int);
__inline int gtidmac_dmmap_load(struct gtidmac_softc *, bus_dmamap_t,
				dmover_buffer_type, dmover_buffer *, int);
__inline void gtidmac_dmmap_unload(struct gtidmac_softc *, bus_dmamap_t, int);

static uint32_t gtidmac_finish(void *, int, int);
static uint32_t mvxore_finish(void *, int, int);

static void gtidmac_wininit(struct gtidmac_softc *);
static void mvxore_wininit(struct gtidmac_softc *);

#ifdef GTIDMAC_DEBUG
static void gtidmac_dump_idmacreg(struct gtidmac_softc *, int);
static void gtidmac_dump_idmacdesc(struct gtidmac_softc *,
				   struct gtidmac_dma_desc *, uint32_t, int);
static void gtidmac_dump_xorereg(struct gtidmac_softc *, int);
static void gtidmac_dump_xoredesc(struct gtidmac_softc *,
				  struct gtidmac_dma_desc *, uint32_t, int);
#endif


static struct gtidmac_function gtidmac_functions = {
	.chan_alloc = gtidmac_chan_alloc,
	.chan_free = gtidmac_chan_free,
	.dma_setup = gtidmac_setup,
	.dma_start = gtidmac_start,
	.dma_finish = gtidmac_finish,
};

static struct gtidmac_function mvxore_functions = {
	.chan_alloc = mvxore_chan_alloc,
	.chan_free = mvxore_chan_free,
	.dma_setup = mvxore_setup,
	.dma_start = mvxore_start,
	.dma_finish = mvxore_finish,
};

static const struct dmover_algdesc gtidmac_algdescs[] = {
	{
		.dad_name = DMOVER_FUNC_ZERO,
		.dad_data = &gtidmac_functions,
		.dad_ninputs = 0
	},
	{
		.dad_name = DMOVER_FUNC_FILL8,
		.dad_data = &gtidmac_functions,
		.dad_ninputs = 0
	},
	{
		.dad_name = DMOVER_FUNC_COPY,
		.dad_data = &gtidmac_functions,
		.dad_ninputs = 1
	},
};

static const struct dmover_algdesc mvxore_algdescs[] = {
#if 0
	/*
	 * These operations have many restrictions on the XOR engine;
	 * the IDMAC must be used for them instead.
	 */
	{
		.dad_name = DMOVER_FUNC_ZERO,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 0
	},
	{
		.dad_name = DMOVER_FUNC_FILL8,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 0
	},
#endif
	{
		.dad_name = DMOVER_FUNC_COPY,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 1
	},
	{
		.dad_name = DMOVER_FUNC_ISCSI_CRC32C,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 1
	},
	{
		.dad_name = DMOVER_FUNC_XOR2,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 2
	},
	{
		.dad_name = DMOVER_FUNC_XOR3,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 3
	},
	{
		.dad_name = DMOVER_FUNC_XOR4,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 4
	},
	{
		.dad_name = DMOVER_FUNC_XOR5,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 5
	},
	{
		.dad_name = DMOVER_FUNC_XOR6,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 6
	},
	{
		.dad_name = DMOVER_FUNC_XOR7,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 7
	},
	{
		.dad_name = DMOVER_FUNC_XOR8,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 8
	},
};

CFATTACH_DECL_NEW(gtidmac_gt, sizeof(struct gtidmac_softc),
    gtidmac_match, gtidmac_attach, NULL, NULL);
CFATTACH_DECL_NEW(gtidmac_mbus, sizeof(struct gtidmac_softc),
    gtidmac_match, gtidmac_attach, NULL, NULL);


/* ARGSUSED */
static int
gtidmac_match(device_t parent, struct cfdata *match, void *aux)
{
	struct marvell_attach_args *mva = aux;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;

	if (mva->mva_model == MARVELL_ORION_1_88F6082)
		return 0;

	if (mva->mva_offset == MVA_OFFSET_DEFAULT ||
	    mva->mva_irq == MVA_IRQ_DEFAULT)
		return 0;

	mva->mva_size = GTIDMAC_SIZE;
	return 1;
}

/* ARGSUSED */
static void
gtidmac_attach(device_t parent, device_t self, void *aux)
{
	struct gtidmac_softc *sc = device_private(self);
	struct marvell_attach_args *mva = aux;
	bus_dma_segment_t segs, segs_xore;
	struct gtidmac_dma_desc *dd;
	prop_dictionary_t dict = device_properties(self);
	uint32_t mask, dmb_speed, xore_irq;
	int idmac_nchan, xore_nchan, nsegs, nsegs_xore, i, j, k, n;

	xore_irq = 0;
	idmac_nchan = 8;
	xore_nchan = 0;
	switch (mva->mva_model) {
	case MARVELL_DISCOVERY:
	case MARVELL_DISCOVERY_II:
	case MARVELL_DISCOVERY_III:
		break;

	case MARVELL_ORION_1_88F1181:
	case MARVELL_ORION_1_88F5082:
	case MARVELL_ORION_1_88F5180N:
	case MARVELL_ORION_1_88F5181:
	case MARVELL_ORION_1_88W8660:
	case MARVELL_ORION_2_88F1281:
	case MARVELL_ORION_2_88F5281:
		idmac_nchan = 4;
		break;

#if 0
	case MARVELL_DISCOVERY_LT:
	case MARVELL_DISCOVERY_V:
	case MARVELL_DISCOVERY_VI:	????
#endif
	case MARVELL_ORION_1_88F5182:
		idmac_nchan = 4;
		xore_nchan = 2;
		break;
	}
	if (xore_nchan != 0)
		if (!prop_dictionary_get_uint32(dict, "xore-irq-begin",
		    &xore_irq)) {
			aprint_error(": no xore-irq-begin property\n");
			return;
		}

	aprint_naive("\n");
	aprint_normal(": Marvell IDMA Controller%s\n",
	    xore_nchan ? "/XOR Engine" : "");

	sc->sc_dev = self;
	sc->sc_iot = mva->mva_iot;

	/* Map I/O registers */
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
	    mva->mva_size, &sc->sc_ioh)) {
		aprint_error_dev(self, "can't map registers\n");
		return;
	}

	/*
	 * Initialise DMA descriptors and associated metadata
	 */
	sc->sc_dmat = mva->mva_dmat;
	n = idmac_nchan * GTIDMAC_NDESC + xore_nchan * MVXORE_NDESC;
	sc->sc_dd_buffer =
	    kmem_alloc(sizeof(struct gtidmac_dma_desc) * n, KM_SLEEP);
	if (sc->sc_dd_buffer == NULL) {
		aprint_error_dev(self, "can't allocate memory\n");
		goto fail1;
	}
	/* pattern buffer */
	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &sc->sc_pattern_segment, 1, &nsegs, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self,
		    "bus_dmamem_alloc failed: pattern buffer\n");
		goto fail2;
	}
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_pattern_segment, 1, PAGE_SIZE,
	    (void **)&sc->sc_pbuf, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self,
		    "bus_dmamem_map failed: pattern buffer\n");
		goto fail3;
	}
	for (i = 0; i < 0x100; i++)
		for (j = 0; j < sizeof(sc->sc_pbuf[i].pbuf); j++)
			sc->sc_pbuf[i].pbuf[j] = i;

	/* IDMAC DMA descriptor buffer */
	sc->sc_gtidmac_nchan = idmac_nchan;
	if (bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan,
	    PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self,
		    "bus_dmamem_alloc failed: descriptor buffer\n");
		goto fail4;
	}
	if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan,
	    (void **)&sc->sc_dbuf, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self,
		    "bus_dmamem_map failed: descriptor buffer\n");
		goto fail5;
	}
	if (bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan, 1,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmap)) {
		aprint_error_dev(self,
		    "bus_dmamap_create failed: descriptor buffer\n");
		goto fail6;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, sc->sc_dbuf,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan, NULL,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(self,
		    "bus_dmamap_load failed: descriptor buffer\n");
		goto fail7;
	}
	SLIST_INIT(&sc->sc_dlist);
	for (i = 0; i < GTIDMAC_NDESC * idmac_nchan; i++) {
		dd = &sc->sc_dd_buffer[i];
		dd->dd_index = i;
		dd->dd_idmac_vaddr = &sc->sc_dbuf[i];
		dd->dd_paddr = sc->sc_dmap->dm_segs[0].ds_addr +
		    (sizeof(struct gtidmac_desc) * i);
		SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
	}

	/* Initialize IDMAC DMA channels */
	mask = 0;
	for (i = 0; i < idmac_nchan; i++) {
		if (i > 0 &&
		    ((i * GTIDMAC_I_BITS) & 31 /*bit*/) == 0) {
			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			    GTIDMAC_IMR(i - 1), mask);
			mask = 0;
		}

		if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
		    GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
		    &sc->sc_cdesc[i].chan_in)) {
			aprint_error_dev(self,
			    "bus_dmamap_create failed: chan%d in\n", i);
			goto fail8;
		}
		if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
		    GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
		    &sc->sc_cdesc[i].chan_out)) {
			aprint_error_dev(self,
			    "bus_dmamap_create failed: chan%d out\n", i);
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_cdesc[i].chan_in);
			goto fail8;
		}
		sc->sc_cdesc[i].chan_totalcnt = 0;
		sc->sc_cdesc[i].chan_running = NULL;

		/* Bits that overflow the 32-bit mask are ignored. */
		mask |= GTIDMAC_I(i,
		    GTIDMAC_I_COMP	|
		    GTIDMAC_I_ADDRMISS	|
		    GTIDMAC_I_ACCPROT	|
		    GTIDMAC_I_WRPROT	|
		    GTIDMAC_I_OWN);
	}
	if (i > 0)
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_IMR(i - 1),
		    mask);

	/* Setup interrupt */
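	/*
	 * The per-channel cause/error registers are grouped: each of the
	 * GTIDMAC_NINTRRUPT interrupt lines services a contiguous block
	 * of channels, and 'c' is the first channel of the block for
	 * line j.
	 */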
	for (j = 0; j < GTIDMAC_NINTRRUPT; j++) {
		int c = j * idmac_nchan / __arraycount(sc->sc_intrarg);

		sc->sc_intrarg[j].ia_sc = sc;
		sc->sc_intrarg[j].ia_cause = GTIDMAC_ICR(c);
		sc->sc_intrarg[j].ia_eaddr = GTIDMAC_EAR(c);
		sc->sc_intrarg[j].ia_eselect = GTIDMAC_ESR(c);
		marvell_intr_establish(mva->mva_irq + j, IPL_BIO,
		    gtidmac_intr, &sc->sc_intrarg[j]);
	}

	if (mva->mva_model != MARVELL_DISCOVERY)
		gtidmac_wininit(sc);

	/* Register us with dmover. */
	sc->sc_dmb.dmb_name = device_xname(self);
	if (!prop_dictionary_get_uint32(dict, "dmb_speed", &dmb_speed)) {
		aprint_error_dev(self, "no dmb_speed property\n");
		dmb_speed = 10;		/* Probably faster than swdmover. */
	}
	sc->sc_dmb.dmb_speed = dmb_speed;
	sc->sc_dmb.dmb_cookie = sc;
	sc->sc_dmb.dmb_algdescs = gtidmac_algdescs;
	sc->sc_dmb.dmb_nalgdescs = __arraycount(gtidmac_algdescs);
	sc->sc_dmb.dmb_process = gtidmac_process;
	dmover_backend_register(&sc->sc_dmb);
	sc->sc_dmb_busy = 0;

	if (xore_nchan) {
		/* XORE DMA descriptor buffer */
		sc->sc_mvxore_nchan = xore_nchan;
		if (bus_dmamem_alloc(sc->sc_dmat,
		    sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan,
		    PAGE_SIZE, 0, &segs_xore, 1, &nsegs_xore, BUS_DMA_NOWAIT)) {
			aprint_error_dev(self, "bus_dmamem_alloc failed:"
			    " xore descriptor buffer\n");
			goto fail8;
		}
		if (bus_dmamem_map(sc->sc_dmat, &segs_xore, 1,
		    sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan,
		    (void **)&sc->sc_dbuf_xore, BUS_DMA_NOWAIT)) {
			aprint_error_dev(self,
			    "bus_dmamem_map failed: xore descriptor buffer\n");
			goto fail9;
		}
		if (bus_dmamap_create(sc->sc_dmat,
		    sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan, 1,
		    sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan, 0,
		    BUS_DMA_NOWAIT, &sc->sc_dmap_xore)) {
			aprint_error_dev(self, "bus_dmamap_create failed:"
			    " xore descriptor buffer\n");
			goto fail10;
		}
		if (bus_dmamap_load(
		    sc->sc_dmat, sc->sc_dmap_xore, sc->sc_dbuf_xore,
		    sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan,
		    NULL, BUS_DMA_NOWAIT)) {
			aprint_error_dev(self,
			    "bus_dmamap_load failed: xore descriptor buffer\n");
			goto fail11;
		}
		SLIST_INIT(&sc->sc_dlist_xore);
		for (j = 0; j < MVXORE_NDESC * xore_nchan; j++) {
			dd = &sc->sc_dd_buffer[j + GTIDMAC_NDESC * idmac_nchan];
			dd->dd_index = j;
			dd->dd_xore_vaddr = &sc->sc_dbuf_xore[j];
			dd->dd_paddr = sc->sc_dmap_xore->dm_segs[0].ds_addr +
			    (sizeof(struct mvxore_desc) * j);
			SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
		}

		/* Initialize XORE DMA channels */
		mask = 0;
		for (j = 0; j < xore_nchan; j++) {
			for (k = 0; k < MVXORE_NSRC; k++) {
				if (bus_dmamap_create(sc->sc_dmat,
				    MVXORE_MAXXFER, MVXORE_NSEGS,
				    MVXORE_MAXXFER, 0, BUS_DMA_NOWAIT,
				    &sc->sc_cdesc_xore[j].chan_in[k])) {
					aprint_error_dev(self,
					    "bus_dmamap_create failed:"
					    " xore chan%d in[%d]\n", j, k);
					goto fail12;
				}
			}
			if (bus_dmamap_create(sc->sc_dmat, MVXORE_MAXXFER,
			    MVXORE_NSEGS, MVXORE_MAXXFER, 0,
			    BUS_DMA_NOWAIT, &sc->sc_cdesc_xore[j].chan_out)) {
				aprint_error_dev(self,
				    "bus_dmamap_create failed: xore chan%d"
				    " out\n", j);
				goto fail13;
			}
			sc->sc_cdesc_xore[j].chan_totalcnt = 0;
			sc->sc_cdesc_xore[j].chan_running = NULL;

			mask |= MVXORE_I(j,
			    MVXORE_I_EOC	|
			    MVXORE_I_ADDRDECODE	|
			    MVXORE_I_ACCPROT	|
			    MVXORE_I_WRPROT	|
			    MVXORE_I_OWN	|
			    MVXORE_I_INTPARITY	|
			    MVXORE_I_XBAR);
		}
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIMR, mask);

		marvell_intr_establish(xore_irq + 0, IPL_BIO, mvxore_intr, sc);
		marvell_intr_establish(xore_irq + 1, IPL_BIO, mvxore_intr, sc);

		mvxore_wininit(sc);

		/* Register us with dmover. */
		sc->sc_dmb_xore.dmb_name = device_xname(sc->sc_dev);
		sc->sc_dmb_xore.dmb_speed = dmb_speed;
		sc->sc_dmb_xore.dmb_cookie = sc;
		sc->sc_dmb_xore.dmb_algdescs = mvxore_algdescs;
		sc->sc_dmb_xore.dmb_nalgdescs =
		    __arraycount(mvxore_algdescs);
		sc->sc_dmb_xore.dmb_process = gtidmac_process;
		dmover_backend_register(&sc->sc_dmb_xore);
	}

	gtidmac_softc = sc;

	return;

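	/*
	 * Error unwind: the code below is reached only via the fail*
	 * labels; each step releases what was set up before the failure.
	 */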
	for (; j-- > 0;) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc_xore[j].chan_out);

fail13:
		k = MVXORE_NSRC;
fail12:
		for (; k-- > 0;)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_cdesc_xore[j].chan_in[k]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap_xore);
fail11:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap_xore);
fail10:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf_xore,
	    sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan);
fail9:
	bus_dmamem_free(sc->sc_dmat, &segs_xore, 1);
fail8:
	for (; i-- > 0;) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
fail7:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
fail6:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan);
fail5:
	bus_dmamem_free(sc->sc_dmat, &segs, 1);
fail4:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_pbuf, PAGE_SIZE);
fail3:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_pattern_segment, 1);
fail2:
	kmem_free(sc->sc_dd_buffer, sizeof(struct gtidmac_dma_desc) * n);
fail1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, mva->mva_size);
	return;
}


static int
gtidmac_intr(void *arg)
{
	struct gtidmac_intr_arg *ia = arg;
	struct gtidmac_softc *sc = ia->ia_sc;
	uint32_t cause;
	int handled = 0, chan, error;

	cause = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause);
	DPRINTF(("IDMAC intr: cause=0x%x\n", cause));
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause, ~cause);

	chan = 0;
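	/*
	 * The cause register packs GTIDMAC_I_BITS bits of status per
	 * channel; examine one channel's worth per iteration.
	 */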
	while (cause) {
		error = 0;
		if (cause & GTIDMAC_I_ADDRMISS) {
			aprint_error_dev(sc->sc_dev, "Address Miss");
			error = EINVAL;
		}
		if (cause & GTIDMAC_I_ACCPROT) {
			aprint_error_dev(sc->sc_dev,
			    "Access Protect Violation");
			error = EACCES;
		}
		if (cause & GTIDMAC_I_WRPROT) {
			aprint_error_dev(sc->sc_dev, "Write Protect");
			error = EACCES;
		}
		if (cause & GTIDMAC_I_OWN) {
			aprint_error_dev(sc->sc_dev, "Ownership Violation");
			error = EINVAL;
		}

#define GTIDMAC_I_ERROR		  \
	   (GTIDMAC_I_ADDRMISS	| \
	    GTIDMAC_I_ACCPROT	| \
	    GTIDMAC_I_WRPROT	| \
	    GTIDMAC_I_OWN)
		if (cause & GTIDMAC_I_ERROR) {
			uint32_t sel;
			int select;

			sel = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    ia->ia_eselect) & GTIDMAC_ESR_SEL;
			select = sel - chan * GTIDMAC_I_BITS;
			if (select >= 0 && select < GTIDMAC_I_BITS) {
				uint32_t ear;

				ear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				    ia->ia_eaddr);
				aprint_error(": Error Address 0x%x\n", ear);
			} else
				aprint_error(": lost Error Address\n");
		}

		if (cause & (GTIDMAC_I_COMP | GTIDMAC_I_ERROR)) {
			sc->sc_cdesc[chan].chan_dma_done(
			    sc->sc_cdesc[chan].chan_running, chan,
			    &sc->sc_cdesc[chan].chan_in,
			    &sc->sc_cdesc[chan].chan_out, error);
			handled++;
		}

		cause >>= GTIDMAC_I_BITS;
	}
	DPRINTF(("IDMAC intr: %shandled\n", handled ? "" : "not "));

	return handled;
}

static int
mvxore_intr(void *arg)
{
	struct gtidmac_softc *sc = arg;
	uint32_t cause;
	int handled = 0, chan, error;

	cause = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEICR);
	DPRINTF(("XORE intr: cause=0x%x\n", cause));
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEICR, ~cause);

	chan = 0;
	while (cause) {
		error = 0;
		if (cause & MVXORE_I_ADDRDECODE) {
			aprint_error_dev(sc->sc_dev, "Failed address decoding");
			error = EINVAL;
		}
		if (cause & MVXORE_I_ACCPROT) {
			aprint_error_dev(sc->sc_dev,
			    "Access Protect Violation");
			error = EACCES;
		}
		if (cause & MVXORE_I_WRPROT) {
			aprint_error_dev(sc->sc_dev, "Write Protect");
			error = EACCES;
		}
		if (cause & MVXORE_I_OWN) {
			aprint_error_dev(sc->sc_dev, "Ownership Violation");
			error = EINVAL;
		}
		if (cause & MVXORE_I_INTPARITY) {
			aprint_error_dev(sc->sc_dev, "Parity Error");
			error = EIO;
		}
		if (cause & MVXORE_I_XBAR) {
			aprint_error_dev(sc->sc_dev, "Crossbar Parity Error");
			error = EINVAL;
		}

#define MVXORE_I_ERROR		  \
	   (MVXORE_I_ADDRDECODE	| \
	    MVXORE_I_ACCPROT	| \
	    MVXORE_I_WRPROT	| \
	    MVXORE_I_OWN	| \
	    MVXORE_I_INTPARITY	| \
	    MVXORE_I_XBAR)
		if (cause & MVXORE_I_ERROR) {
			uint32_t type;
			int event;

			type = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    MVXORE_XEECR) & MVXORE_XEECR_ERRORTYPE_MASK;
			event = type - chan * MVXORE_I_BITS;
			if (event >= 0 && event < MVXORE_I_BITS) {
				uint32_t xeear;

				xeear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				    MVXORE_XEEAR);
				aprint_error(": Error Address 0x%x\n", xeear);
			} else
				aprint_error(": lost Error Address\n");
		}

		if (cause & (MVXORE_I_EOC | MVXORE_I_ERROR)) {
			sc->sc_cdesc_xore[chan].chan_dma_done(
			    sc->sc_cdesc_xore[chan].chan_running, chan,
			    sc->sc_cdesc_xore[chan].chan_in,
			    &sc->sc_cdesc_xore[chan].chan_out, error);
			handled++;
		}

		cause >>= MVXORE_I_BITS;
	}
	DPRINTF(("XORE intr: %shandled\n", handled ? "" : "not "));

	return handled;
}


/*
 * dmover(9) backend function.
 */
static void
gtidmac_process(struct dmover_backend *dmb)
{
	struct gtidmac_softc *sc = dmb->dmb_cookie;
	int s;

	/* If the backend is currently idle, go process the queue. */
	s = splbio();
	if (!sc->sc_dmb_busy)
		gtidmac_dmover_run(dmb);
	splx(s);
}

static void
gtidmac_dmover_run(struct dmover_backend *dmb)
{
	struct gtidmac_softc *sc = dmb->dmb_cookie;
	struct dmover_request *dreq;
	const struct dmover_algdesc *algdesc;
	struct gtidmac_function *df;
	bus_dmamap_t *dmamap_in, *dmamap_out;
	int chan, ninputs, error, i;

	sc->sc_dmb_busy = 1;

	for (;;) {
		dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
		if (dreq == NULL)
			break;
		algdesc = dreq->dreq_assignment->das_algdesc;
		df = algdesc->dad_data;
		chan = (*df->chan_alloc)(sc, &dmamap_in, &dmamap_out, dreq);
		if (chan == -1)
			return;

		dmover_backend_remque(dmb, dreq);
		dreq->dreq_flags |= DMOVER_REQ_RUNNING;

		/* XXXUNLOCK */

		error = 0;

		/* Load dmover's in/out buffers into the bus_dmamaps. */
		ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
		if (ninputs == 0) {
			int pno = 0;

			if (algdesc->dad_name == DMOVER_FUNC_FILL8)
				pno = dreq->dreq_immediate[0];

			i = 0;
			error = bus_dmamap_load(sc->sc_dmat, *dmamap_in,
			    &sc->sc_pbuf[pno], sizeof(sc->sc_pbuf[pno]), NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE);
			if (error == 0) {
				bus_dmamap_sync(sc->sc_dmat, *dmamap_in, 0,
				    sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

				/*
				 * gtidmac_dmmap_unload() must be called
				 * if an error occurs later.
				 */
				i = 1;
			}
		} else
			for (i = 0; i < ninputs; i++) {
				error = gtidmac_dmmap_load(sc,
				    *(dmamap_in + i), dreq->dreq_inbuf_type,
				    &dreq->dreq_inbuf[i], 0/*write*/);
				if (error != 0)
					break;
			}
		if (algdesc->dad_name != DMOVER_FUNC_ISCSI_CRC32C) {
			if (error == 0)
				error = gtidmac_dmmap_load(sc, *dmamap_out,
				    dreq->dreq_outbuf_type, &dreq->dreq_outbuf,
				    1/*read*/);

			if (error == 0) {
				/*
				 * In a dmover request, the output buffer
				 * size is always taken to be the DMA
				 * transfer size.
				 */
				error = (*df->dma_setup)(sc, chan, ninputs,
				    dmamap_in, dmamap_out,
				    (*dmamap_out)->dm_mapsize);
				if (error != 0)
					gtidmac_dmmap_unload(sc, *dmamap_out,
					    1);
			}
		} else
			if (error == 0)
				error = (*df->dma_setup)(sc, chan, ninputs,
				    dmamap_in, dmamap_out,
				    (*dmamap_in)->dm_mapsize);

		/* XXXLOCK */

		if (error != 0) {
			for (; i-- > 0;)
				gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
			(*df->chan_free)(sc, chan);

			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			dreq->dreq_error = error;
			/* XXXUNLOCK */
			dmover_done(dreq);
			/* XXXLOCK */
			continue;
		}

		(*df->dma_start)(sc, chan, gtidmac_dmover_done);
		break;
	}

	/* All done */
	sc->sc_dmb_busy = 0;
}

static void
gtidmac_dmover_done(void *object, int chan, bus_dmamap_t *dmamap_in,
		    bus_dmamap_t *dmamap_out, int error)
{
	struct gtidmac_softc *sc;
	struct dmover_request *dreq = object;
	struct dmover_backend *dmb;
	struct gtidmac_function *df;
	uint32_t result;
	int ninputs, i;

	KASSERT(dreq != NULL);

	dmb = dreq->dreq_assignment->das_backend;
	df = dreq->dreq_assignment->das_algdesc->dad_data;
	ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
	sc = dmb->dmb_cookie;

	result = (*df->dma_finish)(sc, chan, error);
	for (i = 0; i < ninputs; i++)
		gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
	if (dreq->dreq_assignment->das_algdesc->dad_name ==
	    DMOVER_FUNC_ISCSI_CRC32C)
		memcpy(dreq->dreq_immediate, &result, sizeof(result));
	else
		gtidmac_dmmap_unload(sc, *dmamap_out, 1);

	(*df->chan_free)(sc, chan);

	if (error) {
		dreq->dreq_error = error;
		dreq->dreq_flags |= DMOVER_REQ_ERROR;
	}

	dmover_done(dreq);

	/*
	 * See if we can start some more dmover(9) requests.
	 *
	 * Note: We're already at splbio() here.
	 */
	if (!sc->sc_dmb_busy)
		gtidmac_dmover_run(dmb);
}

__inline int
gtidmac_dmmap_load(struct gtidmac_softc *sc, bus_dmamap_t dmamap,
		   dmover_buffer_type dmbuf_type, dmover_buffer *dmbuf,
		   int read)
{
	int error, flags;

	flags = BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    (read ? BUS_DMA_READ : BUS_DMA_WRITE);

	switch (dmbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dmbuf->dmbuf_linear.l_addr, dmbuf->dmbuf_linear.l_len,
		    NULL, flags);
		break;

	case DMOVER_BUF_UIO:
		if ((read && dmbuf->dmbuf_uio->uio_rw != UIO_READ) ||
		    (!read && dmbuf->dmbuf_uio->uio_rw == UIO_READ))
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    dmbuf->dmbuf_uio, flags);
		break;

	default:
		error = EINVAL;
	}

	if (error == 0)
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    read ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	return error;
}

__inline void
gtidmac_dmmap_unload(struct gtidmac_softc *sc, bus_dmamap_t dmamap, int read)
{

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    read ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->sc_dmat, dmamap);
}


void *
gtidmac_tag_get(void)
{

	return gtidmac_softc;
}
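
/*
 * Example of driving an IDMAC channel through this interface (a minimal
 * sketch, not compiled: "mydev_softc", "mydev_done" and the buffer
 * loading are hypothetical, and error handling is omitted):
 */
#if 0
static void
mydev_copy_start(struct mydev_softc *msc)
{
	void *tag = gtidmac_tag_get();
	bus_dmamap_t *in, *out;
	int chan;

	/* Claim an idle channel; -1 means all channels are busy. */
	chan = gtidmac_chan_alloc(tag, &in, &out, msc);
	if (chan == -1)
		return;

	/* ... load *in and *out with source/destination buffers ... */

	/* Build the descriptor chain, then kick the channel. */
	if (gtidmac_setup(tag, chan, 1, in, out, (*out)->dm_mapsize) == 0)
		gtidmac_start(tag, chan, mydev_done);	/* done callback */
	else
		gtidmac_chan_free(tag, chan);
}
#endif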

/*
 * IDMAC functions
 */
int
gtidmac_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
		   bus_dmamap_t **dmamap_out, void *object)
{
	struct gtidmac_softc *sc = tag;
	int chan;

/* maybe need lock */

	for (chan = 0; chan < sc->sc_gtidmac_nchan; chan++)
		if (sc->sc_cdesc[chan].chan_running == NULL)
			break;
	if (chan >= sc->sc_gtidmac_nchan)
		return -1;


	sc->sc_cdesc[chan].chan_running = object;

/* unlock */

	*dmamap_in = &sc->sc_cdesc[chan].chan_in;
	*dmamap_out = &sc->sc_cdesc[chan].chan_out;

	return chan;
}

void
gtidmac_chan_free(void *tag, int chan)
{
	struct gtidmac_softc *sc = tag;

/* maybe need lock */

	sc->sc_cdesc[chan].chan_running = NULL;

/* unlock */
}

/* ARGSUSED */
int
gtidmac_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
	      bus_dmamap_t *dmamap_out, bus_size_t size)
{
	struct gtidmac_softc *sc = tag;
	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
	struct gtidmac_desc *desc;
	uint32_t ccl, bcnt, ires, ores;
	int n = 0, iidx, oidx;

	KASSERT(ninputs == 0 || ninputs == 1);

	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
#ifdef DIAGNOSTIC
	if (ccl & GTIDMAC_CCLR_CHANACT)
		panic("gtidmac_setup: chan%d already active", chan);
#endif

	/* We always use chained mode, max (16M - 1) bytes per descriptor */
	ccl = (GTIDMAC_CCLR_DESCMODE_16M				|
#ifdef GTIDMAC_DEBUG
	    GTIDMAC_CCLR_CDEN						|
#endif
	    GTIDMAC_CCLR_TRANSFERMODE_B /* Transfer Mode: Block */	|
	    GTIDMAC_CCLR_INTMODE_NULL   /* Intr Mode: Next Desc NULL */	|
	    GTIDMAC_CCLR_CHAINMODE_C    /* Chain Mode: Chained */);
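	/*
	 * When a map's size differs from the transfer size, hold that
	 * address: the map then covers a fixed pattern (e.g. the 16-byte
	 * fill buffer) and its size selects the burst limit below.
	 */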
	if (size != (*dmamap_in)->dm_mapsize) {
		ccl |= GTIDMAC_CCLR_SRCHOLD;
		if ((*dmamap_in)->dm_mapsize == 8)
			ccl |= GTIDMAC_CCLR_SBL_8B;
		else if ((*dmamap_in)->dm_mapsize == 16)
			ccl |= GTIDMAC_CCLR_SBL_16B;
		else if ((*dmamap_in)->dm_mapsize == 32)
			ccl |= GTIDMAC_CCLR_SBL_32B;
		else if ((*dmamap_in)->dm_mapsize == 64)
			ccl |= GTIDMAC_CCLR_SBL_64B;
		else if ((*dmamap_in)->dm_mapsize == 128)
			ccl |= GTIDMAC_CCLR_SBL_128B;
		else
			panic("gtidmac_setup: chan%d source:"
			    " unsupported hold size", chan);
	} else
		ccl |= GTIDMAC_CCLR_SBL_128B;
	if (size != (*dmamap_out)->dm_mapsize) {
		ccl |= GTIDMAC_CCLR_DESTHOLD;
		if ((*dmamap_out)->dm_mapsize == 8)
			ccl |= GTIDMAC_CCLR_DBL_8B;
		else if ((*dmamap_out)->dm_mapsize == 16)
			ccl |= GTIDMAC_CCLR_DBL_16B;
		else if ((*dmamap_out)->dm_mapsize == 32)
			ccl |= GTIDMAC_CCLR_DBL_32B;
		else if ((*dmamap_out)->dm_mapsize == 64)
			ccl |= GTIDMAC_CCLR_DBL_64B;
		else if ((*dmamap_out)->dm_mapsize == 128)
			ccl |= GTIDMAC_CCLR_DBL_128B;
		else
			panic("gtidmac_setup: chan%d destination:"
			    " unsupported hold size", chan);
	} else
		ccl |= GTIDMAC_CCLR_DBL_128B;

	fstdd = SLIST_FIRST(&sc->sc_dlist);
	if (fstdd == NULL) {
		aprint_error_dev(sc->sc_dev, "no descriptor\n");
		return ENOMEM;
	}
	SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
	sc->sc_cdesc[chan].chan_ddidx = fstdd->dd_index;

	dd = fstdd;
	ires = ores = 0;
	iidx = oidx = 0;
	while (1 /*CONSTCOND*/) {
		if (ccl & GTIDMAC_CCLR_SRCHOLD) {
			if (ccl & GTIDMAC_CCLR_DESTHOLD)
				bcnt = size;	/* src/dst hold */
			else
				bcnt = (*dmamap_out)->dm_segs[oidx].ds_len;
		} else if (ccl & GTIDMAC_CCLR_DESTHOLD)
			bcnt = (*dmamap_in)->dm_segs[iidx].ds_len;
		else
			bcnt = min((*dmamap_in)->dm_segs[iidx].ds_len - ires,
			    (*dmamap_out)->dm_segs[oidx].ds_len - ores);

		desc = dd->dd_idmac_vaddr;
		desc->bc.mode16m.bcnt =
		    bcnt | GTIDMAC_CIDMABCR_BCLEFT | GTIDMAC_CIDMABCR_OWN;
		desc->srcaddr = (*dmamap_in)->dm_segs[iidx].ds_addr + ires;
		desc->dstaddr = (*dmamap_out)->dm_segs[oidx].ds_addr + ores;

		n += bcnt;
		if (n >= size)
			break;
		if (!(ccl & GTIDMAC_CCLR_SRCHOLD)) {
			ires += bcnt;
			if (ires >= (*dmamap_in)->dm_segs[iidx].ds_len) {
				ires = 0;
				iidx++;
				KASSERT(iidx < (*dmamap_in)->dm_nsegs);
			}
		}
		if (!(ccl & GTIDMAC_CCLR_DESTHOLD)) {
			ores += bcnt;
			if (ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
				ores = 0;
				oidx++;
				KASSERT(oidx < (*dmamap_out)->dm_nsegs);
			}
		}

		nxtdd = SLIST_FIRST(&sc->sc_dlist);
		if (nxtdd == NULL) {
			aprint_error_dev(sc->sc_dev, "no descriptor\n");
			return ENOMEM;
		}
		SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);

		desc->nextdp = (uint32_t)nxtdd->dd_paddr;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
		    dd->dd_index * sizeof(*desc), sizeof(*desc),
#ifdef GTIDMAC_DEBUG
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#else
		    BUS_DMASYNC_PREWRITE);
#endif

		SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
		dd = nxtdd;
	}
	desc->nextdp = (uint32_t)NULL;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, dd->dd_index * sizeof(*desc),
#ifdef GTIDMAC_DEBUG
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#else
	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
#endif

	/* Set paddr of descriptor to Channel Next Descriptor Pointer */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan),
	    fstdd->dd_paddr);

#if BYTE_ORDER == LITTLE_ENDIAN
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_LE);
#else
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_BE);
#endif
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan), ccl);

#ifdef GTIDMAC_DEBUG
	gtidmac_dump_idmacdesc(sc, fstdd, ccl, 0/*pre*/);
#endif

	sc->sc_cdesc[chan].chan_totalcnt += size;

	return 0;
}

void
gtidmac_start(void *tag, int chan,
	      void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
				  int))
{
	struct gtidmac_softc *sc = tag;
	uint32_t ccl;

	DPRINTF(("%s:%d: starting\n", device_xname(sc->sc_dev), chan));

#ifdef GTIDMAC_DEBUG
	gtidmac_dump_idmacreg(sc, chan);
#endif

	sc->sc_cdesc[chan].chan_dma_done = dma_done_cb;

	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
	/* Start and 'Fetch Next Descriptor' */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan),
	    ccl | GTIDMAC_CCLR_CHANEN | GTIDMAC_CCLR_FETCHND);
}

static uint32_t
gtidmac_finish(void *tag, int chan, int error)
{
	struct gtidmac_softc *sc = tag;
	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
	struct gtidmac_desc *desc;

	fstdd = &sc->sc_dd_buffer[sc->sc_cdesc[chan].chan_ddidx];

#ifdef GTIDMAC_DEBUG
	if (error || gtidmac_debug > 1) {
		uint32_t ccl;

		gtidmac_dump_idmacreg(sc, chan);
		ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    GTIDMAC_CCLR(chan));
		gtidmac_dump_idmacdesc(sc, fstdd, ccl, 1/*post*/);
	}
#endif

	dd = fstdd;
	do {
		desc = dd->dd_idmac_vaddr;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
		    dd->dd_index * sizeof(*desc), sizeof(*desc),
#ifdef GTIDMAC_DEBUG
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#else
		    BUS_DMASYNC_POSTWRITE);
#endif

		nxtdd = SLIST_NEXT(dd, dd_next);
		SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
		dd = nxtdd;
	} while (desc->nextdp);

	return 0;
}

/*
 * XORE functions
 */
int
mvxore_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
		  bus_dmamap_t **dmamap_out, void *object)
{
	struct gtidmac_softc *sc = tag;
	int chan;

/* maybe need lock */

	for (chan = 0; chan < sc->sc_mvxore_nchan; chan++)
		if (sc->sc_cdesc_xore[chan].chan_running == NULL)
			break;
	if (chan >= sc->sc_mvxore_nchan)
		return -1;


	sc->sc_cdesc_xore[chan].chan_running = object;

/* unlock */

	*dmamap_in = sc->sc_cdesc_xore[chan].chan_in;
	*dmamap_out = &sc->sc_cdesc_xore[chan].chan_out;

	return chan;
}

void
mvxore_chan_free(void *tag, int chan)
{
	struct gtidmac_softc *sc = tag;

/* maybe need lock */

	sc->sc_cdesc_xore[chan].chan_running = NULL;

/* unlock */
}

/* ARGSUSED */
int
mvxore_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
	     bus_dmamap_t *dmamap_out, bus_size_t size)
{
	struct gtidmac_softc *sc = tag;
	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
	struct mvxore_desc *desc;
	uint32_t xexc, bcnt, cmd, lastcmd;
	int n = 0, i;
	uint32_t ires[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, ores = 0;
	int iidx[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, oidx = 0;

#ifdef DIAGNOSTIC
	uint32_t xexact;

	xexact = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(chan));
	if ((xexact & MVXORE_XEXACTR_XESTATUS_MASK) ==
	    MVXORE_XEXACTR_XESTATUS_ACT)
		panic("mvxore_setup: chan%d already active;"
		    " mvxore does not support hot insertion", chan);
#endif

	xexc =
	    (MVXORE_XEXCR_REGACCPROTECT	|
	     MVXORE_XEXCR_DBL_128B	|
	     MVXORE_XEXCR_SBL_128B);
	cmd = lastcmd = 0;
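	/*
	 * Select the operation mode from the input count: two or more
	 * inputs use XOR; one input uses CRC32 when no output map is
	 * loaded, otherwise plain DMA; zero inputs use ECC (or, if
	 * enabled, memory init) and run without a descriptor chain.
	 */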
	if (ninputs > 1) {
		xexc |= MVXORE_XEXCR_OM_XOR;
		lastcmd = cmd = (1 << ninputs) - 1;
	} else if (ninputs == 1) {
		if ((*dmamap_out)->dm_nsegs == 0) {
			xexc |= MVXORE_XEXCR_OM_CRC32;
			lastcmd = MVXORE_DESC_CMD_CRCLAST;
		} else
			xexc |= MVXORE_XEXCR_OM_DMA;
	} else if (ninputs == 0) {
		if ((*dmamap_out)->dm_nsegs != 1) {
			aprint_error_dev(sc->sc_dev,
			    "XORE does not support %d DMA segments\n",
			    (*dmamap_out)->dm_nsegs);
			return EINVAL;
		}

		if ((*dmamap_in)->dm_mapsize == 0) {
			xexc |= MVXORE_XEXCR_OM_ECC;

			/* XXXXX: Maybe need to set Timer Mode registers? */

#if 0
		} else if ((*dmamap_in)->dm_mapsize == 8 ||
		    (*dmamap_in)->dm_mapsize == 16) { /* in case dmover */
			uint64_t pattern;

			/* XXXX: Get pattern data */

			KASSERT((*dmamap_in)->dm_mapsize == 8 ||
			    (void *)((uint32_t)(*dmamap_in)->_dm_origbuf &
						~PAGE_MASK) == sc->sc_pbuf);
			pattern = *(uint64_t *)(*dmamap_in)->_dm_origbuf;

			/* XXXXX: XORE has an IVR.  We should get this first. */
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRL,
			    pattern);
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRH,
			    pattern >> 32);

			xexc |= MVXORE_XEXCR_OM_MEMINIT;
#endif
		} else {
			aprint_error_dev(sc->sc_dev,
			    "XORE does not support DMA mapsize %zd\n",
			    (*dmamap_in)->dm_mapsize);
			return EINVAL;
		}
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXDPR(chan),
		    (*dmamap_out)->dm_segs[0].ds_addr);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXBSR(chan),
		    (*dmamap_out)->dm_mapsize);

		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(chan),
		    xexc);
		sc->sc_cdesc_xore[chan].chan_totalcnt += size;

		return 0;
	}

	/* Make descriptor for DMA/CRC32/XOR */

	fstdd = SLIST_FIRST(&sc->sc_dlist_xore);
	if (fstdd == NULL) {
		aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
		return ENOMEM;
	}
	SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
	sc->sc_cdesc_xore[chan].chan_ddidx =
	    fstdd->dd_index + GTIDMAC_NDESC * sc->sc_gtidmac_nchan;

	dd = fstdd;
	while (1 /*CONSTCOND*/) {
		desc = dd->dd_xore_vaddr;
		desc->stat = MVXORE_DESC_STAT_OWN;
		desc->cmd = cmd;
		if ((*dmamap_out)->dm_nsegs != 0) {
			desc->dstaddr =
			    (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
			bcnt = (*dmamap_out)->dm_segs[oidx].ds_len - ores;
		} else {
			desc->dstaddr = 0;
			bcnt = MVXORE_MAXXFER;	/* XXXXX */
		}
		for (i = 0; i < ninputs; i++) {
			desc->srcaddr[i] =
			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_addr + ires[i];
			bcnt = min(bcnt,
			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len - ires[i]);
		}
		desc->bcnt = bcnt;

		n += bcnt;
		if (n >= size)
			break;
		ores += bcnt;
		if ((*dmamap_out)->dm_nsegs != 0 &&
		    ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
			ores = 0;
			oidx++;
			KASSERT(oidx < (*dmamap_out)->dm_nsegs);
		}
		for (i = 0; i < ninputs; i++) {
			ires[i] += bcnt;
			if (ires[i] >=
			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len) {
				ires[i] = 0;
				iidx[i]++;
				KASSERT(iidx[i] < (*dmamap_in[i]).dm_nsegs);
			}
		}

		nxtdd = SLIST_FIRST(&sc->sc_dlist_xore);
		if (nxtdd == NULL) {
			aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
			return ENOMEM;
		}
		SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);

		desc->nextda = (uint32_t)nxtdd->dd_paddr;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
		    dd->dd_index * sizeof(*desc), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
		dd = nxtdd;
	}
	desc->cmd = lastcmd;
	desc->nextda = (uint32_t)NULL;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
	    dd->dd_index * sizeof(*desc), sizeof(*desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Set paddr of descriptor to Channel Next Descriptor Pointer */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXNDPR(chan),
	    fstdd->dd_paddr);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(chan), xexc);

#ifdef GTIDMAC_DEBUG
	gtidmac_dump_xoredesc(sc, fstdd, xexc, 0/*pre*/);
#endif

	sc->sc_cdesc_xore[chan].chan_totalcnt += size;

	return 0;
}

void
mvxore_start(void *tag, int chan,
	     void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
				 int))
{
	struct gtidmac_softc *sc = tag;
	uint32_t xexact;

	DPRINTF(("%s:%d: xore starting\n", device_xname(sc->sc_dev), chan));

#ifdef GTIDMAC_DEBUG
	gtidmac_dump_xorereg(sc, chan);
#endif

	sc->sc_cdesc_xore[chan].chan_dma_done = dma_done_cb;

	xexact = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(chan));
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(chan),
	    xexact | MVXORE_XEXACTR_XESTART);
}

static uint32_t
mvxore_finish(void *tag, int chan, int error)
{
	struct gtidmac_softc *sc = tag;
	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
	struct mvxore_desc *desc;
	uint32_t xexc;

#ifdef GTIDMAC_DEBUG
	if (error || gtidmac_debug > 1)
		gtidmac_dump_xorereg(sc, chan);
#endif

	xexc = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(chan));
	if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_ECC ||
	    (xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_MEMINIT)
		return 0;

	fstdd = &sc->sc_dd_buffer[sc->sc_cdesc_xore[chan].chan_ddidx];

#ifdef GTIDMAC_DEBUG
	if (error || gtidmac_debug > 1)
		gtidmac_dump_xoredesc(sc, fstdd, xexc, 1/*post*/);
#endif

	dd = fstdd;
	do {
		desc = dd->dd_xore_vaddr;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
		    dd->dd_index * sizeof(*desc), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		nxtdd = SLIST_NEXT(dd, dd_next);
		SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
		dd = nxtdd;
	} while (desc->nextda);

	if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_CRC32)
		return desc->result;
	return 0;
}

static void
gtidmac_wininit(struct gtidmac_softc *sc)
{
	device_t pdev = device_parent(sc->sc_dev);
	uint64_t base;
	uint32_t size, cxap, en;
	int window, target, attr, rv, i;
	struct {
		int tag;
		int winacc;
	} targets[] = {
		{ MARVELL_TAG_SDRAM_CS0,	GTIDMAC_CXAPR_WINACC_FA },
		{ MARVELL_TAG_SDRAM_CS1,	GTIDMAC_CXAPR_WINACC_FA },
		{ MARVELL_TAG_SDRAM_CS2,	GTIDMAC_CXAPR_WINACC_FA },
		{ MARVELL_TAG_SDRAM_CS3,	GTIDMAC_CXAPR_WINACC_FA },

		/* The following targets can also be set: */
		/*   Devices       = 0x1(ORION_TARGETID_DEVICE_*) */
		/*   PCI           = 0x3(ORION_TARGETID_PCI0_*) */
		/*   PCI Express   = 0x4(ORION_TARGETID_PEX?_*) */
		/*   Tunit SRAM(?) = 0x5(???) */

		{ MARVELL_TAG_UNDEFINED,	GTIDMAC_CXAPR_WINACC_NOAA }
	};
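	/*
	 * Program one decode window per usable target: 'en' starts with
	 * all BAER bits set and each configured window clears its bit,
	 * while the access-control bits accumulate in 'cxap', which is
	 * applied to every protection register afterwards.
	 */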

	en = 0xff;
	cxap = 0;
	for (window = 0, i = 0;
	    targets[i].tag != MARVELL_TAG_UNDEFINED && window < GTIDMAC_NWINDOW;
	    i++) {
		rv = marvell_winparams_by_tag(pdev, targets[i].tag,
		    &target, &attr, &base, &size);
		if (rv != 0 || size == 0)
			continue;

		if (base > 0xffffffffULL) {
			if (window >= GTIDMAC_NREMAP) {
				aprint_error_dev(sc->sc_dev,
				    "can't remap window %d\n", window);
				continue;
			}
			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			    GTIDMAC_HARXR(window), (base >> 32) & 0xffffffff);
		}
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BARX(window),
		    GTIDMAC_BARX_TARGET(target)	|
		    GTIDMAC_BARX_ATTR(attr)	|
		    GTIDMAC_BARX_BASE(base));
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_SRX(window),
		    GTIDMAC_SRX_SIZE(size));
		en &= ~GTIDMAC_BAER_EN(window);
		cxap |= GTIDMAC_CXAPR_WINACC(window, targets[i].winacc);
		window++;
	}
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BAER, en);

	for (i = 0; i < GTIDMAC_NACCPROT; i++)
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CXAPR(i),
		    cxap);
}

static void
mvxore_wininit(struct gtidmac_softc *sc)
{
	device_t pdev = device_parent(sc->sc_dev);
	uint64_t base;
	uint32_t target, attr, size, xexwc;
	int window, rv, i;
	struct {
		int tag;
		int winacc;
	} targets[] = {
		{ MARVELL_TAG_SDRAM_CS0,	MVXORE_XEXWCR_WINACC_FA },
		{ MARVELL_TAG_SDRAM_CS1,	MVXORE_XEXWCR_WINACC_FA },
		{ MARVELL_TAG_SDRAM_CS2,	MVXORE_XEXWCR_WINACC_FA },
		{ MARVELL_TAG_SDRAM_CS3,	MVXORE_XEXWCR_WINACC_FA },

		{ MARVELL_TAG_UNDEFINED,	MVXORE_XEXWCR_WINACC_NOAA }
	};
   1638 
   1639 	xexwc = 0;
   1640 	for (window = 0, i = 0;
   1641 	    targets[i].tag != MARVELL_TAG_UNDEFINED && window < MVXORE_NWINDOW;
   1642 	    i++) {
   1643 		rv = marvell_winparams_by_tag(pdev, targets[i].tag,
   1644 		    &target, &attr, &base, &size);
   1645 		if (rv != 0 || size == 0)
   1646 			continue;
   1647 
   1648 		if (base > 0xffffffffULL) {
   1649 			if (window >= MVXORE_NREMAP) {
   1650 				aprint_error_dev(sc->sc_dev,
   1651 				    "can't remap window %d\n", window);
   1652 				continue;
   1653 			}
   1654 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
   1655 			    MVXORE_XEHARRX(window), (base >> 32) & 0xffffffff);
   1656 		}
   1657 
   1658 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEBARX(window),
   1659 		    MVXORE_XEBARX_TARGET(target) |
   1660 		    MVXORE_XEBARX_ATTR(attr) |
   1661 		    MVXORE_XEBARX_BASE(base));
   1662 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
   1663 		    MVXORE_XESMRX(window), MVXORE_XESMRX_SIZE(size));
   1664 		xexwc |= (MVXORE_XEXWCR_WINEN(window) |
   1665 		    MVXORE_XEXWCR_WINACC(window, targets[i].winacc));
   1666 		window++;
   1667 	}
   1668 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXWCR(0), xexwc);
   1669 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXWCR(1), xexwc);
   1670 
	/* XXX: clear the address override control registers (no override). */
   1672 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXAOCR(0), 0);
   1673 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXAOCR(1), 0);
   1674 }
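
/*
 * Illustrative sketch, not part of the original driver: the same
 * read-back idea for the XORE windows.  The function name is
 * hypothetical; only the MVXORE_* macros used above are assumed.  Both
 * ports share the window registers, so dumping port 0's control word
 * is sufficient.
 */
#ifdef GTIDMAC_DEBUG
static void __unused
mvxore_dump_window(struct gtidmac_softc *sc)
{
	uint32_t bar, sz, wc;
	int window;

	wc = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXWCR(0));
	for (window = 0; window < MVXORE_NWINDOW; window++) {
		bar = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    MVXORE_XEBARX(window));
		sz = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    MVXORE_XESMRX(window));
		printf("xore window %d: base 0x%08x size 0x%08x (%s)\n",
		    window, bar, sz,
		    (wc & MVXORE_XEXWCR_WINEN(window)) ?
		    "enabled" : "disabled");
	}
}
#endif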
   1675 
   1676 
   1677 #ifdef GTIDMAC_DEBUG
   1678 static void
   1679 gtidmac_dump_idmacreg(struct gtidmac_softc *sc, int chan)
   1680 {
   1681 	uint32_t val;
   1682 	char buf[256];
   1683 
   1684 	printf("IDMAC Registers\n");
   1685 
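	/*
	 * Added note: the "\177\020..." strings passed to snprintb(3)
	 * here and below are new-style bitmask formats: \177 selects the
	 * new format, \020 (decimal 16) is the number base, and each
	 * "b\NN<name>\0" element labels bit NN (octal).
	 */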
   1686 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMABCR(chan));
   1687 	snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036BCLeft\0", val);
   1688 	printf("  Byte Count                 : %s\n", buf);
   1689 	printf("    ByteCnt                  :   0x%06x\n",
   1690 	    val & GTIDMAC_CIDMABCR_BYTECNT_MASK);
   1691 	printf("  Source Address             : 0x%08x\n",
   1692 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMASAR(chan)));
   1693 	printf("  Destination Address        : 0x%08x\n",
   1694 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMADAR(chan)));
   1695 	printf("  Next Descriptor Pointer    : 0x%08x\n",
   1696 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan)));
   1697 	printf("  Current Descriptor Pointer : 0x%08x\n",
   1698 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCDPR(chan)));
   1699 
   1700 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
   1701 	snprintb(buf, sizeof(buf),
   1702 	    "\177\020b\024Abr\0b\021CDEn\0b\016ChanAct\0b\015FetchND\0"
   1703 	    "b\014ChanEn\0b\012IntMode\0b\005DestHold\0b\003SrcHold\0",
   1704 	    val);
   1705 	printf("  Channel Control (Low)      : %s\n", buf);
   1706 	printf("    SrcBurstLimit            : %s Bytes\n",
   1707 	  (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_128B ? "128" :
   1708 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_64B ? "64" :
   1709 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_32B ? "32" :
   1710 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_16B ? "16" :
   1711 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_8B ? "8" :
   1712 	    "unknwon");
   1713 	printf("    DstBurstLimit            : %s Bytes\n",
   1714 	  (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_128B ? "128" :
   1715 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_64B ? "64" :
   1716 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_32B ? "32" :
   1717 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_16B ? "16" :
   1718 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_8B ? "8" :
   1719 	    "unknwon");
   1720 	printf("    ChainMode                : %sChained\n",
   1721 	    val & GTIDMAC_CCLR_CHAINMODE_NC ? "Non-" : "");
   1722 	printf("    TransferMode             : %s\n",
   1723 	    val & GTIDMAC_CCLR_TRANSFERMODE_B ? "Block" : "Demand");
   1724 	printf("    DescMode                 : %s\n",
   1725 	    val & GTIDMAC_CCLR_DESCMODE_16M ? "16M" : "64k");
   1726 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan));
   1727 	snprintb(buf, sizeof(buf),
   1728 	    "\177\020b\001DescByteSwap\0b\000Endianness\0", val);
   1729 	printf("  Channel Control (High)     : %s\n", buf);
   1730 }
   1731 
   1732 static void
   1733 gtidmac_dump_idmacdesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
   1734 		       uint32_t mode, int post)
   1735 {
   1736 	struct gtidmac_desc *desc;
   1737 	int i;
   1738 	char buf[256];
   1739 
   1740 	printf("IDMAC Descriptor\n");
   1741 
   1742 	i = 0;
   1743 	while (1 /*CONSTCOND*/) {
   1744 		if (post)
   1745 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
   1746 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
   1747 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1748 
   1749 		desc = dd->dd_idmac_vaddr;
   1750 
   1751 		printf("%d (0x%lx)\n", i, dd->dd_paddr);
   1752 		if (mode & GTIDMAC_CCLR_DESCMODE_16M) {
   1753 			snprintb(buf, sizeof(buf),
   1754 			    "\177\020b\037Own\0b\036BCLeft\0",
   1755 			    desc->bc.mode16m.bcnt);
   1756 			printf("  Byte Count              : %s\n", buf);
   1757 			printf("    ByteCount             :   0x%06x\n",
   1758 			    desc->bc.mode16m.bcnt &
   1759 			    GTIDMAC_CIDMABCR_BYTECNT_MASK);
   1760 		} else {
   1761 			printf("  Byte Count              :     0x%04x\n",
   1762 			    desc->bc.mode64k.bcnt);
   1763 			printf("  Remind Byte Count       :     0x%04x\n",
   1764 			    desc->bc.mode64k.rbc);
   1765 		}
   1766 		printf("  Source Address          : 0x%08x\n", desc->srcaddr);
   1767 		printf("  Destination Address     : 0x%08x\n", desc->dstaddr);
   1768 		printf("  Next Descriptor Pointer : 0x%08x\n", desc->nextdp);
   1769 
		if (desc->nextdp == 0)
   1771 			break;
   1772 
   1773 		if (!post)
   1774 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
   1775 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
   1776 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1777 
   1778 		i++;
   1779 		dd = SLIST_NEXT(dd, dd_next);
   1780 	}
   1781 	if (!post)
   1782 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
   1783 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
   1784 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1785 }
   1786 
   1787 static void
   1788 gtidmac_dump_xorereg(struct gtidmac_softc *sc, int chan)
   1789 {
   1790 	uint32_t val, opmode;
   1791 	char buf[64];
   1792 
   1793 	printf("XORE Registers\n");
   1794 
   1795 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(chan));
   1796 	snprintb(buf, sizeof(buf),
   1797 	    "\177\020"
   1798 	    "b\017RegAccProtect\0b\016DesSwp\0b\015DwrReqSwp\0b\014DrdResSwp\0",
   1799 	    val);
   1800 	printf(" Configuration    : 0x%s\n", buf);
   1801 	opmode = val & MVXORE_XEXCR_OM_MASK;
   1802 	printf("    OperationMode : %s operation\n",
   1803 	  opmode == MVXORE_XEXCR_OM_XOR ? "XOR calculate" :
   1804 	  opmode == MVXORE_XEXCR_OM_CRC32 ? "CRC-32 calculate" :
   1805 	  opmode == MVXORE_XEXCR_OM_DMA ? "DMA" :
   1806 	  opmode == MVXORE_XEXCR_OM_ECC ? "ECC cleanup" :
   1807 	  opmode == MVXORE_XEXCR_OM_MEMINIT ? "Memory Initialization" :
   1808 	  "unknown");
   1809 	printf("    SrcBurstLimit : %s Bytes\n",
   1810 	  (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
   1811 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
   1812 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
   1813 	    "unknwon");
   1814 	printf("    DstBurstLimit : %s Bytes\n",
   1815 	  (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
   1816 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
   1817 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
   1818 	    "unknwon");
   1819 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(chan));
   1820 	printf("  Activation      : 0x%08x\n", val);
   1821 	val &= MVXORE_XEXACTR_XESTATUS_MASK;
   1822 	printf("    XEstatus      : %s\n",
   1823 	    val == MVXORE_XEXACTR_XESTATUS_NA ? "Channel not active" :
   1824 	    val == MVXORE_XEXACTR_XESTATUS_ACT ? "Channel active" :
   1825 	    val == MVXORE_XEXACTR_XESTATUS_P ? "Channel paused" : "???");
   1826 
   1827 	if (opmode == MVXORE_XEXCR_OM_XOR ||
   1828 	    opmode == MVXORE_XEXCR_OM_CRC32 ||
   1829 	    opmode == MVXORE_XEXCR_OM_DMA) {
   1830 		printf("  NextDescPtr     : 0x%08x\n",
   1831 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1832 		    MVXORE_XEXNDPR(chan)));
   1833 		printf("  CurrentDescPtr  : 0x%08x\n",
   1834 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1835 		    MVXORE_XEXCDPR(chan)));
   1836 	}
   1837 	printf("  ByteCnt         : 0x%08x\n",
   1838 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXBCR(chan)));
   1839 
   1840 	if (opmode == MVXORE_XEXCR_OM_ECC ||
   1841 	    opmode == MVXORE_XEXCR_OM_MEMINIT) {
   1842 		printf("  DstPtr          : 0x%08x\n",
   1843 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1844 		    MVXORE_XEXDPR(chan)));
   1845 		printf("  BlockSize       : 0x%08x\n",
   1846 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1847 		    MVXORE_XEXBSR(chan)));
   1848 
   1849 		if (opmode == MVXORE_XEXCR_OM_ECC) {
   1850 			val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1851 			    MVXORE_XETMCR);
   1852 			if (val & MVXORE_XETMCR_TIMEREN) {
   1853 				val >>= MVXORE_XETMCR_SECTIONSIZECTRL_SHIFT;
   1854 				val &= MVXORE_XETMCR_SECTIONSIZECTRL_MASK;
   1855 				printf("  SectionSizeCtrl : 0x%08x\n", 2 ^ val);
   1856 				printf("  TimerInitVal    : 0x%08x\n",
   1857 				    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1858 				    MVXORE_XETMIVR));
   1859 				printf("  TimerCrntVal    : 0x%08x\n",
   1860 				    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1861 				    MVXORE_XETMCVR));
   1862 			}
   1863 		} else	/* MVXORE_XEXCR_OM_MEMINIT */
   1864 			printf("  InitVal         : 0x%08x%08x\n",
   1865 			    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1866 			    MVXORE_XEIVRH),
   1867 			    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   1868 			    MVXORE_XEIVRL));
   1869 	}
   1870 }
   1871 
   1872 static void
   1873 gtidmac_dump_xoredesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
   1874 		      uint32_t mode, int post)
   1875 {
   1876 	struct mvxore_desc *desc;
   1877 	int i, j;
   1878 	char buf[256];
   1879 
   1880 	printf("XORE Descriptor\n");
   1881 
   1882 	mode &= MVXORE_XEXCR_OM_MASK;
   1883 
   1884 	i = 0;
   1885 	while (1 /*CONSTCOND*/) {
   1886 		if (post)
   1887 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
   1888 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
   1889 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1890 
   1891 		desc = dd->dd_xore_vaddr;
   1892 
   1893 		printf("%d (0x%lx)\n", i, dd->dd_paddr);
   1894 
   1895 		snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036Success\0",
   1896 		    desc->stat);
   1897 		printf("  Status                  : 0x%s\n", buf);
   1898 		if (desc->cmd & MVXORE_DESC_CMD_CRCLAST && post)
   1899 			printf("  CRC-32 Result           : 0x%08x\n",
   1900 			    desc->result);
   1901 		snprintb(buf, sizeof(buf),
   1902 		    "\177\020b\037EODIntEn\0b\036CRCLast\0"
   1903 		    "b\007Src7Cmd\0b\006Src6Cmd\0b\005Src5Cmd\0b\004Src4Cmd\0"
   1904 		    "b\003Src3Cmd\0b\002Src2Cmd\0b\001Src1Cmd\0b\000Src0Cmd\0",
   1905 		    desc->cmd);
   1906 		printf("  Command                 : 0x%s\n", buf);
   1907 		printf("  Next Descriptor Address : 0x%08x\n", desc->nextda);
   1908 		printf("  Byte Count              :   0x%06x\n", desc->bcnt);
   1909 		printf("  Destination Address     : 0x%08x\n", desc->dstaddr);
   1910 		if (mode == MVXORE_XEXCR_OM_XOR) {
   1911 			for (j = 0; j < MVXORE_NSRC; j++)
   1912 				if (desc->cmd & MVXORE_DESC_CMD_SRCCMD(j))
   1913 					printf("  Source Address#%d        :"
   1914 					    " 0x%08x\n", j, desc->srcaddr[j]);
   1915 		} else
   1916 			printf("  Source Address          : 0x%08x\n",
   1917 			    desc->srcaddr[0]);
   1918 
		if (desc->nextda == 0)
   1920 			break;
   1921 
   1922 		if (!post)
   1923 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
   1924 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
   1925 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1926 
   1927 		i++;
   1928 		dd = SLIST_NEXT(dd, dd_next);
   1929 	}
   1930 	if (!post)
   1931 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
   1932 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
   1933 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1934 }
   1935 #endif
   1936