/*	$NetBSD: gtidmac.c,v 1.11.6.1 2017/02/05 13:40:28 skrll Exp $	*/
/*
 * Copyright (c) 2008, 2012, 2016 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gtidmac.c,v 1.11.6.1 2017/02/05 13:40:28 skrll Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/endian.h>
#include <sys/kmem.h>

#include <uvm/uvm_param.h>	/* For PAGE_SIZE */

#include <dev/dmover/dmovervar.h>

#include <dev/marvell/gtidmacreg.h>
#include <dev/marvell/gtidmacvar.h>
#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>

#include <prop/proplib.h>

#include "locators.h"

#ifdef GTIDMAC_DEBUG
#define DPRINTF(x)	if (gtidmac_debug) printf x
int gtidmac_debug = 0;
#else
#define DPRINTF(x)
#endif

#define GTIDMAC_NDESC		64
#define GTIDMAC_MAXCHAN		8
#define MVXORE_NDESC		128
#define MVXORE_MAXCHAN		2

#define GTIDMAC_NSEGS		((GTIDMAC_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
#define MVXORE_NSEGS		((MVXORE_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
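/*
 * The NSEGS macros are the usual round-up division idiom:
 * (x + PAGE_SIZE - 1) / PAGE_SIZE == ceil(x / PAGE_SIZE).  As a worked
 * example, assuming GTIDMAC_MAXXFER were 64KB with 4KB pages,
 * GTIDMAC_NSEGS would come to 16; the real maximum transfer sizes are
 * defined in gtidmacreg.h.
 */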


struct gtidmac_softc;

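/*
 * Backend operation hooks: the IDMA controller and the XOR engine are both
 * driven through the same dmover(9) glue, so each engine supplies its own
 * channel alloc/free, setup, start and finish functions (see
 * gtidmac_functions and mvxore_functions below).
 */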
struct gtidmac_function {
	int (*chan_alloc)(void *, bus_dmamap_t **, bus_dmamap_t **, void *);
	void (*chan_free)(void *, int);
	int (*dma_setup)(void *, int, int, bus_dmamap_t *, bus_dmamap_t *,
			 bus_size_t);
	void (*dma_start)(void *, int,
			  void (*dma_done_cb)(void *, int, bus_dmamap_t *,
						      bus_dmamap_t *, int));
	uint32_t (*dma_finish)(void *, int, int);
};

struct gtidmac_dma_desc {
	int dd_index;
	union {
		struct gtidmac_desc *idmac_vaddr;
		struct mvxore_desc *xore_vaddr;
	} dd_vaddr;
#define dd_idmac_vaddr	dd_vaddr.idmac_vaddr
#define dd_xore_vaddr	dd_vaddr.xore_vaddr
	paddr_t dd_paddr;
	SLIST_ENTRY(gtidmac_dma_desc) dd_next;
};

struct gtidmac_softc {
	device_t sc_dev;

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;

	bus_dma_tag_t sc_dmat;
	struct gtidmac_dma_desc *sc_dd_buffer;
	bus_dma_segment_t sc_pattern_segment;
	struct {
		u_char pbuf[16];	/* 16 bytes/pattern */
	} *sc_pbuf;			/*   x 256 patterns */

	int sc_gtidmac_nchan;
	struct gtidmac_desc *sc_dbuf;
	bus_dmamap_t sc_dmap;
	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist;
	struct {
		bus_dmamap_t chan_in;		/* In dmamap */
		bus_dmamap_t chan_out;		/* Out dmamap */
		uint64_t chan_totalcnt;		/* total transferred bytes */
		int chan_ddidx;
		void *chan_running;		/* opaque object data */
		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
				      bus_dmamap_t *, int);
	} sc_cdesc[GTIDMAC_MAXCHAN];
	struct gtidmac_intr_arg {
		struct gtidmac_softc *ia_sc;
		uint32_t ia_cause;
		uint32_t ia_mask;
		uint32_t ia_eaddr;
		uint32_t ia_eselect;
	} sc_intrarg[GTIDMAC_NINTRRUPT];

	int sc_mvxore_nchan;
	struct mvxore_desc *sc_dbuf_xore;
	bus_dmamap_t sc_dmap_xore;
	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist_xore;
	struct {
		bus_dmamap_t chan_in[MVXORE_NSRC];	/* In dmamap */
		bus_dmamap_t chan_out;			/* Out dmamap */
		uint64_t chan_totalcnt;			/* total transferred */
		int chan_ddidx;
		void *chan_running;			/* opaque object data */
		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
				      bus_dmamap_t *, int);
	} sc_cdesc_xore[MVXORE_MAXCHAN];

	struct dmover_backend sc_dmb;
	struct dmover_backend sc_dmb_xore;
	int sc_dmb_busy;
};
struct gtidmac_softc *gtidmac_softc = NULL;

static int gtidmac_match(device_t, struct cfdata *, void *);
static void gtidmac_attach(device_t, device_t, void *);

static int gtidmac_intr(void *);
static int mvxore_port0_intr(void *);
static int mvxore_port1_intr(void *);
static int mvxore_intr(struct gtidmac_softc *, int);

static void gtidmac_process(struct dmover_backend *);
static void gtidmac_dmover_run(struct dmover_backend *);
static void gtidmac_dmover_done(void *, int, bus_dmamap_t *, bus_dmamap_t *,
				int);
static __inline int gtidmac_dmmap_load(struct gtidmac_softc *, bus_dmamap_t,
				dmover_buffer_type, dmover_buffer *, int);
static __inline void gtidmac_dmmap_unload(struct gtidmac_softc *,
				bus_dmamap_t, int);

static uint32_t gtidmac_finish(void *, int, int);
static uint32_t mvxore_finish(void *, int, int);

static void gtidmac_wininit(struct gtidmac_softc *, enum marvell_tags *);
static void mvxore_wininit(struct gtidmac_softc *, enum marvell_tags *);

static int gtidmac_buffer_setup(struct gtidmac_softc *);
static int mvxore_buffer_setup(struct gtidmac_softc *);

#ifdef GTIDMAC_DEBUG
static void gtidmac_dump_idmacreg(struct gtidmac_softc *, int);
static void gtidmac_dump_idmacdesc(struct gtidmac_softc *,
				   struct gtidmac_dma_desc *, uint32_t, int);
static void gtidmac_dump_xorereg(struct gtidmac_softc *, int);
static void gtidmac_dump_xoredesc(struct gtidmac_softc *,
				  struct gtidmac_dma_desc *, uint32_t, int);
#endif


static struct gtidmac_function gtidmac_functions = {
	.chan_alloc = gtidmac_chan_alloc,
	.chan_free = gtidmac_chan_free,
	.dma_setup = gtidmac_setup,
	.dma_start = gtidmac_start,
	.dma_finish = gtidmac_finish,
};

static struct gtidmac_function mvxore_functions = {
	.chan_alloc = mvxore_chan_alloc,
	.chan_free = mvxore_chan_free,
	.dma_setup = mvxore_setup,
	.dma_start = mvxore_start,
	.dma_finish = mvxore_finish,
};

static const struct dmover_algdesc gtidmac_algdescs[] = {
	{
		.dad_name = DMOVER_FUNC_ZERO,
		.dad_data = &gtidmac_functions,
		.dad_ninputs = 0
	},
	{
		.dad_name = DMOVER_FUNC_FILL8,
		.dad_data = &gtidmac_functions,
		.dad_ninputs = 0
	},
	{
		.dad_name = DMOVER_FUNC_COPY,
		.dad_data = &gtidmac_functions,
		.dad_ninputs = 1
	},
};

static const struct dmover_algdesc mvxore_algdescs[] = {
#if 0
	/*
	 * These operations carry too many restrictions on the XOR engine;
	 * the IDMAC must be used for them instead.
	 */
	{
		.dad_name = DMOVER_FUNC_ZERO,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 0
	},
	{
		.dad_name = DMOVER_FUNC_FILL8,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 0
	},
#endif
	{
		.dad_name = DMOVER_FUNC_COPY,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 1
	},
	{
		.dad_name = DMOVER_FUNC_ISCSI_CRC32C,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 1
	},
	{
		.dad_name = DMOVER_FUNC_XOR2,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 2
	},
	{
		.dad_name = DMOVER_FUNC_XOR3,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 3
	},
	{
		.dad_name = DMOVER_FUNC_XOR4,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 4
	},
	{
		.dad_name = DMOVER_FUNC_XOR5,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 5
	},
	{
		.dad_name = DMOVER_FUNC_XOR6,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 6
	},
	{
		.dad_name = DMOVER_FUNC_XOR7,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 7
	},
	{
		.dad_name = DMOVER_FUNC_XOR8,
		.dad_data = &mvxore_functions,
		.dad_ninputs = 8
	},
};

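/*
 * In the tables above, dad_ninputs encodes the operation arity for
 * dmover(9): COPY and ISCSI_CRC32C take one input, XOR2..XOR8 take two
 * through eight, and ZERO/FILL8 take none (they source from the pattern
 * buffer instead; see gtidmac_dmover_run()).
 */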
static int orion_88f5182_xore_irqs[] = { 30, 31 };
static int kirkwood_xore_irqs[] = { 5, 6, 7, 8 };
static int dove_xore_irqs[] = { 39, 40, 42, 43 };
static int armadaxp_xore_irqs0[] = { 51, 52 };
static int armadaxp_xore_irqs1[] = { 94, 95 };

static struct {
	int model;
	int idmac_nchan;
	int idmac_irq;
	int xore_nchan;
	int *xore_irqs;
} channels[] = {
	/*
	 * Marvell System Controllers:
	 * irqs must be passed in attach_args.
	 */
	{ MARVELL_DISCOVERY,		8, -1, 0, NULL },
	{ MARVELL_DISCOVERY_II,		8, -1, 0, NULL },
	{ MARVELL_DISCOVERY_III,	8, -1, 0, NULL },
#if 0
	{ MARVELL_DISCOVERY_LT,		4, -1, 2, NULL },
	{ MARVELL_DISCOVERY_V,		4, -1, 2, NULL },
	{ MARVELL_DISCOVERY_VI,		4, -1, 2, NULL },		????
#endif

	/*
	 * Marvell System on Chips:
	 * No irqs needed in attach_args; these are always wired statically
	 * to an interrupt pin.
	 */
	{ MARVELL_ORION_1_88F1181,	4, 24, 0, NULL },
	{ MARVELL_ORION_2_88F1281,	4, 24, 0, NULL },
	{ MARVELL_ORION_1_88F5082,	4, 24, 0, NULL },
	{ MARVELL_ORION_1_88F5180N,	4, 24, 0, NULL },
	{ MARVELL_ORION_1_88F5181,	4, 24, 0, NULL },
	{ MARVELL_ORION_1_88F5182,	4, 24, 2, orion_88f5182_xore_irqs },
	{ MARVELL_ORION_2_88F5281,	4, 24, 0, NULL },
	{ MARVELL_ORION_1_88W8660,	4, 24, 0, NULL },
	{ MARVELL_KIRKWOOD_88F6180,	0, -1, 4, kirkwood_xore_irqs },
	{ MARVELL_KIRKWOOD_88F6192,	0, -1, 4, kirkwood_xore_irqs },
	{ MARVELL_KIRKWOOD_88F6281,	0, -1, 4, kirkwood_xore_irqs },
	{ MARVELL_KIRKWOOD_88F6282,	0, -1, 4, kirkwood_xore_irqs },
	{ MARVELL_DOVE_88AP510,		0, -1, 4, dove_xore_irqs },
	{ MARVELL_ARMADAXP_MV78130,	4, 33, 2, armadaxp_xore_irqs0 },
	{ MARVELL_ARMADAXP_MV78130,	0, -1, 2, armadaxp_xore_irqs1 },
	{ MARVELL_ARMADAXP_MV78160,	4, 33, 2, armadaxp_xore_irqs0 },
	{ MARVELL_ARMADAXP_MV78160,	0, -1, 2, armadaxp_xore_irqs1 },
	{ MARVELL_ARMADAXP_MV78230,	4, 33, 2, armadaxp_xore_irqs0 },
	{ MARVELL_ARMADAXP_MV78230,	0, -1, 2, armadaxp_xore_irqs1 },
	{ MARVELL_ARMADAXP_MV78260,	4, 33, 2, armadaxp_xore_irqs0 },
	{ MARVELL_ARMADAXP_MV78260,	0, -1, 2, armadaxp_xore_irqs1 },
	{ MARVELL_ARMADAXP_MV78460,	4, 33, 2, armadaxp_xore_irqs0 },
	{ MARVELL_ARMADAXP_MV78460,	0, -1, 2, armadaxp_xore_irqs1 },
};

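/*
 * Each ArmadaXP model is listed twice: the first entry (unit 0) carries
 * the IDMAC channels plus one XOR engine, the second (unit 1) a second
 * XOR engine only.  gtidmac_match() counts the entries that match a
 * model, so mva_unit selects between them.
 */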
struct gtidmac_winacctbl *gtidmac_winacctbl;
struct gtidmac_winacctbl *mvxore_winacctbl;

CFATTACH_DECL_NEW(gtidmac_gt, sizeof(struct gtidmac_softc),
    gtidmac_match, gtidmac_attach, NULL, NULL);
CFATTACH_DECL_NEW(gtidmac_mbus, sizeof(struct gtidmac_softc),
    gtidmac_match, gtidmac_attach, NULL, NULL);


/* ARGSUSED */
static int
gtidmac_match(device_t parent, struct cfdata *match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	int unit, i;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;
	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
		return 0;
	unit = 0;
	for (i = 0; i < __arraycount(channels); i++)
		if (mva->mva_model == channels[i].model) {
			if (mva->mva_unit == unit) {
				mva->mva_size = GTIDMAC_SIZE;
				return 1;
			}
			unit++;
		}
	return 0;
}

/* ARGSUSED */
static void
gtidmac_attach(device_t parent, device_t self, void *aux)
{
	struct gtidmac_softc *sc = device_private(self);
	struct marvell_attach_args *mva = aux;
	prop_dictionary_t dict = device_properties(self);
	uint32_t idmac_irq, xore_irq, dmb_speed;
	int *xore_irqs;
	int unit, idmac_nchan, xore_nchan, nsegs, i, j, n;

	unit = 0;
	for (i = 0; i < __arraycount(channels); i++)
		if (mva->mva_model == channels[i].model) {
			if (mva->mva_unit == unit)
				break;
			unit++;
		}
	idmac_nchan = channels[i].idmac_nchan;
	idmac_irq = channels[i].idmac_irq;
	if (idmac_nchan != 0) {
		if (idmac_irq == -1)
			idmac_irq = mva->mva_irq;
		if (idmac_irq == -1)
			/* Discovery */
			if (!prop_dictionary_get_uint32(dict,
			    "idmac-irq", &idmac_irq)) {
				aprint_error(": no idmac-irq property\n");
				return;
			}
	}
	xore_nchan = channels[i].xore_nchan;
	xore_irqs = channels[i].xore_irqs;
	xore_irq = MVA_IRQ_DEFAULT;
	if (xore_nchan != 0) {
		if (xore_irqs == NULL)
			xore_irq = mva->mva_irq;
		if (xore_irqs == NULL && xore_irq == MVA_IRQ_DEFAULT)
			/* Discovery LT/V/VI */
			if (!prop_dictionary_get_uint32(dict,
			    "xore-irq", &xore_irq)) {
				aprint_error(": no xore-irq property\n");
				return;
			}
	}

	aprint_naive("\n");
	aprint_normal(": Marvell IDMA Controller%s\n",
	    xore_nchan ? "/XOR Engine" : "");
	if (idmac_nchan > 0)
		aprint_normal_dev(self,
		    "IDMA Controller %d channels, intr %d...%d\n",
		    idmac_nchan, idmac_irq, idmac_irq + GTIDMAC_NINTRRUPT - 1);
	if (xore_nchan > 0) {
		aprint_normal_dev(self, "XOR Engine %d channels", xore_nchan);
		if (xore_irqs == NULL)
			aprint_normal(", intr %d...%d\n",
			    xore_irq, xore_irq + xore_nchan - 1);
		else {
			aprint_normal(", intr %d", xore_irqs[0]);
			for (i = 1; i < xore_nchan; i++)
				aprint_normal(", %d", xore_irqs[i]);
			aprint_normal("\n");
		}
	}

	sc->sc_dev = self;
	sc->sc_iot = mva->mva_iot;

	/* Map I/O registers */
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
	    mva->mva_size, &sc->sc_ioh)) {
		aprint_error_dev(self, "can't map registers\n");
		return;
	}

	/*
	 * Initialise DMA descriptors and associated metadata
	 */
	sc->sc_dmat = mva->mva_dmat;
	n = idmac_nchan * GTIDMAC_NDESC + xore_nchan * MVXORE_NDESC;
	sc->sc_dd_buffer =
	    kmem_alloc(sizeof(struct gtidmac_dma_desc) * n, KM_SLEEP);
	if (sc->sc_dd_buffer == NULL) {
		aprint_error_dev(self, "can't allocate memory\n");
		goto fail1;
	}
	/* pattern buffer */
	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &sc->sc_pattern_segment, 1, &nsegs, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self,
		    "bus_dmamem_alloc failed: pattern buffer\n");
		goto fail2;
	}
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_pattern_segment, 1, PAGE_SIZE,
	    (void **)&sc->sc_pbuf, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self,
		    "bus_dmamem_map failed: pattern buffer\n");
		goto fail3;
	}
	for (i = 0; i < 0x100; i++)
		for (j = 0; j < sizeof(sc->sc_pbuf[i].pbuf); j++)
			sc->sc_pbuf[i].pbuf[j] = i;

	if (!prop_dictionary_get_uint32(dict, "dmb_speed", &dmb_speed)) {
		aprint_error_dev(self, "no dmb_speed property\n");
		dmb_speed = 10;	/* Presumably still faster than swdmover. */
	}

	/* IDMAC DMA descriptor buffer */
	sc->sc_gtidmac_nchan = idmac_nchan;
	if (sc->sc_gtidmac_nchan > 0) {
		if (gtidmac_buffer_setup(sc) != 0)
			goto fail4;

		if (mva->mva_model != MARVELL_DISCOVERY)
			gtidmac_wininit(sc, mva->mva_tags);

		/* Setup interrupt */
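		/*
		 * Several channels share each interrupt line:
		 * j = i * idmac_nchan / GTIDMAC_NINTRRUPT is the first
		 * channel whose cause/error registers belong to
		 * interrupt i.
		 */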
		for (i = 0; i < GTIDMAC_NINTRRUPT; i++) {
			j = i * idmac_nchan / GTIDMAC_NINTRRUPT;

			sc->sc_intrarg[i].ia_sc = sc;
			sc->sc_intrarg[i].ia_cause = GTIDMAC_ICR(j);
			sc->sc_intrarg[i].ia_eaddr = GTIDMAC_EAR(j);
			sc->sc_intrarg[i].ia_eselect = GTIDMAC_ESR(j);
			marvell_intr_establish(idmac_irq + i, IPL_BIO,
			    gtidmac_intr, &sc->sc_intrarg[i]);
		}

		/* Register us with dmover. */
		sc->sc_dmb.dmb_name = device_xname(self);
		sc->sc_dmb.dmb_speed = dmb_speed;
		sc->sc_dmb.dmb_cookie = sc;
		sc->sc_dmb.dmb_algdescs = gtidmac_algdescs;
		sc->sc_dmb.dmb_nalgdescs = __arraycount(gtidmac_algdescs);
		sc->sc_dmb.dmb_process = gtidmac_process;
		dmover_backend_register(&sc->sc_dmb);
		sc->sc_dmb_busy = 0;
	}

	/* XORE DMA descriptor buffer */
	sc->sc_mvxore_nchan = xore_nchan;
	if (sc->sc_mvxore_nchan > 0) {
		if (mvxore_buffer_setup(sc) != 0)
			goto fail5;

		/* Setup interrupt */
		for (i = 0; i < sc->sc_mvxore_nchan; i++)
			marvell_intr_establish(
			    xore_irqs != NULL ? xore_irqs[i] : xore_irq + i,
			    IPL_BIO,
			    (i & 0x2) ? mvxore_port1_intr : mvxore_port0_intr,
			    sc);

		mvxore_wininit(sc, mva->mva_tags);

		/* Register us with dmover. */
		sc->sc_dmb_xore.dmb_name = device_xname(sc->sc_dev);
		sc->sc_dmb_xore.dmb_speed = dmb_speed;
		sc->sc_dmb_xore.dmb_cookie = sc;
		sc->sc_dmb_xore.dmb_algdescs = mvxore_algdescs;
		sc->sc_dmb_xore.dmb_nalgdescs = __arraycount(mvxore_algdescs);
		sc->sc_dmb_xore.dmb_process = gtidmac_process;
		dmover_backend_register(&sc->sc_dmb_xore);
	}

	gtidmac_softc = sc;

	return;

fail5:
	for (i = sc->sc_gtidmac_nchan - 1; i >= 0; i--) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * sc->sc_gtidmac_nchan);
	bus_dmamem_free(sc->sc_dmat,
	    sc->sc_dmap->dm_segs, sc->sc_dmap->dm_nsegs);
fail4:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_pbuf, PAGE_SIZE);
fail3:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_pattern_segment, 1);
fail2:
	kmem_free(sc->sc_dd_buffer, sizeof(struct gtidmac_dma_desc) * n);
fail1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, mva->mva_size);
	return;
}


static int
gtidmac_intr(void *arg)
{
	struct gtidmac_intr_arg *ia = arg;
	struct gtidmac_softc *sc = ia->ia_sc;
	uint32_t cause;
	int handled = 0, chan, error;

	cause = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause);
	DPRINTF(("IDMAC intr: cause=0x%x\n", cause));
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause, ~cause);

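	/*
	 * The cause register packs GTIDMAC_I_BITS of status per channel
	 * (8 bits/channel, i.e. 4 channels per 32-bit register); walk the
	 * channels by shifting the cause word down one channel's worth of
	 * bits per iteration.
	 */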
	chan = 0;
	while (cause) {
		error = 0;
		if (cause & GTIDMAC_I_ADDRMISS) {
			aprint_error_dev(sc->sc_dev, "Address Miss");
			error = EINVAL;
		}
		if (cause & GTIDMAC_I_ACCPROT) {
			aprint_error_dev(sc->sc_dev,
			    "Access Protect Violation");
			error = EACCES;
		}
		if (cause & GTIDMAC_I_WRPROT) {
			aprint_error_dev(sc->sc_dev, "Write Protect");
			error = EACCES;
		}
		if (cause & GTIDMAC_I_OWN) {
			aprint_error_dev(sc->sc_dev, "Ownership Violation");
			error = EINVAL;
		}

#define GTIDMAC_I_ERROR		  \
	   (GTIDMAC_I_ADDRMISS	| \
	    GTIDMAC_I_ACCPROT	| \
	    GTIDMAC_I_WRPROT	| \
	    GTIDMAC_I_OWN)
		if (cause & GTIDMAC_I_ERROR) {
			uint32_t sel;
			int select;

			sel = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    ia->ia_eselect) & GTIDMAC_ESR_SEL;
			select = sel - chan * GTIDMAC_I_BITS;
			if (select >= 0 && select < GTIDMAC_I_BITS) {
				uint32_t ear;

				ear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				    ia->ia_eaddr);
				aprint_error(": Error Address 0x%x\n", ear);
			} else
				aprint_error(": lost Error Address\n");
		}

		if (cause & (GTIDMAC_I_COMP | GTIDMAC_I_ERROR)) {
			sc->sc_cdesc[chan].chan_dma_done(
			    sc->sc_cdesc[chan].chan_running, chan,
			    &sc->sc_cdesc[chan].chan_in,
			    &sc->sc_cdesc[chan].chan_out, error);
			handled++;
		}

		cause >>= GTIDMAC_I_BITS;
	}
	DPRINTF(("IDMAC intr: %shandled\n", handled ? "" : "not "));

	return handled;
}

static int
mvxore_port0_intr(void *arg)
{
	struct gtidmac_softc *sc = arg;

	return mvxore_intr(sc, 0);
}

static int
mvxore_port1_intr(void *arg)
{
	struct gtidmac_softc *sc = arg;

	return mvxore_intr(sc, 1);
}

static int
mvxore_intr(struct gtidmac_softc *sc, int port)
{
	uint32_t cause;
	int handled = 0, chan, error;

	cause =
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEICR(sc, port));
	DPRINTF(("XORE port %d intr: cause=0x%x\n", port, cause));
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    MVXORE_XEICR(sc, port), ~cause);

	chan = 0;
	while (cause) {
		error = 0;
		if (cause & MVXORE_I_ADDRDECODE) {
			aprint_error_dev(sc->sc_dev, "Failed address decoding");
			error = EINVAL;
		}
		if (cause & MVXORE_I_ACCPROT) {
			aprint_error_dev(sc->sc_dev,
			    "Access Protect Violation");
			error = EACCES;
		}
		if (cause & MVXORE_I_WRPROT) {
			aprint_error_dev(sc->sc_dev, "Write Protect");
			error = EACCES;
		}
		if (cause & MVXORE_I_OWN) {
			aprint_error_dev(sc->sc_dev, "Ownership Violation");
			error = EINVAL;
		}
		if (cause & MVXORE_I_INTPARITY) {
			aprint_error_dev(sc->sc_dev, "Parity Error");
			error = EIO;
		}
		if (cause & MVXORE_I_XBAR) {
			aprint_error_dev(sc->sc_dev, "Crossbar Parity Error");
			error = EINVAL;
		}

#define MVXORE_I_ERROR		  \
	   (MVXORE_I_ADDRDECODE	| \
	    MVXORE_I_ACCPROT	| \
	    MVXORE_I_WRPROT	| \
	    MVXORE_I_OWN	| \
	    MVXORE_I_INTPARITY	| \
	    MVXORE_I_XBAR)
		if (cause & MVXORE_I_ERROR) {
			uint32_t type;
			int event;

			type = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    MVXORE_XEECR(sc, port));
			type &= MVXORE_XEECR_ERRORTYPE_MASK;
			event = type - chan * MVXORE_I_BITS;
			if (event >= 0 && event < MVXORE_I_BITS) {
				uint32_t xeear;

				xeear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				    MVXORE_XEEAR(sc, port));
				aprint_error(": Error Address 0x%x\n", xeear);
			} else
				aprint_error(": lost Error Address\n");
		}

		if (cause & (MVXORE_I_EOC | MVXORE_I_ERROR)) {
			sc->sc_cdesc_xore[chan].chan_dma_done(
			    sc->sc_cdesc_xore[chan].chan_running, chan,
			    sc->sc_cdesc_xore[chan].chan_in,
			    &sc->sc_cdesc_xore[chan].chan_out, error);
			handled++;
		}

		cause >>= MVXORE_I_BITS;
	}
	DPRINTF(("XORE port %d intr: %shandled\n",
	    port, handled ? "" : "not "));

	return handled;
}


/*
 * dmover(9) backend function.
 */
static void
gtidmac_process(struct dmover_backend *dmb)
{
	struct gtidmac_softc *sc = dmb->dmb_cookie;
	int s;

	/* If the backend is currently idle, go process the queue. */
	s = splbio();
	if (!sc->sc_dmb_busy)
		gtidmac_dmover_run(dmb);
	splx(s);
}

static void
gtidmac_dmover_run(struct dmover_backend *dmb)
{
	struct gtidmac_softc *sc = dmb->dmb_cookie;
	struct dmover_request *dreq;
	const struct dmover_algdesc *algdesc;
	struct gtidmac_function *df;
	bus_dmamap_t *dmamap_in, *dmamap_out;
	int chan, ninputs, error, i;

	sc->sc_dmb_busy = 1;

	for (;;) {
		dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
		if (dreq == NULL)
			break;
		algdesc = dreq->dreq_assignment->das_algdesc;
		df = algdesc->dad_data;
		chan = (*df->chan_alloc)(sc, &dmamap_in, &dmamap_out, dreq);
		if (chan == -1)
			return;

		dmover_backend_remque(dmb, dreq);
		dreq->dreq_flags |= DMOVER_REQ_RUNNING;

		/* XXXUNLOCK */

		error = 0;

		/* Load in/out buffers of dmover to bus_dmamap. */
		ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
		if (ninputs == 0) {
			int pno = 0;

			if (algdesc->dad_name == DMOVER_FUNC_FILL8)
				pno = dreq->dreq_immediate[0];

			i = 0;
			error = bus_dmamap_load(sc->sc_dmat, *dmamap_in,
			    &sc->sc_pbuf[pno], sizeof(sc->sc_pbuf[pno]), NULL,
			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE);
			if (error == 0) {
				bus_dmamap_sync(sc->sc_dmat, *dmamap_in, 0,
				    sizeof(sc->sc_pbuf[pno]),
				    BUS_DMASYNC_PREWRITE);

				/*
				 * Mark one map as loaded, so that
				 * gtidmac_dmmap_unload() is called if an
				 * error occurs later.
				 */
				i = 1;
			}
		} else
			for (i = 0; i < ninputs; i++) {
				error = gtidmac_dmmap_load(sc,
				    *(dmamap_in + i), dreq->dreq_inbuf_type,
				    &dreq->dreq_inbuf[i], 0/*write*/);
				if (error != 0)
					break;
			}
		if (algdesc->dad_name != DMOVER_FUNC_ISCSI_CRC32C) {
			if (error == 0)
				error = gtidmac_dmmap_load(sc, *dmamap_out,
				    dreq->dreq_outbuf_type, &dreq->dreq_outbuf,
				    1/*read*/);

			if (error == 0) {
				/*
				 * In a dmover request the size of outbuf is
				 * always taken to be the DMA transfer size.
				 */
				error = (*df->dma_setup)(sc, chan, ninputs,
				    dmamap_in, dmamap_out,
				    (*dmamap_out)->dm_mapsize);
				if (error != 0)
					gtidmac_dmmap_unload(sc, *dmamap_out,
					    1);
			}
		} else
			if (error == 0)
				error = (*df->dma_setup)(sc, chan, ninputs,
				    dmamap_in, dmamap_out,
				    (*dmamap_in)->dm_mapsize);

		/* XXXLOCK */

		if (error != 0) {
			for (; i-- > 0;)
				gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
			(*df->chan_free)(sc, chan);

			dreq->dreq_flags |= DMOVER_REQ_ERROR;
			dreq->dreq_error = error;
			/* XXXUNLOCK */
			dmover_done(dreq);
			/* XXXLOCK */
			continue;
		}

		(*df->dma_start)(sc, chan, gtidmac_dmover_done);
		break;
	}

	/* All done */
	sc->sc_dmb_busy = 0;
}

static void
gtidmac_dmover_done(void *object, int chan, bus_dmamap_t *dmamap_in,
		    bus_dmamap_t *dmamap_out, int error)
{
	struct gtidmac_softc *sc;
	struct dmover_request *dreq = object;
	struct dmover_backend *dmb;
	struct gtidmac_function *df;
	uint32_t result;
	int ninputs, i;

	KASSERT(dreq != NULL);

	dmb = dreq->dreq_assignment->das_backend;
	df = dreq->dreq_assignment->das_algdesc->dad_data;
	ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
	sc = dmb->dmb_cookie;

	result = (*df->dma_finish)(sc, chan, error);
	for (i = 0; i < ninputs; i++)
		gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
	if (dreq->dreq_assignment->das_algdesc->dad_name ==
	    DMOVER_FUNC_ISCSI_CRC32C)
		memcpy(dreq->dreq_immediate, &result, sizeof(result));
	else
		gtidmac_dmmap_unload(sc, *dmamap_out, 1);

	(*df->chan_free)(sc, chan);

	if (error) {
		dreq->dreq_error = error;
		dreq->dreq_flags |= DMOVER_REQ_ERROR;
	}

	dmover_done(dreq);

	/*
	 * See if we can start some more dmover(9) requests.
	 *
	 * Note: We're already at splbio() here.
	 */
	if (!sc->sc_dmb_busy)
		gtidmac_dmover_run(dmb);
}

static __inline int
gtidmac_dmmap_load(struct gtidmac_softc *sc, bus_dmamap_t dmamap,
		   dmover_buffer_type dmbuf_type, dmover_buffer *dmbuf,
		   int read)
{
	int error, flags;

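	/*
	 * BUS_DMA_READ means the device writes to memory (the map is the
	 * destination of the transfer); BUS_DMA_WRITE means the device
	 * reads from it.  Note the conditional must be parenthesized,
	 * since '|' binds tighter than '?:'.
	 */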
	flags = BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    (read ? BUS_DMA_READ : BUS_DMA_WRITE);

	switch (dmbuf_type) {
	case DMOVER_BUF_LINEAR:
		error = bus_dmamap_load(sc->sc_dmat, dmamap,
		    dmbuf->dmbuf_linear.l_addr, dmbuf->dmbuf_linear.l_len,
		    NULL, flags);
		break;

	case DMOVER_BUF_UIO:
		if ((read && dmbuf->dmbuf_uio->uio_rw != UIO_READ) ||
		    (!read && dmbuf->dmbuf_uio->uio_rw == UIO_READ))
			return (EINVAL);

		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
		    dmbuf->dmbuf_uio, flags);
		break;

	default:
		error = EINVAL;
	}

	if (error == 0)
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    read ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	return error;
}

static __inline void
gtidmac_dmmap_unload(struct gtidmac_softc *sc, bus_dmamap_t dmamap, int read)
{

	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    read ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->sc_dmat, dmamap);
}


/*
 * IDMAC functions
 */
int
gtidmac_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
		   bus_dmamap_t **dmamap_out, void *object)
{
	struct gtidmac_softc *sc = tag;
	int chan;

/* maybe need lock */

	for (chan = 0; chan < sc->sc_gtidmac_nchan; chan++)
		if (sc->sc_cdesc[chan].chan_running == NULL)
			break;
	if (chan >= sc->sc_gtidmac_nchan)
		return -1;


	sc->sc_cdesc[chan].chan_running = object;

/* unlock */

	*dmamap_in = &sc->sc_cdesc[chan].chan_in;
	*dmamap_out = &sc->sc_cdesc[chan].chan_out;

	return chan;
}

void
gtidmac_chan_free(void *tag, int chan)
{
	struct gtidmac_softc *sc = tag;

/* maybe need lock */

	sc->sc_cdesc[chan].chan_running = NULL;

/* unlock */
}

/* ARGSUSED */
int
gtidmac_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
	      bus_dmamap_t *dmamap_out, bus_size_t size)
{
	struct gtidmac_softc *sc = tag;
	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
	struct gtidmac_desc *desc;
	uint32_t ccl, bcnt, ires, ores;
	int n = 0, iidx, oidx;

	KASSERT(ninputs == 0 || ninputs == 1);

	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
#ifdef DIAGNOSTIC
	if (ccl & GTIDMAC_CCLR_CHANACT)
		panic("gtidmac_setup: chan%d already active", chan);
#endif

	/* We always use chain mode, with at most (16M - 1) bytes/descriptor */
	ccl = (GTIDMAC_CCLR_DESCMODE_16M				|
#ifdef GTIDMAC_DEBUG
	    GTIDMAC_CCLR_CDEN						|
#endif
	    GTIDMAC_CCLR_TRANSFERMODE_B /* Transfer Mode: Block */	|
	    GTIDMAC_CCLR_INTMODE_NULL   /* Intr Mode: Next Desc NULL */	|
	    GTIDMAC_CCLR_CHAINMODE_C    /* Chain Mode: Chained */);
	if (size != (*dmamap_in)->dm_mapsize) {
		ccl |= GTIDMAC_CCLR_SRCHOLD;
		if ((*dmamap_in)->dm_mapsize == 8)
			ccl |= GTIDMAC_CCLR_SBL_8B;
		else if ((*dmamap_in)->dm_mapsize == 16)
			ccl |= GTIDMAC_CCLR_SBL_16B;
		else if ((*dmamap_in)->dm_mapsize == 32)
			ccl |= GTIDMAC_CCLR_SBL_32B;
		else if ((*dmamap_in)->dm_mapsize == 64)
			ccl |= GTIDMAC_CCLR_SBL_64B;
		else if ((*dmamap_in)->dm_mapsize == 128)
			ccl |= GTIDMAC_CCLR_SBL_128B;
		else
			panic("gtidmac_setup: chan%d source:"
			    " unsupported hold size", chan);
	} else
		ccl |= GTIDMAC_CCLR_SBL_128B;
	if (size != (*dmamap_out)->dm_mapsize) {
		ccl |= GTIDMAC_CCLR_DESTHOLD;
		if ((*dmamap_out)->dm_mapsize == 8)
			ccl |= GTIDMAC_CCLR_DBL_8B;
		else if ((*dmamap_out)->dm_mapsize == 16)
			ccl |= GTIDMAC_CCLR_DBL_16B;
		else if ((*dmamap_out)->dm_mapsize == 32)
			ccl |= GTIDMAC_CCLR_DBL_32B;
		else if ((*dmamap_out)->dm_mapsize == 64)
			ccl |= GTIDMAC_CCLR_DBL_64B;
		else if ((*dmamap_out)->dm_mapsize == 128)
			ccl |= GTIDMAC_CCLR_DBL_128B;
		else
			panic("gtidmac_setup: chan%d destination:"
			    " unsupported hold size", chan);
	} else
		ccl |= GTIDMAC_CCLR_DBL_128B;

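	/*
	 * A held source or destination (SRCHOLD/DESTHOLD) keeps that
	 * address fixed for the whole transfer.  It is selected when the
	 * map is smaller than the requested size, e.g. the 16-byte pattern
	 * buffer used for zero/fill8, and the burst limit is then clamped
	 * to the held map's size.
	 */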
	fstdd = SLIST_FIRST(&sc->sc_dlist);
	if (fstdd == NULL) {
		aprint_error_dev(sc->sc_dev, "no descriptor\n");
		return ENOMEM;
	}
	SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
	sc->sc_cdesc[chan].chan_ddidx = fstdd->dd_index;

	dd = fstdd;
	ires = ores = 0;
	iidx = oidx = 0;
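	/*
	 * Build the descriptor chain: each descriptor covers the largest
	 * run that fits in both the current input and output segments, so
	 * a new descriptor is started whenever either side (unless held)
	 * crosses a segment boundary.
	 */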
	while (1 /*CONSTCOND*/) {
		if (ccl & GTIDMAC_CCLR_SRCHOLD) {
			if (ccl & GTIDMAC_CCLR_DESTHOLD)
				bcnt = size;	/* src/dst hold */
			else
				bcnt = (*dmamap_out)->dm_segs[oidx].ds_len;
		} else if (ccl & GTIDMAC_CCLR_DESTHOLD)
			bcnt = (*dmamap_in)->dm_segs[iidx].ds_len;
		else
			bcnt = min((*dmamap_in)->dm_segs[iidx].ds_len - ires,
			    (*dmamap_out)->dm_segs[oidx].ds_len - ores);

		desc = dd->dd_idmac_vaddr;
		desc->bc.mode16m.bcnt =
		    bcnt | GTIDMAC_CIDMABCR_BCLEFT | GTIDMAC_CIDMABCR_OWN;
		desc->srcaddr = (*dmamap_in)->dm_segs[iidx].ds_addr + ires;
		desc->dstaddr = (*dmamap_out)->dm_segs[oidx].ds_addr + ores;

		n += bcnt;
		if (n >= size)
			break;
		if (!(ccl & GTIDMAC_CCLR_SRCHOLD)) {
			ires += bcnt;
			if (ires >= (*dmamap_in)->dm_segs[iidx].ds_len) {
				ires = 0;
				iidx++;
				KASSERT(iidx < (*dmamap_in)->dm_nsegs);
			}
		}
		if (!(ccl & GTIDMAC_CCLR_DESTHOLD)) {
			ores += bcnt;
			if (ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
				ores = 0;
				oidx++;
				KASSERT(oidx < (*dmamap_out)->dm_nsegs);
			}
		}

		nxtdd = SLIST_FIRST(&sc->sc_dlist);
		if (nxtdd == NULL) {
			aprint_error_dev(sc->sc_dev, "no descriptor\n");
			return ENOMEM;
		}
		SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);

		desc->nextdp = (uint32_t)nxtdd->dd_paddr;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
		    dd->dd_index * sizeof(*desc), sizeof(*desc),
#ifdef GTIDMAC_DEBUG
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#else
		    BUS_DMASYNC_PREWRITE);
#endif

		SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
		dd = nxtdd;
	}
	desc->nextdp = (uint32_t)NULL;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, dd->dd_index * sizeof(*desc),
#ifdef GTIDMAC_DEBUG
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#else
	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
#endif

	/* Set paddr of descriptor to Channel Next Descriptor Pointer */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan),
	    fstdd->dd_paddr);

#if BYTE_ORDER == LITTLE_ENDIAN
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_LE);
#else
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_BE);
#endif
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan), ccl);

#ifdef GTIDMAC_DEBUG
	gtidmac_dump_idmacdesc(sc, fstdd, ccl, 0/*pre*/);
#endif

	sc->sc_cdesc[chan].chan_totalcnt += size;

	return 0;
}

void
gtidmac_start(void *tag, int chan,
	      void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
				  int))
{
	struct gtidmac_softc *sc = tag;
	uint32_t ccl;

	DPRINTF(("%s:%d: starting\n", device_xname(sc->sc_dev), chan));

#ifdef GTIDMAC_DEBUG
	gtidmac_dump_idmacreg(sc, chan);
#endif

	sc->sc_cdesc[chan].chan_dma_done = dma_done_cb;

	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
	/* Start and 'Fetch Next Descriptor' */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan),
	    ccl | GTIDMAC_CCLR_CHANEN | GTIDMAC_CCLR_FETCHND);
}

static uint32_t
gtidmac_finish(void *tag, int chan, int error)
{
	struct gtidmac_softc *sc = tag;
	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
	struct gtidmac_desc *desc;

	fstdd = &sc->sc_dd_buffer[sc->sc_cdesc[chan].chan_ddidx];

#ifdef GTIDMAC_DEBUG
	if (error || gtidmac_debug > 1) {
		uint32_t ccl;

		gtidmac_dump_idmacreg(sc, chan);
		ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    GTIDMAC_CCLR(chan));
		gtidmac_dump_idmacdesc(sc, fstdd, ccl, 1/*post*/);
	}
#endif

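	/*
	 * Walk the chain in list order and return every descriptor to the
	 * free list; the chain ends at the descriptor whose next pointer
	 * is 0.
	 */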
	dd = fstdd;
	do {
		desc = dd->dd_idmac_vaddr;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
		    dd->dd_index * sizeof(*desc), sizeof(*desc),
#ifdef GTIDMAC_DEBUG
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#else
		    BUS_DMASYNC_POSTWRITE);
#endif

		nxtdd = SLIST_NEXT(dd, dd_next);
		SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
		dd = nxtdd;
	} while (desc->nextdp);

	return 0;
}

/*
 * XORE functions
 */
int
mvxore_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
		  bus_dmamap_t **dmamap_out, void *object)
{
	struct gtidmac_softc *sc = tag;
	int chan;

/* maybe need lock */

	for (chan = 0; chan < sc->sc_mvxore_nchan; chan++)
		if (sc->sc_cdesc_xore[chan].chan_running == NULL)
			break;
	if (chan >= sc->sc_mvxore_nchan)
		return -1;


	sc->sc_cdesc_xore[chan].chan_running = object;

/* unlock */

	*dmamap_in = sc->sc_cdesc_xore[chan].chan_in;
	*dmamap_out = &sc->sc_cdesc_xore[chan].chan_out;

	return chan;
}

void
mvxore_chan_free(void *tag, int chan)
{
	struct gtidmac_softc *sc = tag;

/* maybe need lock */

	sc->sc_cdesc_xore[chan].chan_running = NULL;

/* unlock */
}

/* ARGSUSED */
int
mvxore_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
	     bus_dmamap_t *dmamap_out, bus_size_t size)
{
	struct gtidmac_softc *sc = tag;
	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
	struct mvxore_desc *desc;
	uint32_t xexc, bcnt, cmd, lastcmd;
	int n = 0, i;
	uint32_t ires[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, ores = 0;
	int iidx[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, oidx = 0;

#ifdef DIAGNOSTIC
	uint32_t xexact =
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));

	if ((xexact & MVXORE_XEXACTR_XESTATUS_MASK) ==
	    MVXORE_XEXACTR_XESTATUS_ACT)
		panic("mvxore_setup: chan%d already active;"
		    " mvxore does not support hot insertion", chan);
#endif

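	/*
	 * The operation mode follows the request shape: two or more inputs
	 * means XOR; one input means CRC32 when there is no output map and
	 * plain DMA otherwise; zero inputs means ECC (or, disabled below,
	 * memory init), which is programmed directly via registers and
	 * needs no descriptor chain.
	 */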
	xexc =
	    (MVXORE_XEXCR_REGACCPROTECT	|
	     MVXORE_XEXCR_DBL_128B	|
	     MVXORE_XEXCR_SBL_128B);
	cmd = lastcmd = 0;
	if (ninputs > 1) {
		xexc |= MVXORE_XEXCR_OM_XOR;
		lastcmd = cmd = (1 << ninputs) - 1;
	} else if (ninputs == 1) {
		if ((*dmamap_out)->dm_nsegs == 0) {
			xexc |= MVXORE_XEXCR_OM_CRC32;
			lastcmd = MVXORE_DESC_CMD_CRCLAST;
		} else
			xexc |= MVXORE_XEXCR_OM_DMA;
	} else if (ninputs == 0) {
		if ((*dmamap_out)->dm_nsegs != 1) {
			aprint_error_dev(sc->sc_dev,
			    "XORE does not support %d DMA segments\n",
			    (*dmamap_out)->dm_nsegs);
			return EINVAL;
		}

		if ((*dmamap_in)->dm_mapsize == 0) {
			xexc |= MVXORE_XEXCR_OM_ECC;

			/* XXXXX: Maybe need to set Timer Mode registers? */

#if 0
		} else if ((*dmamap_in)->dm_mapsize == 8 ||
		    (*dmamap_in)->dm_mapsize == 16) { /* in case dmover */
			uint64_t pattern;

			/* XXXX: Get pattern data */

			KASSERT((*dmamap_in)->dm_mapsize == 8 ||
			    (void *)((uint32_t)(*dmamap_in)->_dm_origbuf &
						~PAGE_MASK) == sc->sc_pbuf);
			pattern = *(uint64_t *)(*dmamap_in)->_dm_origbuf;

			/* XXXXX: XORE has a IVR.  We should get this first. */
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRL,
			    pattern);
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRH,
			    pattern >> 32);

			xexc |= MVXORE_XEXCR_OM_MEMINIT;
#endif
		} else {
			aprint_error_dev(sc->sc_dev,
			    "XORE does not support DMA mapsize %zd\n",
			    (*dmamap_in)->dm_mapsize);
			return EINVAL;
		}
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    MVXORE_XEXDPR(sc, chan), (*dmamap_out)->dm_segs[0].ds_addr);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    MVXORE_XEXBSR(sc, chan), (*dmamap_out)->dm_mapsize);

		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    MVXORE_XEXCR(sc, chan), xexc);
		sc->sc_cdesc_xore[chan].chan_totalcnt += size;

		return 0;
	}

	/* Make descriptor for DMA/CRC32/XOR */

	fstdd = SLIST_FIRST(&sc->sc_dlist_xore);
	if (fstdd == NULL) {
		aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
		return ENOMEM;
	}
	SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
	sc->sc_cdesc_xore[chan].chan_ddidx =
	    fstdd->dd_index + GTIDMAC_NDESC * sc->sc_gtidmac_nchan;

	dd = fstdd;
	while (1 /*CONSTCOND*/) {
		desc = dd->dd_xore_vaddr;
		desc->stat = MVXORE_DESC_STAT_OWN;
		desc->cmd = cmd;
		if ((*dmamap_out)->dm_nsegs != 0) {
			desc->dstaddr =
			    (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
			bcnt = (*dmamap_out)->dm_segs[oidx].ds_len - ores;
		} else {
			desc->dstaddr = 0;
			bcnt = MVXORE_MAXXFER;	/* XXXXX */
		}
		for (i = 0; i < ninputs; i++) {
			desc->srcaddr[i] =
			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_addr + ires[i];
			bcnt = min(bcnt,
			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len - ires[i]);
		}
		desc->bcnt = bcnt;

		n += bcnt;
		if (n >= size)
			break;
		ores += bcnt;
		if ((*dmamap_out)->dm_nsegs != 0 &&
		    ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
			ores = 0;
			oidx++;
			KASSERT(oidx < (*dmamap_out)->dm_nsegs);
		}
		for (i = 0; i < ninputs; i++) {
			ires[i] += bcnt;
			if (ires[i] >=
			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len) {
				ires[i] = 0;
				iidx[i]++;
				KASSERT(iidx[i] < (*dmamap_in[i]).dm_nsegs);
			}
		}

		nxtdd = SLIST_FIRST(&sc->sc_dlist_xore);
		if (nxtdd == NULL) {
			aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
			return ENOMEM;
		}
		SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);

		desc->nextda = (uint32_t)nxtdd->dd_paddr;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
		    dd->dd_index * sizeof(*desc), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
		dd = nxtdd;
	}
	desc->cmd = lastcmd;
	desc->nextda = (uint32_t)NULL;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
	    dd->dd_index * sizeof(*desc), sizeof(*desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Set paddr of descriptor to Channel Next Descriptor Pointer */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXNDPR(sc, chan),
	    fstdd->dd_paddr);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan), xexc);

#ifdef GTIDMAC_DEBUG
	gtidmac_dump_xoredesc(sc, fstdd, xexc, 0/*pre*/);
#endif

	sc->sc_cdesc_xore[chan].chan_totalcnt += size;

	return 0;
}

void
mvxore_start(void *tag, int chan,
	     void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
				 int))
{
	struct gtidmac_softc *sc = tag;
	uint32_t xexact;

	DPRINTF(("%s:%d: xore starting\n", device_xname(sc->sc_dev), chan));

#ifdef GTIDMAC_DEBUG
	gtidmac_dump_xorereg(sc, chan);
#endif

	sc->sc_cdesc_xore[chan].chan_dma_done = dma_done_cb;

	xexact =
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan),
	    xexact | MVXORE_XEXACTR_XESTART);
}

static uint32_t
mvxore_finish(void *tag, int chan, int error)
{
	struct gtidmac_softc *sc = tag;
	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
	struct mvxore_desc *desc;
	uint32_t xexc;

#ifdef GTIDMAC_DEBUG
	if (error || gtidmac_debug > 1)
		gtidmac_dump_xorereg(sc, chan);
#endif

	xexc = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan));
	if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_ECC ||
	    (xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_MEMINIT)
		return 0;

	fstdd = &sc->sc_dd_buffer[sc->sc_cdesc_xore[chan].chan_ddidx];

#ifdef GTIDMAC_DEBUG
	if (error || gtidmac_debug > 1)
		gtidmac_dump_xoredesc(sc, fstdd, xexc, 1/*post*/);
#endif

	dd = fstdd;
	do {
		desc = dd->dd_xore_vaddr;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
		    dd->dd_index * sizeof(*desc), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		nxtdd = SLIST_NEXT(dd, dd_next);
		SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
		dd = nxtdd;
	} while (desc->nextda);

	if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_CRC32)
		return desc->result;
	return 0;
}

static void
gtidmac_wininit(struct gtidmac_softc *sc, enum marvell_tags *tags)
{
	device_t pdev = device_parent(sc->sc_dev);
	uint64_t base;
	uint32_t size, cxap, en, winacc;
	int window, target, attr, rv, i, j;

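	/*
	 * The window enable mask appears to be active-low: start with all
	 * windows disabled (0xff) and clear a bit below for each window
	 * that gets programmed.
	 */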
	en = 0xff;
	cxap = 0;
	for (window = 0, i = 0;
	    tags[i] != MARVELL_TAG_UNDEFINED && window < GTIDMAC_NWINDOW; i++) {
		rv = marvell_winparams_by_tag(pdev, tags[i],
		    &target, &attr, &base, &size);
		if (rv != 0 || size == 0)
			continue;

		if (base > 0xffffffffULL) {
			if (window >= GTIDMAC_NREMAP) {
				aprint_error_dev(sc->sc_dev,
				    "can't remap window %d\n", window);
				continue;
			}
			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			    GTIDMAC_HARXR(window), (base >> 32) & 0xffffffff);
		}
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BARX(window),
		    GTIDMAC_BARX_TARGET(target)	|
		    GTIDMAC_BARX_ATTR(attr)	|
		    GTIDMAC_BARX_BASE(base));
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_SRX(window),
		    GTIDMAC_SRX_SIZE(size));
		en &= ~GTIDMAC_BAER_EN(window);

		winacc = GTIDMAC_CXAPR_WINACC_FA;
		if (gtidmac_winacctbl != NULL)
			for (j = 0;
			    gtidmac_winacctbl[j].tag != MARVELL_TAG_UNDEFINED;
			    j++) {
				if (gtidmac_winacctbl[j].tag != tags[i])
					continue;

				switch (gtidmac_winacctbl[j].winacc) {
				case GTIDMAC_WINACC_NOACCESSALLOWED:
					winacc = GTIDMAC_CXAPR_WINACC_NOAA;
					break;
				case GTIDMAC_WINACC_READONLY:
					winacc = GTIDMAC_CXAPR_WINACC_RO;
					break;
				case GTIDMAC_WINACC_FULLACCESS:
				default: /* XXXX: default is full access */
					break;
				}
				break;
			}
		cxap |= GTIDMAC_CXAPR_WINACC(window, winacc);

		window++;
	}
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BAER, en);

	for (i = 0; i < GTIDMAC_NACCPROT; i++)
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CXAPR(i),
		    cxap);
}

static void
mvxore_wininit(struct gtidmac_softc *sc, enum marvell_tags *tags)
{
	device_t pdev = device_parent(sc->sc_dev);
	uint64_t base;
	uint32_t target, attr, size, xexwc, winacc;
	int window, rv, i, j, p;

	xexwc = 0;
	for (window = 0, i = 0;
	    tags[i] != MARVELL_TAG_UNDEFINED && window < MVXORE_NWINDOW; i++) {
		rv = marvell_winparams_by_tag(pdev, tags[i],
		    &target, &attr, &base, &size);
		if (rv != 0 || size == 0)
			continue;

		if (base > 0xffffffffULL) {
			if (window >= MVXORE_NREMAP) {
				aprint_error_dev(sc->sc_dev,
				    "can't remap window %d\n", window);
				continue;
			}
			for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++)
				bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				    MVXORE_XEHARRX(sc, p, window),
				    (base >> 32) & 0xffffffff);
		}

		for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++) {
			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			    MVXORE_XEBARX(sc, p, window),
			    MVXORE_XEBARX_TARGET(target) |
			    MVXORE_XEBARX_ATTR(attr) |
			    MVXORE_XEBARX_BASE(base));
			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			    MVXORE_XESMRX(sc, p, window),
			    MVXORE_XESMRX_SIZE(size));
		}

		winacc = MVXORE_XEXWCR_WINACC_FA;
		if (mvxore_winacctbl != NULL)
			for (j = 0;
			    mvxore_winacctbl[j].tag != MARVELL_TAG_UNDEFINED;
			    j++) {
				if (mvxore_winacctbl[j].tag != tags[i])
					continue;

				switch (mvxore_winacctbl[j].winacc) {
   1604 				case GTIDMAC_WINACC_NOACCESSALLOWED:
   1605 					winacc = MVXORE_XEXWCR_WINACC_NOAA;
   1606 					break;
   1607 				case GTIDMAC_WINACC_READONLY:
   1608 					winacc = MVXORE_XEXWCR_WINACC_RO;
   1609 					break;
   1610 				case GTIDMAC_WINACC_FULLACCESS:
   1611 				default: /* XXXX: default is full access */
   1612 					break;
   1613 				}
   1614 				break;
   1615 			}
   1616 		xexwc |= (MVXORE_XEXWCR_WINEN(window) |
   1617 		    MVXORE_XEXWCR_WINACC(window, winacc));
   1618 		window++;
   1619 	}
   1620 
   1621 	for (i = 0; i < sc->sc_mvxore_nchan; i++) {
   1622 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXWCR(sc, i),
   1623 		    xexwc);
   1624 
   1625 		/* XXXXX: reset... */
   1626 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXAOCR(sc, 0),
   1627 		    0);
   1628 	}
   1629 }
   1630 
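/*
 * Allocate, map and load one contiguous pool of IDMAC descriptors
 * (GTIDMAC_NDESC per channel), thread them onto sc_dlist, and create the
 * per-channel in/out DMA maps while building the interrupt mask registers.
 */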
   1631 static int
   1632 gtidmac_buffer_setup(struct gtidmac_softc *sc)
   1633 {
   1634 	bus_dma_segment_t segs;
   1635 	struct gtidmac_dma_desc *dd;
   1636 	uint32_t mask;
   1637 	int nchan, nsegs, i;
   1638 
   1639 	nchan = sc->sc_gtidmac_nchan;
   1640 
   1641 	if (bus_dmamem_alloc(sc->sc_dmat,
   1642 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
   1643 	    PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
   1644 		aprint_error_dev(sc->sc_dev,
   1645 		    "bus_dmamem_alloc failed: descriptor buffer\n");
   1646 		goto fail0;
   1647 	}
   1648 	if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
   1649 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
   1650 	    (void **)&sc->sc_dbuf, BUS_DMA_NOWAIT)) {
   1651 		aprint_error_dev(sc->sc_dev,
   1652 		    "bus_dmamem_map failed: descriptor buffer\n");
   1653 		goto fail1;
   1654 	}
   1655 	if (bus_dmamap_create(sc->sc_dmat,
   1656 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 1,
   1657 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 0,
   1658 	    BUS_DMA_NOWAIT, &sc->sc_dmap)) {
   1659 		aprint_error_dev(sc->sc_dev,
   1660 		    "bus_dmamap_create failed: descriptor buffer\n");
   1661 		goto fail2;
   1662 	}
   1663 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, sc->sc_dbuf,
   1664 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
   1665 	    NULL, BUS_DMA_NOWAIT)) {
   1666 		aprint_error_dev(sc->sc_dev,
   1667 		    "bus_dmamap_load failed: descriptor buffer\n");
   1668 		goto fail3;
   1669 	}
   1670 	SLIST_INIT(&sc->sc_dlist);
   1671 	for (i = 0; i < GTIDMAC_NDESC * nchan; i++) {
   1672 		dd = &sc->sc_dd_buffer[i];
   1673 		dd->dd_index = i;
   1674 		dd->dd_idmac_vaddr = &sc->sc_dbuf[i];
   1675 		dd->dd_paddr = sc->sc_dmap->dm_segs[0].ds_addr +
   1676 		    (sizeof(struct gtidmac_desc) * i);
		SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
   1678 	}
   1679 
   1680 	/* Initialize IDMAC DMA channels */
   1681 	mask = 0;
   1682 	for (i = 0; i < nchan; i++) {
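		/*
		 * Each channel owns GTIDMAC_I_BITS (8) bits of an interrupt
		 * mask register; flush the accumulated mask whenever this
		 * channel's bits would start a new 32-bit register.
		 */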
   1683 		if (i > 0 && ((i * GTIDMAC_I_BITS) & 31 /*bit*/) == 0) {
   1684 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
   1685 			    GTIDMAC_IMR(i - 1), mask);
   1686 			mask = 0;
   1687 		}
   1688 
   1689 		if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
   1690 		    GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
   1691 		    &sc->sc_cdesc[i].chan_in)) {
   1692 			aprint_error_dev(sc->sc_dev,
   1693 			    "bus_dmamap_create failed: chan%d in\n", i);
   1694 			goto fail4;
   1695 		}
   1696 		if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
   1697 		    GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
   1698 		    &sc->sc_cdesc[i].chan_out)) {
   1699 			aprint_error_dev(sc->sc_dev,
   1700 			    "bus_dmamap_create failed: chan%d out\n", i);
   1701 			bus_dmamap_destroy(sc->sc_dmat,
   1702 			    sc->sc_cdesc[i].chan_in);
   1703 			goto fail4;
   1704 		}
   1705 		sc->sc_cdesc[i].chan_totalcnt = 0;
   1706 		sc->sc_cdesc[i].chan_running = NULL;
   1707 
		/* Bits that overflow the 32-bit mask are ignored. */
   1709 		mask |= GTIDMAC_I(i,
   1710 		    GTIDMAC_I_COMP	|
   1711 		    GTIDMAC_I_ADDRMISS	|
   1712 		    GTIDMAC_I_ACCPROT	|
   1713 		    GTIDMAC_I_WRPROT	|
   1714 		    GTIDMAC_I_OWN);
   1715 
   1716 		/* 8bits/channel * 4channels => 32bit */
   1717 		if ((i & 0x3) == 0x3) {
   1718 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
   1719 			    GTIDMAC_IMR(i), mask);
   1720 			mask = 0;
   1721 		}
   1722 	}
   1723 
   1724 	return 0;
   1725 
   1726 fail4:
   1727 	for (; i-- > 0;) {
   1728 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
   1729 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
   1730 	}
   1731 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
   1732 fail3:
   1733 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
   1734 fail2:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan);
   1737 fail1:
   1738 	bus_dmamem_free(sc->sc_dmat, &segs, 1);
   1739 fail0:
   1740 	return -1;
   1741 }
   1742 
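/*
 * As gtidmac_buffer_setup() above, but for the XOR engine: one descriptor
 * pool (MVXORE_NDESC per channel) on sc_dlist_xore, MVXORE_NSRC input maps
 * plus one output map per channel, and the XORE interrupt masks.
 */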
   1743 static int
   1744 mvxore_buffer_setup(struct gtidmac_softc *sc)
   1745 {
   1746 	bus_dma_segment_t segs;
   1747 	struct gtidmac_dma_desc *dd;
   1748 	uint32_t mask;
   1749 	int nchan, nsegs, i, j;
   1750 
   1751 	nchan = sc->sc_mvxore_nchan;
   1752 
   1753 	if (bus_dmamem_alloc(sc->sc_dmat,
   1754 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
   1755 	    PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
   1756 		aprint_error_dev(sc->sc_dev,
   1757 		    "bus_dmamem_alloc failed: xore descriptor buffer\n");
   1758 		goto fail0;
   1759 	}
   1760 	if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
   1761 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
   1762 	    (void **)&sc->sc_dbuf_xore, BUS_DMA_NOWAIT)) {
   1763 		aprint_error_dev(sc->sc_dev,
   1764 		    "bus_dmamem_map failed: xore descriptor buffer\n");
   1765 		goto fail1;
   1766 	}
   1767 	if (bus_dmamap_create(sc->sc_dmat,
   1768 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 1,
   1769 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 0,
   1770 	    BUS_DMA_NOWAIT, &sc->sc_dmap_xore)) {
   1771 		aprint_error_dev(sc->sc_dev,
   1772 		    "bus_dmamap_create failed: xore descriptor buffer\n");
   1773 		goto fail2;
   1774 	}
   1775 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap_xore, sc->sc_dbuf_xore,
   1776 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
   1777 	    NULL, BUS_DMA_NOWAIT)) {
   1778 		aprint_error_dev(sc->sc_dev,
   1779 		    "bus_dmamap_load failed: xore descriptor buffer\n");
   1780 		goto fail3;
   1781 	}
   1782 	SLIST_INIT(&sc->sc_dlist_xore);
   1783 	for (i = 0; i < MVXORE_NDESC * nchan; i++) {
   1784 		dd =
   1785 		    &sc->sc_dd_buffer[i + GTIDMAC_NDESC * sc->sc_gtidmac_nchan];
   1786 		dd->dd_index = i;
   1787 		dd->dd_xore_vaddr = &sc->sc_dbuf_xore[i];
   1788 		dd->dd_paddr = sc->sc_dmap_xore->dm_segs[0].ds_addr +
   1789 		    (sizeof(struct mvxore_desc) * i);
   1790 		SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
   1791 	}
   1792 
   1793 	/* Initialize XORE DMA channels */
   1794 	mask = 0;
   1795 	for (i = 0; i < nchan; i++) {
   1796 		for (j = 0; j < MVXORE_NSRC; j++) {
   1797 			if (bus_dmamap_create(sc->sc_dmat,
   1798 			    MVXORE_MAXXFER, MVXORE_NSEGS,
   1799 			    MVXORE_MAXXFER, 0, BUS_DMA_NOWAIT,
   1800 			    &sc->sc_cdesc_xore[i].chan_in[j])) {
   1801 				aprint_error_dev(sc->sc_dev,
   1802 				    "bus_dmamap_create failed:"
   1803 				    " xore chan%d in[%d]\n", i, j);
   1804 				goto fail4;
   1805 			}
   1806 		}
   1807 		if (bus_dmamap_create(sc->sc_dmat, MVXORE_MAXXFER,
   1808 		    MVXORE_NSEGS, MVXORE_MAXXFER, 0,
   1809 		    BUS_DMA_NOWAIT, &sc->sc_cdesc_xore[i].chan_out)) {
   1810 			aprint_error_dev(sc->sc_dev,
   1811 			    "bus_dmamap_create failed: chan%d out\n", i);
   1812 			goto fail5;
   1813 		}
   1814 		sc->sc_cdesc_xore[i].chan_totalcnt = 0;
   1815 		sc->sc_cdesc_xore[i].chan_running = NULL;
   1816 
   1817 		mask |= MVXORE_I(i,
   1818 		    MVXORE_I_EOC	|
   1819 		    MVXORE_I_ADDRDECODE	|
   1820 		    MVXORE_I_ACCPROT	|
   1821 		    MVXORE_I_WRPROT	|
   1822 		    MVXORE_I_OWN	|
   1823 		    MVXORE_I_INTPARITY	|
   1824 		    MVXORE_I_XBAR);
   1825 
   1826 		/* 16bits/channel * 2channels => 32bit */
   1827 		if (i & 0x1) {
   1828 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
   1829 			    MVXORE_XEIMR(sc, i >> 1), mask);
   1830 			mask = 0;
   1831 		}
   1832 	}
   1833 
   1834 	return 0;
   1835 
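	/*
	 * Unwind.  fail5 and fail4 jump into the body of this loop so the
	 * failing channel's partially created maps are destroyed first; the
	 * loop condition then steps back through the fully set up channels.
	 */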
   1836 	for (; i-- > 0;) {
   1837 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc_xore[i].chan_out);
   1838 
   1839 fail5:
   1840 		j = MVXORE_NSRC;
   1841 fail4:
   1842 		for (; j-- > 0;)
   1843 			bus_dmamap_destroy(sc->sc_dmat,
   1844 			    sc->sc_cdesc_xore[i].chan_in[j]);
   1845 	}
   1846 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap_xore);
   1847 fail3:
   1848 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap_xore);
   1849 fail2:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf_xore,
	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan);
   1852 fail1:
   1853 	bus_dmamem_free(sc->sc_dmat, &segs, 1);
   1854 fail0:
   1855 	return -1;
   1856 }
   1857 
   1858 #ifdef GTIDMAC_DEBUG
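/* Dump the control and status registers of an IDMAC channel. */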
   1859 static void
   1860 gtidmac_dump_idmacreg(struct gtidmac_softc *sc, int chan)
   1861 {
   1862 	uint32_t val;
   1863 	char buf[256];
   1864 
   1865 	printf("IDMAC Registers\n");
   1866 
   1867 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMABCR(chan));
   1868 	snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036BCLeft\0", val);
   1869 	printf("  Byte Count                 : %s\n", buf);
   1870 	printf("    ByteCnt                  :   0x%06x\n",
   1871 	    val & GTIDMAC_CIDMABCR_BYTECNT_MASK);
   1872 	printf("  Source Address             : 0x%08x\n",
   1873 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMASAR(chan)));
   1874 	printf("  Destination Address        : 0x%08x\n",
   1875 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMADAR(chan)));
   1876 	printf("  Next Descriptor Pointer    : 0x%08x\n",
   1877 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan)));
   1878 	printf("  Current Descriptor Pointer : 0x%08x\n",
   1879 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCDPR(chan)));
   1880 
   1881 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
   1882 	snprintb(buf, sizeof(buf),
   1883 	    "\177\020b\024Abr\0b\021CDEn\0b\016ChanAct\0b\015FetchND\0"
   1884 	    "b\014ChanEn\0b\012IntMode\0b\005DestHold\0b\003SrcHold\0",
   1885 	    val);
   1886 	printf("  Channel Control (Low)      : %s\n", buf);
   1887 	printf("    SrcBurstLimit            : %s Bytes\n",
   1888 	  (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_128B ? "128" :
   1889 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_64B ? "64" :
   1890 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_32B ? "32" :
   1891 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_16B ? "16" :
   1892 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_8B ? "8" :
	    "unknown");
   1894 	printf("    DstBurstLimit            : %s Bytes\n",
   1895 	  (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_128B ? "128" :
   1896 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_64B ? "64" :
   1897 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_32B ? "32" :
   1898 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_16B ? "16" :
   1899 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_8B ? "8" :
	    "unknown");
   1901 	printf("    ChainMode                : %sChained\n",
   1902 	    val & GTIDMAC_CCLR_CHAINMODE_NC ? "Non-" : "");
   1903 	printf("    TransferMode             : %s\n",
   1904 	    val & GTIDMAC_CCLR_TRANSFERMODE_B ? "Block" : "Demand");
   1905 	printf("    DescMode                 : %s\n",
   1906 	    val & GTIDMAC_CCLR_DESCMODE_16M ? "16M" : "64k");
   1907 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan));
   1908 	snprintb(buf, sizeof(buf),
   1909 	    "\177\020b\001DescByteSwap\0b\000Endianness\0", val);
   1910 	printf("  Channel Control (High)     : %s\n", buf);
   1911 }
   1912 
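/*
 * Dump an IDMAC descriptor chain.  When 'post' is set the descriptors are
 * synced from the device before reading; otherwise each one is handed back
 * to the device with a PRE sync after it has been printed.
 */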
   1913 static void
   1914 gtidmac_dump_idmacdesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
   1915 		       uint32_t mode, int post)
   1916 {
   1917 	struct gtidmac_desc *desc;
   1918 	int i;
   1919 	char buf[256];
   1920 
   1921 	printf("IDMAC Descriptor\n");
   1922 
   1923 	i = 0;
   1924 	while (1 /*CONSTCOND*/) {
   1925 		if (post)
   1926 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
   1927 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
   1928 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1929 
   1930 		desc = dd->dd_idmac_vaddr;
   1931 
   1932 		printf("%d (0x%lx)\n", i, dd->dd_paddr);
   1933 		if (mode & GTIDMAC_CCLR_DESCMODE_16M) {
   1934 			snprintb(buf, sizeof(buf),
   1935 			    "\177\020b\037Own\0b\036BCLeft\0",
   1936 			    desc->bc.mode16m.bcnt);
   1937 			printf("  Byte Count              : %s\n", buf);
   1938 			printf("    ByteCount             :   0x%06x\n",
   1939 			    desc->bc.mode16m.bcnt &
   1940 			    GTIDMAC_CIDMABCR_BYTECNT_MASK);
   1941 		} else {
   1942 			printf("  Byte Count              :     0x%04x\n",
   1943 			    desc->bc.mode64k.bcnt);
			printf("  Remaining Byte Count    :     0x%04x\n",
   1945 			    desc->bc.mode64k.rbc);
   1946 		}
   1947 		printf("  Source Address          : 0x%08x\n", desc->srcaddr);
   1948 		printf("  Destination Address     : 0x%08x\n", desc->dstaddr);
   1949 		printf("  Next Descriptor Pointer : 0x%08x\n", desc->nextdp);
   1950 
		if (desc->nextdp == 0)
   1952 			break;
   1953 
   1954 		if (!post)
   1955 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
   1956 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
   1957 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1958 
   1959 		i++;
   1960 		dd = SLIST_NEXT(dd, dd_next);
   1961 	}
   1962 	if (!post)
   1963 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
   1964 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
   1965 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1966 }
   1967 
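/* Dump the configuration and status registers of a XORE channel. */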
   1968 static void
   1969 gtidmac_dump_xorereg(struct gtidmac_softc *sc, int chan)
   1970 {
   1971 	uint32_t val, opmode;
   1972 	char buf[64];
   1973 
   1974 	printf("XORE Registers\n");
   1975 
   1976 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan));
   1977 	snprintb(buf, sizeof(buf),
   1978 	    "\177\020"
   1979 	    "b\017RegAccProtect\0b\016DesSwp\0b\015DwrReqSwp\0b\014DrdResSwp\0",
   1980 	    val);
	printf("  Configuration   : %s\n", buf);
   1982 	opmode = val & MVXORE_XEXCR_OM_MASK;
   1983 	printf("    OperationMode : %s operation\n",
   1984 	  opmode == MVXORE_XEXCR_OM_XOR ? "XOR calculate" :
   1985 	  opmode == MVXORE_XEXCR_OM_CRC32 ? "CRC-32 calculate" :
   1986 	  opmode == MVXORE_XEXCR_OM_DMA ? "DMA" :
   1987 	  opmode == MVXORE_XEXCR_OM_ECC ? "ECC cleanup" :
   1988 	  opmode == MVXORE_XEXCR_OM_MEMINIT ? "Memory Initialization" :
   1989 	  "unknown");
   1990 	printf("    SrcBurstLimit : %s Bytes\n",
   1991 	  (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
   1992 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
   1993 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
	    "unknown");
	printf("    DstBurstLimit : %s Bytes\n",
	  (val & MVXORE_XEXCR_DBL_MASK) == MVXORE_XEXCR_DBL_128B ? "128" :
	    (val & MVXORE_XEXCR_DBL_MASK) == MVXORE_XEXCR_DBL_64B ? "64" :
	    (val & MVXORE_XEXCR_DBL_MASK) == MVXORE_XEXCR_DBL_32B ? "32" :
	    "unknown");
   2000 	val =
   2001 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
   2002 	printf("  Activation      : 0x%08x\n", val);
   2003 	val &= MVXORE_XEXACTR_XESTATUS_MASK;
   2004 	printf("    XEstatus      : %s\n",
   2005 	    val == MVXORE_XEXACTR_XESTATUS_NA ? "Channel not active" :
   2006 	    val == MVXORE_XEXACTR_XESTATUS_ACT ? "Channel active" :
   2007 	    val == MVXORE_XEXACTR_XESTATUS_P ? "Channel paused" : "???");
   2008 
   2009 	if (opmode == MVXORE_XEXCR_OM_XOR ||
   2010 	    opmode == MVXORE_XEXCR_OM_CRC32 ||
   2011 	    opmode == MVXORE_XEXCR_OM_DMA) {
   2012 		printf("  NextDescPtr     : 0x%08x\n",
   2013 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   2014 		    MVXORE_XEXNDPR(sc, chan)));
   2015 		printf("  CurrentDescPtr  : 0x%08x\n",
   2016 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   2017 		    MVXORE_XEXCDPR(chan)));
   2018 	}
   2019 	printf("  ByteCnt         : 0x%08x\n",
   2020 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXBCR(chan)));
   2021 
   2022 	if (opmode == MVXORE_XEXCR_OM_ECC ||
   2023 	    opmode == MVXORE_XEXCR_OM_MEMINIT) {
   2024 		printf("  DstPtr          : 0x%08x\n",
   2025 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   2026 		    MVXORE_XEXDPR(sc, chan)));
   2027 		printf("  BlockSize       : 0x%08x\n",
   2028 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   2029 		    MVXORE_XEXBSR(sc, chan)));
   2030 
   2031 		if (opmode == MVXORE_XEXCR_OM_ECC) {
   2032 			val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   2033 			    MVXORE_XETMCR);
   2034 			if (val & MVXORE_XETMCR_TIMEREN) {
   2035 				val >>= MVXORE_XETMCR_SECTIONSIZECTRL_SHIFT;
   2036 				val &= MVXORE_XETMCR_SECTIONSIZECTRL_MASK;
				printf("  SectionSizeCtrl : 0x%08x\n",
				    1U << val);
   2038 				printf("  TimerInitVal    : 0x%08x\n",
   2039 				    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   2040 				    MVXORE_XETMIVR));
   2041 				printf("  TimerCrntVal    : 0x%08x\n",
   2042 				    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   2043 				    MVXORE_XETMCVR));
   2044 			}
   2045 		} else	/* MVXORE_XEXCR_OM_MEMINIT */
   2046 			printf("  InitVal         : 0x%08x%08x\n",
   2047 			    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   2048 			    MVXORE_XEIVRH),
   2049 			    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
   2050 			    MVXORE_XEIVRL));
   2051 	}
   2052 }
   2053 
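/*
 * Dump a XORE descriptor chain; in XOR mode every active source address is
 * printed, and the CRC-32 result is shown once the operation has completed.
 */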
   2054 static void
   2055 gtidmac_dump_xoredesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
   2056 		      uint32_t mode, int post)
   2057 {
   2058 	struct mvxore_desc *desc;
   2059 	int i, j;
   2060 	char buf[256];
   2061 
   2062 	printf("XORE Descriptor\n");
   2063 
   2064 	mode &= MVXORE_XEXCR_OM_MASK;
   2065 
   2066 	i = 0;
   2067 	while (1 /*CONSTCOND*/) {
   2068 		if (post)
   2069 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
   2070 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
   2071 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2072 
   2073 		desc = dd->dd_xore_vaddr;
   2074 
   2075 		printf("%d (0x%lx)\n", i, dd->dd_paddr);
   2076 
   2077 		snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036Success\0",
   2078 		    desc->stat);
		printf("  Status                  : %s\n", buf);
   2080 		if (desc->cmd & MVXORE_DESC_CMD_CRCLAST && post)
   2081 			printf("  CRC-32 Result           : 0x%08x\n",
   2082 			    desc->result);
   2083 		snprintb(buf, sizeof(buf),
   2084 		    "\177\020b\037EODIntEn\0b\036CRCLast\0"
   2085 		    "b\007Src7Cmd\0b\006Src6Cmd\0b\005Src5Cmd\0b\004Src4Cmd\0"
   2086 		    "b\003Src3Cmd\0b\002Src2Cmd\0b\001Src1Cmd\0b\000Src0Cmd\0",
   2087 		    desc->cmd);
		printf("  Command                 : %s\n", buf);
   2089 		printf("  Next Descriptor Address : 0x%08x\n", desc->nextda);
   2090 		printf("  Byte Count              :   0x%06x\n", desc->bcnt);
   2091 		printf("  Destination Address     : 0x%08x\n", desc->dstaddr);
   2092 		if (mode == MVXORE_XEXCR_OM_XOR) {
   2093 			for (j = 0; j < MVXORE_NSRC; j++)
   2094 				if (desc->cmd & MVXORE_DESC_CMD_SRCCMD(j))
   2095 					printf("  Source Address#%d        :"
   2096 					    " 0x%08x\n", j, desc->srcaddr[j]);
   2097 		} else
   2098 			printf("  Source Address          : 0x%08x\n",
   2099 			    desc->srcaddr[0]);
   2100 
		if (desc->nextda == 0)
   2102 			break;
   2103 
   2104 		if (!post)
   2105 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
   2106 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
   2107 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2108 
   2109 		i++;
   2110 		dd = SLIST_NEXT(dd, dd_next);
   2111 	}
   2112 	if (!post)
   2113 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
   2114 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
   2115 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2116 }
   2117 #endif
   2118