1 /* $NetBSD: gtidmac.c,v 1.3 2010/06/04 06:31:50 kiyohara Exp $ */
2 /*
3 * Copyright (c) 2008 KIYOHARA Takashi
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: gtidmac.c,v 1.3 2010/06/04 06:31:50 kiyohara Exp $");
30
31 #include <sys/param.h>
32 #include <sys/bus.h>
33 #include <sys/device.h>
34 #include <sys/errno.h>
35 #include <sys/endian.h>
36 #include <sys/kmem.h>
37
38 #include <uvm/uvm_param.h> /* For PAGE_SIZE */
39
40 #include <dev/dmover/dmovervar.h>
41
42 #include <dev/marvell/gtidmacreg.h>
43 #include <dev/marvell/gtidmacvar.h>
44 #include <dev/marvell/marvellreg.h>
45 #include <dev/marvell/marvellvar.h>
46
47 #include <prop/proplib.h>
48
49 #include "locators.h"
50
51 #ifdef GTIDMAC_DEBUG
52 #define DPRINTF(x) if (gtidmac_debug) printf x
53 int gtidmac_debug = 0;
54 #else
55 #define DPRINTF(x)
56 #endif
57
58 #define GTIDMAC_NDESC 64
59 #define GTIDMAC_MAXCHAN 8
60 #define MVXORE_NDESC 128
61 #define MVXORE_MAXCHAN 2
62
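/*
 * Worst-case DMA segment counts: one page-sized segment for every page
 * of the largest possible transfer.
 */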
63 #define GTIDMAC_NSEGS ((GTIDMAC_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
64 #define MVXORE_NSEGS ((MVXORE_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
65
66
67 struct gtidmac_softc;
68
69 struct gtidmac_function {
70 int (*chan_alloc)(void *, bus_dmamap_t **, bus_dmamap_t **, void *);
71 void (*chan_free)(void *, int);
72 int (*dma_setup)(void *, int, int, bus_dmamap_t *, bus_dmamap_t *,
73 bus_size_t);
74 void (*dma_start)(void *, int,
75 void (*dma_done_cb)(void *, int, bus_dmamap_t *,
76 bus_dmamap_t *, int));
77 uint32_t (*dma_finish)(void *, int, int);
78 };
79
80 struct gtidmac_dma_desc {
81 int dd_index;
82 union {
83 struct gtidmac_desc *idmac_vaddr;
84 struct mvxore_desc *xore_vaddr;
85 } dd_vaddr;
86 #define dd_idmac_vaddr dd_vaddr.idmac_vaddr
87 #define dd_xore_vaddr dd_vaddr.xore_vaddr
88 paddr_t dd_paddr;
89 SLIST_ENTRY(gtidmac_dma_desc) dd_next;
90 };
91
92 struct gtidmac_softc {
93 device_t sc_dev;
94
95 bus_space_tag_t sc_iot;
96 bus_space_handle_t sc_ioh;
97
98 bus_dma_tag_t sc_dmat;
99 struct gtidmac_dma_desc *sc_dd_buffer;
100 bus_dma_segment_t sc_pattern_segment;
101 struct {
102 u_char pbuf[16]; /* 16byte/pattern */
103 } *sc_pbuf; /* x256 pattern */
104
105 int sc_gtidmac_nchan;
106 struct gtidmac_desc *sc_dbuf;
107 bus_dmamap_t sc_dmap;
108 SLIST_HEAD(, gtidmac_dma_desc) sc_dlist;
109 struct {
110 bus_dmamap_t chan_in; /* In dmamap */
111 bus_dmamap_t chan_out; /* Out dmamap */
112 uint64_t chan_totalcnt; /* total transferred bytes */
113 int chan_ddidx;
114 void *chan_running; /* opaque object data */
115 void (*chan_dma_done)(void *, int, bus_dmamap_t *,
116 bus_dmamap_t *, int);
117 } sc_cdesc[GTIDMAC_MAXCHAN];
118 struct gtidmac_intr_arg {
119 struct gtidmac_softc *ia_sc;
120 uint32_t ia_cause;
121 uint32_t ia_mask;
122 uint32_t ia_eaddr;
123 uint32_t ia_eselect;
124 } sc_intrarg[GTIDMAC_NINTRRUPT];
125
126 int sc_mvxore_nchan;
127 struct mvxore_desc *sc_dbuf_xore;
128 bus_dmamap_t sc_dmap_xore;
129 SLIST_HEAD(, gtidmac_dma_desc) sc_dlist_xore;
130 struct {
131 bus_dmamap_t chan_in[MVXORE_NSRC]; /* In dmamap */
132 bus_dmamap_t chan_out; /* Out dmamap */
133 uint64_t chan_totalcnt; /* total transferred */
134 int chan_ddidx;
135 void *chan_running; /* opaque object data */
136 void (*chan_dma_done)(void *, int, bus_dmamap_t *,
137 bus_dmamap_t *, int);
138 } sc_cdesc_xore[MVXORE_MAXCHAN];
139
140 struct dmover_backend sc_dmb;
141 struct dmover_backend sc_dmb_xore;
142 int sc_dmb_busy;
143 };
144 struct gtidmac_softc *gtidmac_softc = NULL;
145
146 static int gtidmac_match(device_t, struct cfdata *, void *);
147 static void gtidmac_attach(device_t, device_t, void *);
148
149 static int gtidmac_intr(void *);
150 static int mvxore_intr(void *);
151
152 static void gtidmac_process(struct dmover_backend *);
153 static void gtidmac_dmover_run(struct dmover_backend *);
154 static void gtidmac_dmover_done(void *, int, bus_dmamap_t *, bus_dmamap_t *,
155 int);
156 __inline int gtidmac_dmmap_load(struct gtidmac_softc *, bus_dmamap_t,
157 dmover_buffer_type, dmover_buffer *, int);
158 __inline void gtidmac_dmmap_unload(struct gtidmac_softc *, bus_dmamap_t, int);
159
160 static uint32_t gtidmac_finish(void *, int, int);
161 static uint32_t mvxore_finish(void *, int, int);
162
163 static void gtidmac_wininit(struct gtidmac_softc *);
164 static void mvxore_wininit(struct gtidmac_softc *);
165
166 #ifdef GTIDMAC_DEBUG
167 static void gtidmac_dump_idmacreg(struct gtidmac_softc *, int);
168 static void gtidmac_dump_idmacdesc(struct gtidmac_softc *,
169 struct gtidmac_dma_desc *, uint32_t, int);
170 static void gtidmac_dump_xorereg(struct gtidmac_softc *, int);
171 static void gtidmac_dump_xoredesc(struct gtidmac_softc *,
172 struct gtidmac_dma_desc *, uint32_t, int);
173 #endif
174
175
176 static struct gtidmac_function gtidmac_functions = {
177 .chan_alloc = gtidmac_chan_alloc,
178 .chan_free = gtidmac_chan_free,
179 .dma_setup = gtidmac_setup,
180 .dma_start = gtidmac_start,
181 .dma_finish = gtidmac_finish,
182 };
183
184 static struct gtidmac_function mvxore_functions = {
185 .chan_alloc = mvxore_chan_alloc,
186 .chan_free = mvxore_chan_free,
187 .dma_setup = mvxore_setup,
188 .dma_start = mvxore_start,
189 .dma_finish = mvxore_finish,
190 };
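/*
 * The function vectors above are stashed in the dad_data member of the
 * algdesc tables below.  gtidmac_dmover_run() and gtidmac_dmover_done()
 * fetch them back from the request's algdesc and call chan_alloc,
 * dma_setup, dma_start, dma_finish and chan_free to drive a request
 * through either the IDMA controller or the XOR engine.
 */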
191
192 static const struct dmover_algdesc gtidmac_algdescs[] = {
193 {
194 .dad_name = DMOVER_FUNC_ZERO,
195 .dad_data = &gtidmac_functions,
196 .dad_ninputs = 0
197 },
198 {
199 .dad_name = DMOVER_FUNC_FILL8,
200 .dad_data = &gtidmac_functions,
201 .dad_ninputs = 0
202 },
203 {
204 .dad_name = DMOVER_FUNC_COPY,
205 .dad_data = &gtidmac_functions,
206 .dad_ninputs = 1
207 },
208 };
209
210 static const struct dmover_algdesc mvxore_algdescs[] = {
211 #if 0
212 /*
213 * These operations carry too many restrictions on the XOR engine;
214 * use the IDMAC for them instead.
215 */
216 {
217 .dad_name = DMOVER_FUNC_ZERO,
218 .dad_data = &mvxore_functions,
219 .dad_ninputs = 0
220 },
221 {
222 .dad_name = DMOVER_FUNC_FILL8,
223 .dad_data = &mvxore_functions,
224 .dad_ninputs = 0
225 },
226 #endif
227 {
228 .dad_name = DMOVER_FUNC_COPY,
229 .dad_data = &mvxore_functions,
230 .dad_ninputs = 1
231 },
232 {
233 .dad_name = DMOVER_FUNC_ISCSI_CRC32C,
234 .dad_data = &mvxore_functions,
235 .dad_ninputs = 1
236 },
237 {
238 .dad_name = DMOVER_FUNC_XOR2,
239 .dad_data = &mvxore_functions,
240 .dad_ninputs = 2
241 },
242 {
243 .dad_name = DMOVER_FUNC_XOR3,
244 .dad_data = &mvxore_functions,
245 .dad_ninputs = 3
246 },
247 {
248 .dad_name = DMOVER_FUNC_XOR4,
249 .dad_data = &mvxore_functions,
250 .dad_ninputs = 4
251 },
252 {
253 .dad_name = DMOVER_FUNC_XOR5,
254 .dad_data = &mvxore_functions,
255 .dad_ninputs = 5
256 },
257 {
258 .dad_name = DMOVER_FUNC_XOR6,
259 .dad_data = &mvxore_functions,
260 .dad_ninputs = 6
261 },
262 {
263 .dad_name = DMOVER_FUNC_XOR7,
264 .dad_data = &mvxore_functions,
265 .dad_ninputs = 7
266 },
267 {
268 .dad_name = DMOVER_FUNC_XOR8,
269 .dad_data = &mvxore_functions,
270 .dad_ninputs = 8
271 },
272 };
273
274 CFATTACH_DECL_NEW(gtidmac_gt, sizeof(struct gtidmac_softc),
275 gtidmac_match, gtidmac_attach, NULL, NULL);
276 CFATTACH_DECL_NEW(gtidmac_mbus, sizeof(struct gtidmac_softc),
277 gtidmac_match, gtidmac_attach, NULL, NULL);
278
279
280 /* ARGSUSED */
281 static int
282 gtidmac_match(device_t parent, struct cfdata *match, void *aux)
283 {
284 struct marvell_attach_args *mva = aux;
285
286 if (strcmp(mva->mva_name, match->cf_name) != 0)
287 return 0;
288
289 if (mva->mva_model == MARVELL_ORION_1_88F6082)
290 return 0;
291
292 if (mva->mva_offset == GTCF_OFFSET_DEFAULT ||
293 mva->mva_irq == GTCF_IRQ_DEFAULT)
294 return 0;
295
296 mva->mva_size = GTIDMAC_SIZE;
297 return 1;
298 }
299
300 /* ARGSUSED */
301 static void
302 gtidmac_attach(device_t parent, device_t self, void *aux)
303 {
304 struct gtidmac_softc *sc = device_private(self);
305 struct marvell_attach_args *mva = aux;
306 bus_dma_segment_t segs, segs_xore;
307 struct gtidmac_dma_desc *dd;
308 prop_dictionary_t dict = device_properties(self);
309 uint32_t mask, dmb_speed, xore_irq;
310 int idmac_nchan, xore_nchan, nsegs, nsegs_xore, i, j, k, n;
311
312 xore_irq = 0;
313 idmac_nchan = 8;
314 xore_nchan = 0;
315 switch (mva->mva_model) {
316 case MARVELL_DISCOVERY:
317 case MARVELL_DISCOVERY_II:
318 case MARVELL_DISCOVERY_III:
319 #if 0
320 case MARVELL_DISCOVERY_V: ????
321 #endif
322 break;
323
324 #if 0
325 case MARVELL_DISCOVERY_LT: ????
326 case MARVELL_DISCOVERY_VI: ????
327 #endif
328 case MARVELL_ORION_1_88F1181:
329 case MARVELL_ORION_1_88F5082:
330 case MARVELL_ORION_1_88F5180N:
331 case MARVELL_ORION_1_88F5181:
332 case MARVELL_ORION_1_88W8660:
333 case MARVELL_ORION_2_88F1281:
334 case MARVELL_ORION_2_88F5281:
335 idmac_nchan = 4;
336 break;
337
338 case MARVELL_ORION_1_88F5182:
339 idmac_nchan = 4;
340 xore_nchan = 2;
341 break;
342 }
343 if (xore_nchan != 0)
344 if (!prop_dictionary_get_uint32(dict, "xore-irq-begin",
345 &xore_irq)) {
346 aprint_error(": no xore-irq-begin property\n");
347 return;
348 }
349
350 aprint_naive("\n");
351 aprint_normal(": Marvell IDMA Controller%s\n",
352 xore_nchan ? "/XOR Engine" : "");
353
354 sc->sc_dev = self;
355 sc->sc_iot = mva->mva_iot;
356
357 /* Map I/O registers */
358 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
359 mva->mva_size, &sc->sc_ioh)) {
360 aprint_error_dev(self, "can't map registers\n");
361 return;
362 }
363
364 /*
365 * Initialise DMA descriptors and associated metadata
366 */
367 sc->sc_dmat = mva->mva_dmat;
368 n = idmac_nchan * GTIDMAC_NDESC + xore_nchan * MVXORE_NDESC;
369 sc->sc_dd_buffer =
370 kmem_alloc(sizeof(struct gtidmac_dma_desc) * n, KM_SLEEP);
371 if (sc->sc_dd_buffer == NULL) {
372 aprint_error_dev(self, "can't allocate memory\n");
373 goto fail1;
374 }
375 /* pattern buffer */
376 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
377 &sc->sc_pattern_segment, 1, &nsegs, BUS_DMA_NOWAIT)) {
378 aprint_error_dev(self,
379 "bus_dmamem_alloc failed: pattern buffer\n");
380 goto fail2;
381 }
382 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_pattern_segment, 1, PAGE_SIZE,
383 (void **)&sc->sc_pbuf, BUS_DMA_NOWAIT)) {
384 aprint_error_dev(self,
385 "bus_dmamem_map failed: pattern buffer\n");
386 goto fail3;
387 }
388 for (i = 0; i < 0x100; i++)
389 for (j = 0; j < sizeof(sc->sc_pbuf[i].pbuf); j++)
390 sc->sc_pbuf[i].pbuf[j] = i;
391
392 /* IDMAC DMA descriptor buffer */
393 sc->sc_gtidmac_nchan = idmac_nchan;
394 if (bus_dmamem_alloc(sc->sc_dmat,
395 sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan,
396 PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
397 aprint_error_dev(self,
398 "bus_dmamem_alloc failed: descriptor buffer\n");
399 goto fail4;
400 }
401 if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
402 sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan,
403 (void **)&sc->sc_dbuf, BUS_DMA_NOWAIT)) {
404 aprint_error_dev(self,
405 "bus_dmamem_map failed: descriptor buffer\n");
406 goto fail5;
407 }
408 if (bus_dmamap_create(sc->sc_dmat,
409 sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan, 1,
410 sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan, 0,
411 BUS_DMA_NOWAIT, &sc->sc_dmap)) {
412 aprint_error_dev(self,
413 "bus_dmamap_create failed: descriptor buffer\n");
414 goto fail6;
415 }
416 if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, sc->sc_dbuf,
417 sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan, NULL,
418 BUS_DMA_NOWAIT)) {
419 aprint_error_dev(self,
420 "bus_dmamap_load failed: descriptor buffer\n");
421 goto fail7;
422 }
423 SLIST_INIT(&sc->sc_dlist);
424 for (i = 0; i < GTIDMAC_NDESC * idmac_nchan; i++) {
425 dd = &sc->sc_dd_buffer[i];
426 dd->dd_index = i;
427 dd->dd_idmac_vaddr = &sc->sc_dbuf[i];
428 dd->dd_paddr = sc->sc_dmap->dm_segs[0].ds_addr +
429 (sizeof(struct gtidmac_desc) * i);
430 SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
431 }
432
433 /* Initialize IDMAC DMA channels */
434 mask = 0;
435 for (i = 0; i < idmac_nchan; i++) {
436 if (i > 0 &&
437 ((i * GTIDMAC_I_BITS) & 31 /*bit*/) == 0) {
438 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
439 GTIDMAC_IMR(i - 1), mask);
440 mask = 0;
441 }
442
443 if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
444 GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
445 &sc->sc_cdesc[i].chan_in)) {
446 aprint_error_dev(self,
447 "bus_dmamap_create failed: chan%d in\n", i);
448 goto fail8;
449 }
450 if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
451 GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
452 &sc->sc_cdesc[i].chan_out)) {
453 aprint_error_dev(self,
454 "bus_dmamap_create failed: chan%d out\n", i);
455 bus_dmamap_destroy(sc->sc_dmat,
456 sc->sc_cdesc[i].chan_in);
457 goto fail8;
458 }
459 sc->sc_cdesc[i].chan_totalcnt = 0;
460 sc->sc_cdesc[i].chan_running = NULL;
461
462 /* Bits that overflow the 32-bit mask are ignored. */
463 mask |= GTIDMAC_I(i,
464 GTIDMAC_I_COMP |
465 GTIDMAC_I_ADDRMISS |
466 GTIDMAC_I_ACCPROT |
467 GTIDMAC_I_WRPROT |
468 GTIDMAC_I_OWN);
469 }
470 if (i > 0)
471 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_IMR(i - 1),
472 mask);
473
474 /* Setup interrupt */
475 for (j = 0; j < GTIDMAC_NINTRRUPT; j++) {
476 int c = j * idmac_nchan / __arraycount(sc->sc_intrarg);
477
478 sc->sc_intrarg[j].ia_sc = sc;
479 sc->sc_intrarg[j].ia_cause = GTIDMAC_ICR(c);
480 sc->sc_intrarg[j].ia_eaddr = GTIDMAC_EAR(c);
481 sc->sc_intrarg[j].ia_eselect = GTIDMAC_ESR(c);
482 marvell_intr_establish(mva->mva_irq + j, IPL_BIO,
483 gtidmac_intr, &sc->sc_intrarg[j]);
484 }
485
486 if (mva->mva_model != MARVELL_DISCOVERY)
487 gtidmac_wininit(sc);
488
489 /* Register us with dmover. */
490 sc->sc_dmb.dmb_name = device_xname(self);
491 if (!prop_dictionary_get_uint32(dict, "dmb_speed", &dmb_speed)) {
492 aprint_error_dev(self, "no dmb_speed property\n");
493 dmb_speed = 10; /* Probably still faster than swdmover. */
494 }
495 sc->sc_dmb.dmb_speed = dmb_speed;
496 sc->sc_dmb.dmb_cookie = sc;
497 sc->sc_dmb.dmb_algdescs = gtidmac_algdescs;
498 sc->sc_dmb.dmb_nalgdescs = __arraycount(gtidmac_algdescs);
499 sc->sc_dmb.dmb_process = gtidmac_process;
500 dmover_backend_register(&sc->sc_dmb);
501 sc->sc_dmb_busy = 0;
502
503 if (xore_nchan) {
504 /* XORE DMA descriptor buffer */
505 sc->sc_mvxore_nchan = xore_nchan;
506 if (bus_dmamem_alloc(sc->sc_dmat,
507 sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan,
508 PAGE_SIZE, 0, &segs_xore, 1, &nsegs_xore, BUS_DMA_NOWAIT)) {
509 aprint_error_dev(self, "bus_dmamem_alloc failed:"
510 " xore descriptor buffer\n");
511 goto fail8;
512 }
513 if (bus_dmamem_map(sc->sc_dmat, &segs_xore, 1,
514 sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan,
515 (void **)&sc->sc_dbuf_xore, BUS_DMA_NOWAIT)) {
516 aprint_error_dev(self,
517 "bus_dmamem_map failed: xore descriptor buffer\n");
518 goto fail9;
519 }
520 if (bus_dmamap_create(sc->sc_dmat,
521 sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan, 1,
522 sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan, 0,
523 BUS_DMA_NOWAIT, &sc->sc_dmap_xore)) {
524 aprint_error_dev(self, "bus_dmamap_create failed:"
525 " xore descriptor buffer\n");
526 goto fail10;
527 }
528 if (bus_dmamap_load(
529 sc->sc_dmat, sc->sc_dmap_xore, sc->sc_dbuf_xore,
530 sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan,
531 NULL, BUS_DMA_NOWAIT)) {
532 aprint_error_dev(self,
533 "bus_dmamap_load failed: xore descriptor buffer\n");
534 goto fail11;
535 }
536 SLIST_INIT(&sc->sc_dlist_xore);
537 for (j = 0; j < MVXORE_NDESC * xore_nchan; j++) {
538 dd = &sc->sc_dd_buffer[j + GTIDMAC_NDESC * idmac_nchan];
539 dd->dd_index = j;
540 dd->dd_xore_vaddr = &sc->sc_dbuf_xore[j];
541 dd->dd_paddr = sc->sc_dmap_xore->dm_segs[0].ds_addr +
542 (sizeof(struct mvxore_desc) * j);
543 SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
544 }
545
546 /* Initialize XORE DMA channels */
547 mask = 0;
548 for (j = 0; j < xore_nchan; j++) {
549 for (k = 0; k < MVXORE_NSRC; k++) {
550 if (bus_dmamap_create(sc->sc_dmat,
551 MVXORE_MAXXFER, MVXORE_NSEGS,
552 MVXORE_MAXXFER, 0, BUS_DMA_NOWAIT,
553 &sc->sc_cdesc_xore[j].chan_in[k])) {
554 aprint_error_dev(self,
555 "bus_dmamap_create failed:"
556 " xore chan%d in[%d]\n", j, k);
557 goto fail12;
558 }
559 }
560 if (bus_dmamap_create(sc->sc_dmat, MVXORE_MAXXFER,
561 MVXORE_NSEGS, MVXORE_MAXXFER, 0,
562 BUS_DMA_NOWAIT, &sc->sc_cdesc_xore[j].chan_out)) {
563 aprint_error_dev(self,
564 "bus_dmamap_create failed: chan%d out\n",
565 j);
566 goto fail13;
567 }
568 sc->sc_cdesc_xore[j].chan_totalcnt = 0;
569 sc->sc_cdesc_xore[j].chan_running = NULL;
570
571 mask |= MVXORE_I(j,
572 MVXORE_I_EOC |
573 MVXORE_I_ADDRDECODE |
574 MVXORE_I_ACCPROT |
575 MVXORE_I_WRPROT |
576 MVXORE_I_OWN |
577 MVXORE_I_INTPARITY |
578 MVXORE_I_XBAR);
579 }
580 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIMR, mask);
581
582 marvell_intr_establish(xore_irq + 0, IPL_BIO, mvxore_intr, sc);
583 marvell_intr_establish(xore_irq + 1, IPL_BIO, mvxore_intr, sc);
584
585 mvxore_wininit(sc);
586
587 /* Register us with dmover. */
588 sc->sc_dmb_xore.dmb_name = device_xname(sc->sc_dev);
589 sc->sc_dmb_xore.dmb_speed = dmb_speed;
590 sc->sc_dmb_xore.dmb_cookie = sc;
591 sc->sc_dmb_xore.dmb_algdescs = mvxore_algdescs;
592 sc->sc_dmb_xore.dmb_nalgdescs =
593 __arraycount(mvxore_algdescs);
594 sc->sc_dmb_xore.dmb_process = gtidmac_process;
595 dmover_backend_register(&sc->sc_dmb_xore);
596 }
597
598 gtidmac_softc = sc;
599
600 return;
601
602 for (; j-- > 0;) {
603 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc_xore[j].chan_out);
604
605 fail13:
606 k = MVXORE_NSRC;
607 fail12:
608 for (; k-- > 0;)
609 bus_dmamap_destroy(sc->sc_dmat,
610 sc->sc_cdesc_xore[j].chan_in[k]);
611 }
612 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap_xore);
613 fail11:
614 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap_xore);
615 fail10:
616 bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf_xore,
617 sizeof(struct mvxore_desc) * MVXORE_NDESC * xore_nchan);
618 fail9:
619 bus_dmamem_free(sc->sc_dmat, &segs_xore, 1);
620 fail8:
621 for (; i-- > 0;) {
622 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
623 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
624 }
625 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
626 fail7:
627 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
628 fail6:
629 bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
630 sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan);
631 fail5:
632 bus_dmamem_free(sc->sc_dmat, &segs, 1);
633 fail4:
634 bus_dmamem_unmap(sc->sc_dmat, sc->sc_pbuf, PAGE_SIZE);
635 fail3:
636 bus_dmamem_free(sc->sc_dmat, &sc->sc_pattern_segment, 1);
637 fail2:
638 kmem_free(sc->sc_dd_buffer, sizeof(struct gtidmac_dma_desc) * n);
639 fail1:
640 bus_space_unmap(sc->sc_iot, sc->sc_ioh, mva->mva_size);
641 return;
642 }
643
644
645 static int
646 gtidmac_intr(void *arg)
647 {
648 struct gtidmac_intr_arg *ia = arg;
649 struct gtidmac_softc *sc = ia->ia_sc;
650 uint32_t cause;
651 int handled = 0, chan, error;
652
653 cause = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause);
654 DPRINTF(("IDMAC intr: cause=0x%x\n", cause));
655 bus_space_write_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause, ~cause);
656
657 chan = 0;
658 while (cause) {
659 error = 0;
660 if (cause & GTIDMAC_I_ADDRMISS) {
661 aprint_error_dev(sc->sc_dev, "Address Miss");
662 error = EINVAL;
663 }
664 if (cause & GTIDMAC_I_ACCPROT) {
665 aprint_error_dev(sc->sc_dev,
666 "Access Protect Violation");
667 error = EACCES;
668 }
669 if (cause & GTIDMAC_I_WRPROT) {
670 aprint_error_dev(sc->sc_dev, "Write Protect");
671 error = EACCES;
672 }
673 if (cause & GTIDMAC_I_OWN) {
674 aprint_error_dev(sc->sc_dev, "Ownership Violation");
675 error = EINVAL;
676 }
677
678 #define GTIDMAC_I_ERROR \
679 (GTIDMAC_I_ADDRMISS | \
680 GTIDMAC_I_ACCPROT | \
681 GTIDMAC_I_WRPROT | \
682 GTIDMAC_I_OWN)
683 if (cause & GTIDMAC_I_ERROR) {
684 uint32_t sel;
685 int select;
686
687 sel = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
688 ia->ia_eselect) & GTIDMAC_ESR_SEL;
689 select = sel - chan * GTIDMAC_I_BITS;
690 if (select >= 0 && select < GTIDMAC_I_BITS) {
691 uint32_t ear;
692
693 ear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
694 ia->ia_eaddr);
695 aprint_error(": Error Address 0x%x\n", ear);
696 } else
697 aprint_error(": lost Error Address\n");
698 }
699
700 if (cause & (GTIDMAC_I_COMP | GTIDMAC_I_ERROR)) {
701 sc->sc_cdesc[chan].chan_dma_done(
702 sc->sc_cdesc[chan].chan_running, chan,
703 &sc->sc_cdesc[chan].chan_in,
704 &sc->sc_cdesc[chan].chan_out, error);
705 handled++;
706 }
707
708 cause >>= GTIDMAC_I_BITS;
709 }
710 DPRINTF(("IDMAC intr: %shandled\n", handled ? "" : "not "));
711
712 return handled;
713 }
714
715 static int
716 mvxore_intr(void *arg)
717 {
718 struct gtidmac_softc *sc = arg;
719 uint32_t cause;
720 int handled = 0, chan, error;
721
722 cause = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEICR);
723 DPRINTF(("XORE intr: cause=0x%x\n", cause));
724 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEICR, ~cause);
725
726 chan = 0;
727 while (cause) {
728 error = 0;
729 if (cause & MVXORE_I_ADDRDECODE) {
730 aprint_error_dev(sc->sc_dev, "Failed address decoding");
731 error = EINVAL;
732 }
733 if (cause & MVXORE_I_ACCPROT) {
734 aprint_error_dev(sc->sc_dev,
735 "Access Protect Violation");
736 error = EACCES;
737 }
738 if (cause & MVXORE_I_WRPROT) {
739 aprint_error_dev(sc->sc_dev, "Write Protect");
740 error = EACCES;
741 }
742 if (cause & MVXORE_I_OWN) {
743 aprint_error_dev(sc->sc_dev, "Ownership Violation");
744 error = EINVAL;
745 }
746 if (cause & MVXORE_I_INTPARITY) {
747 aprint_error_dev(sc->sc_dev, "Parity Error");
748 error = EIO;
749 }
750 if (cause & MVXORE_I_XBAR) {
751 aprint_error_dev(sc->sc_dev, "Crossbar Parity Error");
752 error = EINVAL;
753 }
754
755 #define MVXORE_I_ERROR \
756 (MVXORE_I_ADDRDECODE | \
757 MVXORE_I_ACCPROT | \
758 MVXORE_I_WRPROT | \
759 MVXORE_I_OWN | \
760 MVXORE_I_INTPARITY | \
761 MVXORE_I_XBAR)
762 if (cause & MVXORE_I_ERROR) {
763 uint32_t type;
764 int event;
765
766 type = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
767 MVXORE_XEECR) & MVXORE_XEECR_ERRORTYPE_MASK;
768 event = type - chan * MVXORE_I_BITS;
769 if (event >= 0 && event < MVXORE_I_BITS) {
770 uint32_t xeear;
771
772 xeear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
773 MVXORE_XEEAR);
774 aprint_error(": Error Address 0x%x\n", xeear);
775 } else
776 aprint_error(": lost Error Address\n");
777 }
778
779 if (cause & (MVXORE_I_EOC | MVXORE_I_ERROR)) {
780 sc->sc_cdesc_xore[chan].chan_dma_done(
781 sc->sc_cdesc_xore[chan].chan_running, chan,
782 sc->sc_cdesc_xore[chan].chan_in,
783 &sc->sc_cdesc_xore[chan].chan_out, error);
784 handled++;
785 }
786
787 cause >>= MVXORE_I_BITS;
788 }
789 DPRINTF(("XORE intr: %shandled\n", handled ? "" : "not "));
790
791 return handled;
792 }
793
794
795 /*
796 * dmover(9) backend function.
797 */
798 static void
799 gtidmac_process(struct dmover_backend *dmb)
800 {
801 struct gtidmac_softc *sc = dmb->dmb_cookie;
802 int s;
803
804 /* If the backend is currently idle, go process the queue. */
805 s = splbio();
806 if (!sc->sc_dmb_busy)
807 gtidmac_dmover_run(dmb);
808 splx(s);
809 }
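/*
 * Illustrative sketch only (never compiled): how a dmover(9) client
 * request reaches gtidmac_process() above.  The client-side entry
 * points (dmover_session_create(), dmover_request_alloc(),
 * dmover_process(), ...) are assumed from dmover(9); see that manual
 * page for the exact interface.
 */
#if 0
static void
example_zero_page(void *kva)
{
	struct dmover_session *dses;
	struct dmover_request *dreq;

	if (dmover_session_create(DMOVER_FUNC_ZERO, &dses) != 0)
		return;
	dreq = dmover_request_alloc(dses, NULL);
	if (dreq == NULL) {
		dmover_session_destroy(dses);
		return;
	}
	dreq->dreq_flags = DMOVER_REQ_WAIT;
	dreq->dreq_outbuf_type = DMOVER_BUF_LINEAR;
	dreq->dreq_outbuf.dmbuf_linear.l_addr = kva;
	dreq->dreq_outbuf.dmbuf_linear.l_len = PAGE_SIZE;
	dmover_process(dreq);		/* ends up in gtidmac_process() */
	if (dreq->dreq_flags & DMOVER_REQ_ERROR)
		printf("zero failed: %d\n", dreq->dreq_error);
	dmover_request_free(dreq);
	dmover_session_destroy(dses);
}
#endif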
810
811 static void
812 gtidmac_dmover_run(struct dmover_backend *dmb)
813 {
814 struct gtidmac_softc *sc = dmb->dmb_cookie;
815 struct dmover_request *dreq;
816 const struct dmover_algdesc *algdesc;
817 struct gtidmac_function *df;
818 bus_dmamap_t *dmamap_in, *dmamap_out;
819 int chan, ninputs, error, i;
820
821 sc->sc_dmb_busy = 1;
822
823 for (;;) {
824 dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
825 if (dreq == NULL)
826 break;
827 algdesc = dreq->dreq_assignment->das_algdesc;
828 df = algdesc->dad_data;
829 chan = (*df->chan_alloc)(sc, &dmamap_in, &dmamap_out, dreq);
830 if (chan == -1)
831 return;
832
833 dmover_backend_remque(dmb, dreq);
834 dreq->dreq_flags |= DMOVER_REQ_RUNNING;
835
836 /* XXXUNLOCK */
837
838 error = 0;
839
840 /* Load in/out buffers of dmover to bus_dmamap. */
841 ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
842 if (ninputs == 0) {
843 int pno = 0;
844
845 if (algdesc->dad_name == DMOVER_FUNC_FILL8)
846 pno = dreq->dreq_immediate[0];
847
848 i = 0;
849 error = bus_dmamap_load(sc->sc_dmat, *dmamap_in,
850 &sc->sc_pbuf[pno], sizeof(sc->sc_pbuf[pno]), NULL,
851 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE);
852 if (error == 0) {
853 bus_dmamap_sync(sc->sc_dmat, *dmamap_in, 0,
854 sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
855
856 /*
857 * Make the error path below call
858 * gtidmac_dmmap_unload() for this map.
859 */
860 i = 1;
861 }
862 } else
863 for (i = 0; i < ninputs; i++) {
864 error = gtidmac_dmmap_load(sc,
865 *(dmamap_in + i), dreq->dreq_inbuf_type,
866 &dreq->dreq_inbuf[i], 0/*write*/);
867 if (error != 0)
868 break;
869 }
870 if (algdesc->dad_name != DMOVER_FUNC_ISCSI_CRC32C) {
871 if (error == 0)
872 error = gtidmac_dmmap_load(sc, *dmamap_out,
873 dreq->dreq_outbuf_type, &dreq->dreq_outbuf,
874 1/*read*/);
875
876 if (error == 0) {
877 /*
878 * For a dmover request the output buffer size is
879 * always taken as the DMA transfer size.
880 */
881 error = (*df->dma_setup)(sc, chan, ninputs,
882 dmamap_in, dmamap_out,
883 (*dmamap_out)->dm_mapsize);
884 if (error != 0)
885 gtidmac_dmmap_unload(sc, *dmamap_out,
886 1);
887 }
888 } else
889 if (error == 0)
890 error = (*df->dma_setup)(sc, chan, ninputs,
891 dmamap_in, dmamap_out,
892 (*dmamap_in)->dm_mapsize);
893
894 /* XXXLOCK */
895
896 if (error != 0) {
897 for (; i-- > 0;)
898 gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
899 (*df->chan_free)(sc, chan);
900
901 dreq->dreq_flags |= DMOVER_REQ_ERROR;
902 dreq->dreq_error = error;
903 /* XXXUNLOCK */
904 dmover_done(dreq);
905 /* XXXLOCK */
906 continue;
907 }
908
909 (*df->dma_start)(sc, chan, gtidmac_dmover_done);
910 break;
911 }
912
913 /* All done */
914 sc->sc_dmb_busy = 0;
915 }
916
917 static void
918 gtidmac_dmover_done(void *object, int chan, bus_dmamap_t *dmamap_in,
919 bus_dmamap_t *dmamap_out, int error)
920 {
921 struct gtidmac_softc *sc;
922 struct dmover_request *dreq = object;
923 struct dmover_backend *dmb;
924 struct gtidmac_function *df;
925 uint32_t result;
926 int ninputs, i;
927
928 KASSERT(dreq != NULL);
929
930 dmb = dreq->dreq_assignment->das_backend;
931 df = dreq->dreq_assignment->das_algdesc->dad_data;
932 ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
933 sc = dmb->dmb_cookie;
934
935 result = (*df->dma_finish)(sc, chan, error);
936 for (i = 0; i < ninputs; i++)
937 gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
938 if (dreq->dreq_assignment->das_algdesc->dad_name ==
939 DMOVER_FUNC_ISCSI_CRC32C)
940 memcpy(dreq->dreq_immediate, &result, sizeof(result));
941 else
942 gtidmac_dmmap_unload(sc, *dmamap_out, 1);
943
944 (*df->chan_free)(sc, chan);
945
946 if (error) {
947 dreq->dreq_error = error;
948 dreq->dreq_flags |= DMOVER_REQ_ERROR;
949 }
950
951 dmover_done(dreq);
952
953 /*
954 * See if we can start some more dmover(9) requests.
955 *
956 * Note: We're already at splbio() here.
957 */
958 if (!sc->sc_dmb_busy)
959 gtidmac_dmover_run(dmb);
960 }
961
962 __inline int
963 gtidmac_dmmap_load(struct gtidmac_softc *sc, bus_dmamap_t dmamap,
964 dmover_buffer_type dmbuf_type, dmover_buffer *dmbuf,
965 int read)
966 {
967 int error, flags;
968
969 flags = BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
970 (read ? BUS_DMA_READ : BUS_DMA_WRITE);
971
972 switch (dmbuf_type) {
973 case DMOVER_BUF_LINEAR:
974 error = bus_dmamap_load(sc->sc_dmat, dmamap,
975 dmbuf->dmbuf_linear.l_addr, dmbuf->dmbuf_linear.l_len,
976 NULL, flags);
977 break;
978
979 case DMOVER_BUF_UIO:
980 if ((read && dmbuf->dmbuf_uio->uio_rw != UIO_READ) ||
981 (!read && dmbuf->dmbuf_uio->uio_rw == UIO_READ))
982 return (EINVAL);
983
984 error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
985 dmbuf->dmbuf_uio, flags);
986 break;
987
988 default:
989 error = EINVAL;
990 }
991
992 if (error == 0)
993 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
994 read ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
995
996 return error;
997 }
998
999 __inline void
1000 gtidmac_dmmap_unload(struct gtidmac_softc *sc, bus_dmamap_t dmamap, int read)
1001 {
1002
1003 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1004 read ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1005
1006 bus_dmamap_unload(sc->sc_dmat, dmamap);
1007 }
1008
1009
1010 void *
1011 gtidmac_tag_get(void)
1012 {
1013
1014 return gtidmac_softc;
1015 }
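/*
 * Illustrative sketch only (never compiled): a hypothetical consumer
 * that obtains the softc with gtidmac_tag_get() and drives a single
 * IDMAC copy through the exported channel interface below.  The maps
 * returned by gtidmac_chan_alloc() must be loaded by the caller before
 * gtidmac_setup(); bus_dmamap_sync() calls and the unload/free work in
 * the completion callback are omitted for brevity.
 */
#if 0
static void example_done(void *, int, bus_dmamap_t *, bus_dmamap_t *, int);

static int
example_copy(void *src, void *dst, bus_size_t len, void *cookie)
{
	struct gtidmac_softc *sc = gtidmac_tag_get();
	bus_dmamap_t *in, *out;
	int chan, error;

	chan = gtidmac_chan_alloc(sc, &in, &out, cookie);
	if (chan == -1)
		return EBUSY;
	error = bus_dmamap_load(sc->sc_dmat, *in, src, len, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (error == 0)
		error = bus_dmamap_load(sc->sc_dmat, *out, dst, len, NULL,
		    BUS_DMA_NOWAIT | BUS_DMA_READ);
	if (error == 0)
		error = gtidmac_setup(sc, chan, 1, in, out, len);
	if (error != 0) {
		/* (a real caller would also unload any loaded map here) */
		gtidmac_chan_free(sc, chan);
		return error;
	}
	gtidmac_start(sc, chan, example_done);	/* example_done() sees the result */
	return 0;
}
#endif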
1016
1017 /*
1018 * IDMAC functions
1019 */
1020 int
1021 gtidmac_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
1022 bus_dmamap_t **dmamap_out, void *object)
1023 {
1024 struct gtidmac_softc *sc = tag;
1025 int chan;
1026
1027 /* maybe need lock */
1028
1029 for (chan = 0; chan < sc->sc_gtidmac_nchan; chan++)
1030 if (sc->sc_cdesc[chan].chan_running == NULL)
1031 break;
1032 if (chan >= sc->sc_gtidmac_nchan)
1033 return -1;
1034
1035
1036 sc->sc_cdesc[chan].chan_running = object;
1037
1038 /* unlock */
1039
1040 *dmamap_in = &sc->sc_cdesc[chan].chan_in;
1041 *dmamap_out = &sc->sc_cdesc[chan].chan_out;
1042
1043 return chan;
1044 }
1045
1046 void
1047 gtidmac_chan_free(void *tag, int chan)
1048 {
1049 struct gtidmac_softc *sc = tag;
1050
1051 /* maybe need lock */
1052
1053 sc->sc_cdesc[chan].chan_running = NULL;
1054
1055 /* unlock */
1056 }
1057
1058 /* ARGSUSED */
1059 int
1060 gtidmac_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
1061 bus_dmamap_t *dmamap_out, bus_size_t size)
1062 {
1063 struct gtidmac_softc *sc = tag;
1064 struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1065 struct gtidmac_desc *desc;
1066 uint32_t ccl, bcnt, ires, ores;
1067 int n = 0, iidx, oidx;
1068
1069 KASSERT(ninputs == 0 || ninputs == 1);
1070
1071 ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1072 #ifdef DIAGNOSTIC
1073 if (ccl & GTIDMAC_CCLR_CHANACT)
1074 panic("gtidmac_setup: chan%d already active", chan);
1075 #endif
1076
1077 /* Always use chain mode, with at most (16M - 1) bytes per descriptor */
1078 ccl = (GTIDMAC_CCLR_DESCMODE_16M |
1079 #ifdef GTIDMAC_DEBUG
1080 GTIDMAC_CCLR_CDEN |
1081 #endif
1082 GTIDMAC_CCLR_TRANSFERMODE_B /* Transfer Mode: Block */ |
1083 GTIDMAC_CCLR_INTMODE_NULL /* Intr Mode: Next Desc NULL */ |
1084 GTIDMAC_CCLR_CHAINMODE_C /* Chain Mode: Chained */);
1085 if (size != (*dmamap_in)->dm_mapsize) {
1086 ccl |= GTIDMAC_CCLR_SRCHOLD;
1087 if ((*dmamap_in)->dm_mapsize == 8)
1088 ccl |= GTIDMAC_CCLR_SBL_8B;
1089 else if ((*dmamap_in)->dm_mapsize == 16)
1090 ccl |= GTIDMAC_CCLR_SBL_16B;
1091 else if ((*dmamap_in)->dm_mapsize == 32)
1092 ccl |= GTIDMAC_CCLR_SBL_32B;
1093 else if ((*dmamap_in)->dm_mapsize == 64)
1094 ccl |= GTIDMAC_CCLR_SBL_64B;
1095 else if ((*dmamap_in)->dm_mapsize == 128)
1096 ccl |= GTIDMAC_CCLR_SBL_128B;
1097 else
1098 panic("gtidmac_setup: chan%d source:"
1099 " unsupported hold size", chan);
1100 } else
1101 ccl |= GTIDMAC_CCLR_SBL_128B;
1102 if (size != (*dmamap_out)->dm_mapsize) {
1103 ccl |= GTIDMAC_CCLR_DESTHOLD;
1104 if ((*dmamap_out)->dm_mapsize == 8)
1105 ccl |= GTIDMAC_CCLR_DBL_8B;
1106 else if ((*dmamap_out)->dm_mapsize == 16)
1107 ccl |= GTIDMAC_CCLR_DBL_16B;
1108 else if ((*dmamap_out)->dm_mapsize == 32)
1109 ccl |= GTIDMAC_CCLR_DBL_32B;
1110 else if ((*dmamap_out)->dm_mapsize == 64)
1111 ccl |= GTIDMAC_CCLR_DBL_64B;
1112 else if ((*dmamap_out)->dm_mapsize == 128)
1113 ccl |= GTIDMAC_CCLR_DBL_128B;
1114 else
1115 panic("gtidmac_setup: chan%d destination:"
1116 " unsupported hold size", chan);
1117 } else
1118 ccl |= GTIDMAC_CCLR_DBL_128B;
1119
1120 fstdd = SLIST_FIRST(&sc->sc_dlist);
1121 if (fstdd == NULL) {
1122 aprint_error_dev(sc->sc_dev, "no descriptor\n");
1123 return ENOMEM;
1124 }
1125 SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
1126 sc->sc_cdesc[chan].chan_ddidx = fstdd->dd_index;
1127
1128 dd = fstdd;
1129 ires = ores = 0;
1130 iidx = oidx = 0;
1131 while (1 /*CONSTCOND*/) {
1132 if (ccl & GTIDMAC_CCLR_SRCHOLD) {
1133 if (ccl & GTIDMAC_CCLR_DESTHOLD)
1134 bcnt = size; /* src/dst hold */
1135 else
1136 bcnt = (*dmamap_out)->dm_segs[oidx].ds_len;
1137 } else if (ccl & GTIDMAC_CCLR_DESTHOLD)
1138 bcnt = (*dmamap_in)->dm_segs[iidx].ds_len;
1139 else
1140 bcnt = min((*dmamap_in)->dm_segs[iidx].ds_len - ires,
1141 (*dmamap_out)->dm_segs[oidx].ds_len - ores);
1142
1143 desc = dd->dd_idmac_vaddr;
1144 desc->bc.mode16m.bcnt =
1145 bcnt | GTIDMAC_CIDMABCR_BCLEFT | GTIDMAC_CIDMABCR_OWN;
1146 desc->srcaddr = (*dmamap_in)->dm_segs[iidx].ds_addr + ires;
1147 desc->dstaddr = (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
1148
1149 n += bcnt;
1150 if (n >= size)
1151 break;
1152 if (!(ccl & GTIDMAC_CCLR_SRCHOLD)) {
1153 ires += bcnt;
1154 if (ires >= (*dmamap_in)->dm_segs[iidx].ds_len) {
1155 ires = 0;
1156 iidx++;
1157 KASSERT(iidx < (*dmamap_in)->dm_nsegs);
1158 }
1159 }
1160 if (!(ccl & GTIDMAC_CCLR_DESTHOLD)) {
1161 ores += bcnt;
1162 if (ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
1163 ores = 0;
1164 oidx++;
1165 KASSERT(oidx < (*dmamap_out)->dm_nsegs);
1166 }
1167 }
1168
1169 nxtdd = SLIST_FIRST(&sc->sc_dlist);
1170 if (nxtdd == NULL) {
1171 aprint_error_dev(sc->sc_dev, "no descriptor\n");
1172 return ENOMEM;
1173 }
1174 SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
1175
1176 desc->nextdp = (uint32_t)nxtdd->dd_paddr;
1177 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1178 dd->dd_index * sizeof(*desc), sizeof(*desc),
1179 #ifdef GTIDMAC_DEBUG
1180 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1181 #else
1182 BUS_DMASYNC_PREWRITE);
1183 #endif
1184
1185 SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
1186 dd = nxtdd;
1187 }
1188 desc->nextdp = (uint32_t)NULL;
1189 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, dd->dd_index * sizeof(*desc),
1190 #ifdef GTIDMAC_DEBUG
1191 sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1192 #else
1193 sizeof(*desc), BUS_DMASYNC_PREWRITE);
1194 #endif
1195
1196 /* Set paddr of descriptor to Channel Next Descriptor Pointer */
1197 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan),
1198 fstdd->dd_paddr);
1199
1200 #if BYTE_ORDER == LITTLE_ENDIAN
1201 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
1202 GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_LE);
1203 #else
1204 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
1205 GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_BE);
1206 #endif
1207 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan), ccl);
1208
1209 #ifdef GTIDMAC_DEBUG
1210 gtidmac_dump_idmacdesc(sc, fstdd, ccl, 0/*pre*/);
1211 #endif
1212
1213 sc->sc_cdesc[chan].chan_totalcnt += size;
1214
1215 return 0;
1216 }
1217
1218 void
1219 gtidmac_start(void *tag, int chan,
1220 void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
1221 int))
1222 {
1223 struct gtidmac_softc *sc = tag;
1224 uint32_t ccl;
1225
1226 DPRINTF(("%s:%d: starting\n", device_xname(sc->sc_dev), chan));
1227
1228 #ifdef GTIDMAC_DEBUG
1229 gtidmac_dump_idmacreg(sc, chan);
1230 #endif
1231
1232 sc->sc_cdesc[chan].chan_dma_done = dma_done_cb;
1233
1234 ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1235 /* Start and 'Fetch Next Descriptor' */
1236 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan),
1237 ccl | GTIDMAC_CCLR_CHANEN | GTIDMAC_CCLR_FETCHND);
1238 }
1239
1240 static uint32_t
1241 gtidmac_finish(void *tag, int chan, int error)
1242 {
1243 struct gtidmac_softc *sc = tag;
1244 struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1245 struct gtidmac_desc *desc;
1246
1247 fstdd = &sc->sc_dd_buffer[sc->sc_cdesc[chan].chan_ddidx];
1248
1249 #ifdef GTIDMAC_DEBUG
1250 if (error || gtidmac_debug > 1) {
1251 uint32_t ccl;
1252
1253 gtidmac_dump_idmacreg(sc, chan);
1254 ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1255 GTIDMAC_CCLR(chan));
1256 gtidmac_dump_idmacdesc(sc, fstdd, ccl, 1/*post*/);
1257 }
1258 #endif
1259
1260 dd = fstdd;
1261 do {
1262 desc = dd->dd_idmac_vaddr;
1263
1264 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1265 dd->dd_index * sizeof(*desc), sizeof(*desc),
1266 #ifdef GTIDMAC_DEBUG
1267 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1268 #else
1269 BUS_DMASYNC_POSTWRITE);
1270 #endif
1271
1272 nxtdd = SLIST_NEXT(dd, dd_next);
1273 SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
1274 dd = nxtdd;
1275 } while (desc->nextdp);
1276
1277 return 0;
1278 }
1279
1280 /*
1281 * XORE functions
1282 */
1283 int
1284 mvxore_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
1285 bus_dmamap_t **dmamap_out, void *object)
1286 {
1287 struct gtidmac_softc *sc = tag;
1288 int chan;
1289
1290 /* maybe need lock */
1291
1292 for (chan = 0; chan < sc->sc_mvxore_nchan; chan++)
1293 if (sc->sc_cdesc_xore[chan].chan_running == NULL)
1294 break;
1295 if (chan >= sc->sc_mvxore_nchan)
1296 return -1;
1297
1298
1299 sc->sc_cdesc_xore[chan].chan_running = object;
1300
1301 /* unlock */
1302
1303 *dmamap_in = sc->sc_cdesc_xore[chan].chan_in;
1304 *dmamap_out = &sc->sc_cdesc_xore[chan].chan_out;
1305
1306 return chan;
1307 }
1308
1309 void
1310 mvxore_chan_free(void *tag, int chan)
1311 {
1312 struct gtidmac_softc *sc = tag;
1313
1314 /* maybe need lock */
1315
1316 sc->sc_cdesc_xore[chan].chan_running = NULL;
1317
1318 /* unlock */
1319 }
1320
1321 /* ARGSUSED */
1322 int
1323 mvxore_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
1324 bus_dmamap_t *dmamap_out, bus_size_t size)
1325 {
1326 struct gtidmac_softc *sc = tag;
1327 struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1328 struct mvxore_desc *desc;
1329 uint32_t xexc, bcnt, cmd, lastcmd;
1330 int n = 0, i;
1331 uint32_t ires[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, ores = 0;
1332 int iidx[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, oidx = 0;
1333
1334 #ifdef DIAGNOSTIC
1335 uint32_t xexact;
1336
1337 xexact = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(chan));
1338 if ((xexact & MVXORE_XEXACTR_XESTATUS_MASK) ==
1339 MVXORE_XEXACTR_XESTATUS_ACT)
1340 panic("mvxore_setup: chan%d already active."
1341 " mvxore does not support hot insertion", chan);
1342 #endif
1343
1344 xexc =
1345 (MVXORE_XEXCR_REGACCPROTECT |
1346 MVXORE_XEXCR_DBL_128B |
1347 MVXORE_XEXCR_SBL_128B);
1348 cmd = lastcmd = 0;
1349 if (ninputs > 1) {
1350 xexc |= MVXORE_XEXCR_OM_XOR;
1351 lastcmd = cmd = (1 << ninputs) - 1;
1352 } else if (ninputs == 1) {
1353 if ((*dmamap_out)->dm_nsegs == 0) {
1354 xexc |= MVXORE_XEXCR_OM_CRC32;
1355 lastcmd = MVXORE_DESC_CMD_CRCLAST;
1356 } else
1357 xexc |= MVXORE_XEXCR_OM_DMA;
1358 } else if (ninputs == 0) {
1359 if ((*dmamap_out)->dm_nsegs != 1) {
1360 aprint_error_dev(sc->sc_dev,
1361 "XORE does not support %d DMA segments\n",
1362 (*dmamap_out)->dm_nsegs);
1363 return EINVAL;
1364 }
1365
1366 if ((*dmamap_in)->dm_mapsize == 0) {
1367 xexc |= MVXORE_XEXCR_OM_ECC;
1368
1369 /* XXXXX: Maybe need to set Timer Mode registers? */
1370
1371 #if 0
1372 } else if ((*dmamap_in)->dm_mapsize == 8 ||
1373 (*dmamap_in)->dm_mapsize == 16) { /* in case dmover */
1374 uint64_t pattern;
1375
1376 /* XXXX: Get pattern data */
1377
1378 KASSERT((*dmamap_in)->dm_mapsize == 8 ||
1379 (void *)((uint32_t)(*dmamap_in)->_dm_origbuf &
1380 ~PAGE_MASK) == sc->sc_pbuf);
1381 pattern = *(uint64_t *)(*dmamap_in)->_dm_origbuf;
1382
1383 /* XXXXX: XORE has an IVR. We should get it first. */
1384 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRL,
1385 pattern);
1386 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRH,
1387 pattern >> 32);
1388
1389 xexc |= MVXORE_XEXCR_OM_MEMINIT;
1390 #endif
1391 } else {
1392 aprint_error_dev(sc->sc_dev,
1393 "XORE does not support DMA mapsize %zd\n",
1394 (*dmamap_in)->dm_mapsize);
1395 return EINVAL;
1396 }
1397 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXDPR(chan),
1398 (*dmamap_out)->dm_segs[0].ds_addr);
1399 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXBSR(chan),
1400 (*dmamap_out)->dm_mapsize);
1401
1402 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(chan),
1403 xexc);
1404 sc->sc_cdesc_xore[chan].chan_totalcnt += size;
1405
1406 return 0;
1407 }
1408
1409 /* Make descriptor for DMA/CRC32/XOR */
1410
1411 fstdd = SLIST_FIRST(&sc->sc_dlist_xore);
1412 if (fstdd == NULL) {
1413 aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
1414 return ENOMEM;
1415 }
1416 SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
1417 sc->sc_cdesc_xore[chan].chan_ddidx =
1418 fstdd->dd_index + GTIDMAC_NDESC * sc->sc_gtidmac_nchan;
1419
1420 dd = fstdd;
1421 while (1 /*CONSTCOND*/) {
1422 desc = dd->dd_xore_vaddr;
1423 desc->stat = MVXORE_DESC_STAT_OWN;
1424 desc->cmd = cmd;
1425 if ((*dmamap_out)->dm_nsegs != 0) {
1426 desc->dstaddr =
1427 (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
1428 bcnt = (*dmamap_out)->dm_segs[oidx].ds_len - ores;
1429 } else {
1430 desc->dstaddr = 0;
1431 bcnt = MVXORE_MAXXFER; /* XXXXX */
1432 }
1433 for (i = 0; i < ninputs; i++) {
1434 desc->srcaddr[i] =
1435 (*dmamap_in[i]).dm_segs[iidx[i]].ds_addr + ires[i];
1436 bcnt = min(bcnt,
1437 (*dmamap_in[i]).dm_segs[iidx[i]].ds_len - ires[i]);
1438 }
1439 desc->bcnt = bcnt;
1440
1441 n += bcnt;
1442 if (n >= size)
1443 break;
1444 ores += bcnt;
1445 if ((*dmamap_out)->dm_nsegs != 0 &&
1446 ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
1447 ores = 0;
1448 oidx++;
1449 KASSERT(oidx < (*dmamap_out)->dm_nsegs);
1450 }
1451 for (i = 0; i < ninputs; i++) {
1452 ires[i] += bcnt;
1453 if (ires[i] >=
1454 (*dmamap_in[i]).dm_segs[iidx[i]].ds_len) {
1455 ires[i] = 0;
1456 iidx[i]++;
1457 KASSERT(iidx[i] < (*dmamap_in[i]).dm_nsegs);
1458 }
1459 }
1460
1461 nxtdd = SLIST_FIRST(&sc->sc_dlist_xore);
1462 if (nxtdd == NULL) {
1463 aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
1464 return ENOMEM;
1465 }
1466 SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
1467
1468 desc->nextda = (uint32_t)nxtdd->dd_paddr;
1469 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1470 dd->dd_index * sizeof(*desc), sizeof(*desc),
1471 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1472
1473 SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
1474 dd = nxtdd;
1475 }
1476 desc->cmd = lastcmd;
1477 desc->nextda = (uint32_t)NULL;
1478 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1479 dd->dd_index * sizeof(*desc), sizeof(*desc),
1480 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1481
1482 /* Set paddr of descriptor to Channel Next Descriptor Pointer */
1483 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXNDPR(chan),
1484 fstdd->dd_paddr);
1485
1486 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(chan), xexc);
1487
1488 #ifdef GTIDMAC_DEBUG
1489 gtidmac_dump_xoredesc(sc, fstdd, xexc, 0/*pre*/);
1490 #endif
1491
1492 sc->sc_cdesc_xore[chan].chan_totalcnt += size;
1493
1494 return 0;
1495 }
1496
1497 void
1498 mvxore_start(void *tag, int chan,
1499 void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
1500 int))
1501 {
1502 struct gtidmac_softc *sc = tag;
1503 uint32_t xexact;
1504
1505 DPRINTF(("%s:%d: xore starting\n", device_xname(sc->sc_dev), chan));
1506
1507 #ifdef GTIDMAC_DEBUG
1508 gtidmac_dump_xorereg(sc, chan);
1509 #endif
1510
1511 sc->sc_cdesc_xore[chan].chan_dma_done = dma_done_cb;
1512
1513 xexact = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(chan));
1514 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(chan),
1515 xexact | MVXORE_XEXACTR_XESTART);
1516 }
1517
1518 static uint32_t
1519 mvxore_finish(void *tag, int chan, int error)
1520 {
1521 struct gtidmac_softc *sc = tag;
1522 struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1523 struct mvxore_desc *desc;
1524 uint32_t xexc;
1525
1526 #ifdef GTIDMAC_DEBUG
1527 if (error || gtidmac_debug > 1)
1528 gtidmac_dump_xorereg(sc, chan);
1529 #endif
1530
1531 xexc = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(chan));
1532 if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_ECC ||
1533 (xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_MEMINIT)
1534 return 0;
1535
1536 fstdd = &sc->sc_dd_buffer[sc->sc_cdesc_xore[chan].chan_ddidx];
1537
1538 #ifdef GTIDMAC_DEBUG
1539 if (error || gtidmac_debug > 1)
1540 gtidmac_dump_xoredesc(sc, fstdd, xexc, 1/*post*/);
1541 #endif
1542
1543 dd = fstdd;
1544 do {
1545 desc = dd->dd_xore_vaddr;
1546
1547 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1548 dd->dd_index * sizeof(*desc), sizeof(*desc),
1549 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1550
1551 nxtdd = SLIST_NEXT(dd, dd_next);
1552 SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
1553 dd = nxtdd;
1554 } while (desc->nextda);
1555
1556 if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_CRC32)
1557 return desc->result;
1558 return 0;
1559 }
1560
1561 static void
1562 gtidmac_wininit(struct gtidmac_softc *sc)
1563 {
1564 device_t pdev = device_parent(sc->sc_dev);
1565 uint64_t base;
1566 uint32_t size, cxap, en;
1567 int window, target, attr, rv, i;
1568 struct {
1569 int tag;
1570 int winacc;
1571 } targets[] = {
1572 { MARVELL_TAG_SDRAM_CS0, GTIDMAC_CXAPR_WINACC_FA },
1573 { MARVELL_TAG_SDRAM_CS1, GTIDMAC_CXAPR_WINACC_FA },
1574 { MARVELL_TAG_SDRAM_CS2, GTIDMAC_CXAPR_WINACC_FA },
1575 { MARVELL_TAG_SDRAM_CS3, GTIDMAC_CXAPR_WINACC_FA },
1576
1577 /* The following targets could also be set: */
1578 /* Devices = 0x1(ORION_TARGETID_DEVICE_*) */
1579 /* PCI = 0x3(ORION_TARGETID_PCI0_*) */
1580 /* PCI Express = 0x4(ORION_TARGETID_PEX?_*) */
1581 /* Tunit SRAM(?) = 0x5(???) */
1582
1583 { MARVELL_TAG_UNDEFINED, GTIDMAC_CXAPR_WINACC_NOAA }
1584 };
1585
1586 en = 0xff;
1587 cxap = 0;
1588 for (window = 0, i = 0;
1589 targets[i].tag != MARVELL_TAG_UNDEFINED && window < GTIDMAC_NWINDOW;
1590 i++) {
1591 rv = marvell_winparams_by_tag(pdev, targets[i].tag,
1592 &target, &attr, &base, &size);
1593 if (rv != 0 || size == 0)
1594 continue;
1595
1596 if (base > 0xffffffffULL) {
1597 if (window >= GTIDMAC_NREMAP) {
1598 aprint_error_dev(sc->sc_dev,
1599 "can't remap window %d\n", window);
1600 continue;
1601 }
1602 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1603 GTIDMAC_HARXR(window), (base >> 32) & 0xffffffff);
1604 }
1605 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BARX(window),
1606 GTIDMAC_BARX_TARGET(target) |
1607 GTIDMAC_BARX_ATTR(attr) |
1608 GTIDMAC_BARX_BASE(base));
1609 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_SRX(window),
1610 GTIDMAC_SRX_SIZE(size));
1611 en &= ~GTIDMAC_BAER_EN(window);
1612 cxap |= GTIDMAC_CXAPR_WINACC(window, targets[i].winacc);
1613 window++;
1614 }
1615 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BAER, en);
1616
1617 for (i = 0; i < GTIDMAC_NACCPROT; i++)
1618 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CXAPR(i),
1619 cxap);
1620 }
1621
1622 static void
1623 mvxore_wininit(struct gtidmac_softc *sc)
1624 {
1625 device_t pdev = device_parent(sc->sc_dev);
1626 uint64_t base;
1627 uint32_t target, attr, size, xexwc;
1628 int window, rv, i;
1629 struct {
1630 int tag;
1631 int winacc;
1632 } targets[] = {
1633 { MARVELL_TAG_SDRAM_CS0, MVXORE_XEXWCR_WINACC_FA },
1634 { MARVELL_TAG_SDRAM_CS1, MVXORE_XEXWCR_WINACC_FA },
1635 { MARVELL_TAG_SDRAM_CS2, MVXORE_XEXWCR_WINACC_FA },
1636 { MARVELL_TAG_SDRAM_CS3, MVXORE_XEXWCR_WINACC_FA },
1637
1638 { MARVELL_TAG_UNDEFINED, MVXORE_XEXWCR_WINACC_NOAA }
1639 };
1640
1641 xexwc = 0;
1642 for (window = 0, i = 0;
1643 targets[i].tag != MARVELL_TAG_UNDEFINED && window < MVXORE_NWINDOW;
1644 i++) {
1645 rv = marvell_winparams_by_tag(pdev, targets[i].tag,
1646 &target, &attr, &base, &size);
1647 if (rv != 0 || size == 0)
1648 continue;
1649
1650 if (base > 0xffffffffULL) {
1651 if (window >= MVXORE_NREMAP) {
1652 aprint_error_dev(sc->sc_dev,
1653 "can't remap window %d\n", window);
1654 continue;
1655 }
1656 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1657 MVXORE_XEHARRX(window), (base >> 32) & 0xffffffff);
1658 }
1659
1660 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEBARX(window),
1661 MVXORE_XEBARX_TARGET(target) |
1662 MVXORE_XEBARX_ATTR(attr) |
1663 MVXORE_XEBARX_BASE(base));
1664 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1665 MVXORE_XESMRX(window), MVXORE_XESMRX_SIZE(size));
1666 xexwc |= (MVXORE_XEXWCR_WINEN(window) |
1667 MVXORE_XEXWCR_WINACC(window, targets[i].winacc));
1668 window++;
1669 }
1670 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXWCR(0), xexwc);
1671 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXWCR(1), xexwc);
1672
1673 /* XXXXX: reset... */
1674 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXAOCR(0), 0);
1675 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXAOCR(1), 0);
1676 }
1677
1678
1679 #ifdef GTIDMAC_DEBUG
1680 static void
1681 gtidmac_dump_idmacreg(struct gtidmac_softc *sc, int chan)
1682 {
1683 uint32_t val;
1684 char buf[256];
1685
1686 printf("IDMAC Registers\n");
1687
1688 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMABCR(chan));
1689 snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036BCLeft\0", val);
1690 printf(" Byte Count : %s\n", buf);
1691 printf(" ByteCnt : 0x%06x\n",
1692 val & GTIDMAC_CIDMABCR_BYTECNT_MASK);
1693 printf(" Source Address : 0x%08x\n",
1694 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMASAR(chan)));
1695 printf(" Destination Address : 0x%08x\n",
1696 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMADAR(chan)));
1697 printf(" Next Descriptor Pointer : 0x%08x\n",
1698 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan)));
1699 printf(" Current Descriptor Pointer : 0x%08x\n",
1700 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCDPR(chan)));
1701
1702 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1703 snprintb(buf, sizeof(buf),
1704 "\177\020b\024Abr\0b\021CDEn\0b\016ChanAct\0b\015FetchND\0"
1705 "b\014ChanEn\0b\012IntMode\0b\005DestHold\0b\003SrcHold\0",
1706 val);
1707 printf(" Channel Control (Low) : %s\n", buf);
1708 printf(" SrcBurstLimit : %s Bytes\n",
1709 (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_128B ? "128" :
1710 (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_64B ? "64" :
1711 (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_32B ? "32" :
1712 (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_16B ? "16" :
1713 (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_8B ? "8" :
1714 "unknown");
1715 printf(" DstBurstLimit : %s Bytes\n",
1716 (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_128B ? "128" :
1717 (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_64B ? "64" :
1718 (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_32B ? "32" :
1719 (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_16B ? "16" :
1720 (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_8B ? "8" :
1721 "unknown");
1722 printf(" ChainMode : %sChained\n",
1723 val & GTIDMAC_CCLR_CHAINMODE_NC ? "Non-" : "");
1724 printf(" TransferMode : %s\n",
1725 val & GTIDMAC_CCLR_TRANSFERMODE_B ? "Block" : "Demand");
1726 printf(" DescMode : %s\n",
1727 val & GTIDMAC_CCLR_DESCMODE_16M ? "16M" : "64k");
1728 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan));
1729 snprintb(buf, sizeof(buf),
1730 "\177\020b\001DescByteSwap\0b\000Endianness\0", val);
1731 printf(" Channel Control (High) : %s\n", buf);
1732 }
1733
1734 static void
1735 gtidmac_dump_idmacdesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
1736 uint32_t mode, int post)
1737 {
1738 struct gtidmac_desc *desc;
1739 int i;
1740 char buf[256];
1741
1742 printf("IDMAC Descriptor\n");
1743
1744 i = 0;
1745 while (1 /*CONSTCOND*/) {
1746 if (post)
1747 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1748 dd->dd_index * sizeof(*desc), sizeof(*desc),
1749 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1750
1751 desc = dd->dd_idmac_vaddr;
1752
1753 printf("%d (0x%lx)\n", i, dd->dd_paddr);
1754 if (mode & GTIDMAC_CCLR_DESCMODE_16M) {
1755 snprintb(buf, sizeof(buf),
1756 "\177\020b\037Own\0b\036BCLeft\0",
1757 desc->bc.mode16m.bcnt);
1758 printf(" Byte Count : %s\n", buf);
1759 printf(" ByteCount : 0x%06x\n",
1760 desc->bc.mode16m.bcnt &
1761 GTIDMAC_CIDMABCR_BYTECNT_MASK);
1762 } else {
1763 printf(" Byte Count : 0x%04x\n",
1764 desc->bc.mode64k.bcnt);
1765 printf(" Remaining Byte Count : 0x%04x\n",
1766 desc->bc.mode64k.rbc);
1767 }
1768 printf(" Source Address : 0x%08x\n", desc->srcaddr);
1769 printf(" Destination Address : 0x%08x\n", desc->dstaddr);
1770 printf(" Next Descriptor Pointer : 0x%08x\n", desc->nextdp);
1771
1772 if (desc->nextdp == (uint32_t)NULL)
1773 break;
1774
1775 if (!post)
1776 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1777 dd->dd_index * sizeof(*desc), sizeof(*desc),
1778 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1779
1780 i++;
1781 dd = SLIST_NEXT(dd, dd_next);
1782 }
1783 if (!post)
1784 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1785 dd->dd_index * sizeof(*desc), sizeof(*desc),
1786 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1787 }
1788
1789 static void
1790 gtidmac_dump_xorereg(struct gtidmac_softc *sc, int chan)
1791 {
1792 uint32_t val, opmode;
1793 char buf[64];
1794
1795 printf("XORE Registers\n");
1796
1797 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(chan));
1798 snprintb(buf, sizeof(buf),
1799 "\177\020"
1800 "b\017RegAccProtect\0b\016DesSwp\0b\015DwrReqSwp\0b\014DrdResSwp\0",
1801 val);
1802 printf(" Configuration : %s\n", buf);
1803 opmode = val & MVXORE_XEXCR_OM_MASK;
1804 printf(" OperationMode : %s operation\n",
1805 opmode == MVXORE_XEXCR_OM_XOR ? "XOR calculate" :
1806 opmode == MVXORE_XEXCR_OM_CRC32 ? "CRC-32 calculate" :
1807 opmode == MVXORE_XEXCR_OM_DMA ? "DMA" :
1808 opmode == MVXORE_XEXCR_OM_ECC ? "ECC cleanup" :
1809 opmode == MVXORE_XEXCR_OM_MEMINIT ? "Memory Initialization" :
1810 "unknown");
1811 printf(" SrcBurstLimit : %s Bytes\n",
1812 (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
1813 (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
1814 (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
1815 "unknown");
1816 printf(" DstBurstLimit : %s Bytes\n",
1817 (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
1818 (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
1819 (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
1820 "unknown");
1821 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(chan));
1822 printf(" Activation : 0x%08x\n", val);
1823 val &= MVXORE_XEXACTR_XESTATUS_MASK;
1824 printf(" XEstatus : %s\n",
1825 val == MVXORE_XEXACTR_XESTATUS_NA ? "Channel not active" :
1826 val == MVXORE_XEXACTR_XESTATUS_ACT ? "Channel active" :
1827 val == MVXORE_XEXACTR_XESTATUS_P ? "Channel paused" : "???");
1828
1829 if (opmode == MVXORE_XEXCR_OM_XOR ||
1830 opmode == MVXORE_XEXCR_OM_CRC32 ||
1831 opmode == MVXORE_XEXCR_OM_DMA) {
1832 printf(" NextDescPtr : 0x%08x\n",
1833 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1834 MVXORE_XEXNDPR(chan)));
1835 printf(" CurrentDescPtr : 0x%08x\n",
1836 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1837 MVXORE_XEXCDPR(chan)));
1838 }
1839 printf(" ByteCnt : 0x%08x\n",
1840 bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXBCR(chan)));
1841
1842 if (opmode == MVXORE_XEXCR_OM_ECC ||
1843 opmode == MVXORE_XEXCR_OM_MEMINIT) {
1844 printf(" DstPtr : 0x%08x\n",
1845 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1846 MVXORE_XEXDPR(chan)));
1847 printf(" BlockSize : 0x%08x\n",
1848 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1849 MVXORE_XEXBSR(chan)));
1850
1851 if (opmode == MVXORE_XEXCR_OM_ECC) {
1852 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1853 MVXORE_XETMCR);
1854 if (val & MVXORE_XETMCR_TIMEREN) {
1855 val >>= MVXORE_XETMCR_SECTIONSIZECTRL_SHIFT;
1856 val &= MVXORE_XETMCR_SECTIONSIZECTRL_MASK;
1857 printf(" SectionSizeCtrl : 0x%08x\n", 1 << val);
1858 printf(" TimerInitVal : 0x%08x\n",
1859 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1860 MVXORE_XETMIVR));
1861 printf(" TimerCrntVal : 0x%08x\n",
1862 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1863 MVXORE_XETMCVR));
1864 }
1865 } else /* MVXORE_XEXCR_OM_MEMINIT */
1866 printf(" InitVal : 0x%08x%08x\n",
1867 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1868 MVXORE_XEIVRH),
1869 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1870 MVXORE_XEIVRL));
1871 }
1872 }
1873
1874 static void
1875 gtidmac_dump_xoredesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
1876 uint32_t mode, int post)
1877 {
1878 struct mvxore_desc *desc;
1879 int i, j;
1880 char buf[256];
1881
1882 printf("XORE Descriptor\n");
1883
1884 mode &= MVXORE_XEXCR_OM_MASK;
1885
1886 i = 0;
1887 while (1 /*CONSTCOND*/) {
1888 if (post)
1889 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1890 dd->dd_index * sizeof(*desc), sizeof(*desc),
1891 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1892
1893 desc = dd->dd_xore_vaddr;
1894
1895 printf("%d (0x%lx)\n", i, dd->dd_paddr);
1896
1897 snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036Success\0",
1898 desc->stat);
1899 printf(" Status : %s\n", buf);
1900 if (desc->cmd & MVXORE_DESC_CMD_CRCLAST && post)
1901 printf(" CRC-32 Result : 0x%08x\n",
1902 desc->result);
1903 snprintb(buf, sizeof(buf),
1904 "\177\020b\037EODIntEn\0b\036CRCLast\0"
1905 "b\007Src7Cmd\0b\006Src6Cmd\0b\005Src5Cmd\0b\004Src4Cmd\0"
1906 "b\003Src3Cmd\0b\002Src2Cmd\0b\001Src1Cmd\0b\000Src0Cmd\0",
1907 desc->cmd);
1908 printf(" Command : %s\n", buf);
1909 printf(" Next Descriptor Address : 0x%08x\n", desc->nextda);
1910 printf(" Byte Count : 0x%06x\n", desc->bcnt);
1911 printf(" Destination Address : 0x%08x\n", desc->dstaddr);
1912 if (mode == MVXORE_XEXCR_OM_XOR) {
1913 for (j = 0; j < MVXORE_NSRC; j++)
1914 if (desc->cmd & MVXORE_DESC_CMD_SRCCMD(j))
1915 printf(" Source Address#%d :"
1916 " 0x%08x\n", j, desc->srcaddr[j]);
1917 } else
1918 printf(" Source Address : 0x%08x\n",
1919 desc->srcaddr[0]);
1920
1921 if (desc->nextda == (uint32_t)NULL)
1922 break;
1923
1924 if (!post)
1925 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1926 dd->dd_index * sizeof(*desc), sizeof(*desc),
1927 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1928
1929 i++;
1930 dd = SLIST_NEXT(dd, dd_next);
1931 }
1932 if (!post)
1933 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1934 dd->dd_index * sizeof(*desc), sizeof(*desc),
1935 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1936 }
1937 #endif
1938